templateTable_x86.cpp revision 9111:a41fe5ffa839
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Global Register Names
Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);

// Platform-dependent initialization
void TemplateTable::pd_initialize() {
  // No x86 specific initialization
}

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r)       {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}
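
// Note on the helpers above: rlocals points at local slot 0 and
// higher-numbered locals live at successively lower addresses. That is why
// locals_index() below negates the slot number before it is scaled by
// times_ptr, and why laddress(n) == iaddress(n + 1): a category-2 value
// occupies slots n and n+1, with its low word at the lower address.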


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp   () {
  return Address(rsp, 0);
}

// At top of Java expression stack, which may be different from rsp(); it
// isn't for category 1 values.
static inline Address at_tos   () {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}


// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movptr(rdx, obj.base());
          }
        } else {
          __ lea(rdx, obj);
        }

        Register rtmp    = LP64_ONLY(r8)         NOT_LP64(rsi);
        Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);

        NOT_LP64(__ get_thread(rcx));
        NOT_LP64(__ save_bcp());

        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                rthread /* thread */,
                                rtmp  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          // G1 barrier needs uncompressed oop for region cross check.
          Register new_val = val;
          if (UseCompressedOops) {
            new_val = rbx;
            __ movptr(new_val, val);
          }
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   new_val /* new_val */,
                                   rthread /* thread */,
                                   rtmp /* tmp */,
                                   rbx /* tmp2 */);
        }
        NOT_LP64( __ restore_bcp());
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ lea(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
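
// Example use (see aastore below): after flattening the element address
// into rdx, a store with a precise barrier is issued as
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
// while a NULL store passes the unflattened element address and val == noreg.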

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}

// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
    case 0:
      __ xorps(xmm0, xmm0);
      break;
    case 1:
      __ movflt(xmm0, ExternalAddress((address) &one));
      break;
    case 2:
      __ movflt(xmm0, ExternalAddress((address) &two));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // could do better here
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
    case 0:
      __ xorpd(xmm0, xmm0);
      break;
    case 1:
      __ movdbl(xmm0, ExternalAddress((address) &one));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
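
// sipush reads its 16-bit operand with a plain little-endian 2-byte load;
// bswapl then leaves the big-endian operand in the upper 16 bits of rax,
// and the arithmetic right shift repositions it while sign-extending.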

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmpb(Done);
  __ bind(Long);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
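
// The operand byte is the local slot number; negating it here makes the
// scaled-index form in iaddress(Register) walk downward from rlocals
// (see the note after the address-computation helpers above).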

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2. We only want to rewrite
    // the last two iloads in a pair. Comparing against fast_iload means
    // that the next bytecode is neither an iload nor a caload, and
    // therefore an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
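
// Wide bytecodes carry a 16-bit slot number: the little-endian load plus
// bswapl/shrl(16) recovers the big-endian operand zero-extended (the index
// is unsigned), and negptr again flips it for downward slot addressing.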

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
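
// The bounds check above uses an unsigned comparison (aboveEqual): a
// negative index, after sign extension, compares as a huge unsigned value
// and therefore also takes the out-of-bounds path.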


void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_float(Address(rdx, rax,
                        Address::times_4,
                        arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_double(Address(rdx, rax,
                         Address::times_8,
                         arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_ptr,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are the most profitable to rewrite because they
  // require only a small amount of code.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx); // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx);     // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, rcx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}
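
// For the shift cases above, the count is moved into rcx first because the
// x86 variable-shift instructions (shl/sar/shr) take their count in CL.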

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
#else
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}
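
// corrected_idivl() also guards the min_int / -1 case: a raw idiv would
// raise #DE on that input, while the JVM spec requires the quotient to be
// min_int (and the remainder, for irem below, to be 0).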

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_long). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_long). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
#ifdef _LP64
  __ pop_l(rax);                                 // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);                            // get shift value
  __ lshl(rdx, rax);
#endif
}

void TemplateTable::lshr() {
#ifdef _LP64
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ sarq(rax);
#else
  transition(itos, ltos);
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
      break;
    case rem:
      // On x86_64 platforms the SharedRuntime::frem method is called to perform the
      // modulo operation. The frem method calls the function
      // double fmod(double x, double y) in math.h. The documentation of fmod states:
      // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
      // (signalling or quiet) is returned.
      //
      // On x86_32 platforms the FPU is used to perform the modulo operation. The
      // reason is that on 32-bit Windows the sign of modulo operations diverges from
      // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f and not -0.0f).
      // The fprem instruction used on x86_32 is functionally equivalent to
      // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else
      __ push_f(xmm0);
      __ pop_f();
      __ fld_s(at_rsp());
      __ fremr(rax);
      __ f2ieee();
      __ pop(rax);  // pop second operand off the stack
      __ push_f();
      __ pop_f(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
      break;
    case rem:
      // Similar to fop2(), the modulo operation is performed using the
      // SharedRuntime::drem method (on x86_64 platforms) or using the
      // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else
      __ push_d(xmm0);
      __ pop_d();
      __ fld_d(at_rsp());
      __ fremr(rax);
      __ d2ieee();
      __ pop(rax);
      __ pop(rdx);
      __ push_d();
      __ pop_d(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_d (at_rsp());                break;
    case sub: __ fsubr_d(at_rsp());                break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
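
// The sign-flip pools below are twice the required size (2*2 jlongs) so
// that double_quadword(), which rounds &pool[1] down to a 16-byte boundary,
// always finds a properly aligned 16-byte window inside the array.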

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  if (UseSSE >= 1) {
    static jlong *float_signflip  = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
    __ xorps(xmm0, ExternalAddress((address) float_signflip));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    NOT_LP64(__ fchs());
  }
}
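
// In the SSE paths, fneg (and dneg below) negate by XORing the sign bit of
// each lane with the 16-byte masks built above; only the low lane, which
// holds the interpreter's ftos/dtos value in xmm0, actually matters.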

void TemplateTable::dneg() {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    static jlong *double_signflip  = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
    __ xorpd(xmm0, ExternalAddress((address) double_signflip));
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    __ fchs();
#endif
  }
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}
1653
1654void TemplateTable::convert() {
1655#ifdef _LP64
1656  // Checking
1657#ifdef ASSERT
1658  {
1659    TosState tos_in  = ilgl;
1660    TosState tos_out = ilgl;
1661    switch (bytecode()) {
1662    case Bytecodes::_i2l: // fall through
1663    case Bytecodes::_i2f: // fall through
1664    case Bytecodes::_i2d: // fall through
1665    case Bytecodes::_i2b: // fall through
1666    case Bytecodes::_i2c: // fall through
1667    case Bytecodes::_i2s: tos_in = itos; break;
1668    case Bytecodes::_l2i: // fall through
1669    case Bytecodes::_l2f: // fall through
1670    case Bytecodes::_l2d: tos_in = ltos; break;
1671    case Bytecodes::_f2i: // fall through
1672    case Bytecodes::_f2l: // fall through
1673    case Bytecodes::_f2d: tos_in = ftos; break;
1674    case Bytecodes::_d2i: // fall through
1675    case Bytecodes::_d2l: // fall through
1676    case Bytecodes::_d2f: tos_in = dtos; break;
1677    default             : ShouldNotReachHere();
1678    }
1679    switch (bytecode()) {
1680    case Bytecodes::_l2i: // fall through
1681    case Bytecodes::_f2i: // fall through
1682    case Bytecodes::_d2i: // fall through
1683    case Bytecodes::_i2b: // fall through
1684    case Bytecodes::_i2c: // fall through
1685    case Bytecodes::_i2s: tos_out = itos; break;
1686    case Bytecodes::_i2l: // fall through
1687    case Bytecodes::_f2l: // fall through
1688    case Bytecodes::_d2l: tos_out = ltos; break;
1689    case Bytecodes::_i2f: // fall through
1690    case Bytecodes::_l2f: // fall through
1691    case Bytecodes::_d2f: tos_out = ftos; break;
1692    case Bytecodes::_i2d: // fall through
1693    case Bytecodes::_l2d: // fall through
1694    case Bytecodes::_f2d: tos_out = dtos; break;
1695    default             : ShouldNotReachHere();
1696    }
1697    transition(tos_in, tos_out);
1698  }
1699#endif // ASSERT
1700
1701  static const int64_t is_nan = 0x8000000000000000L;
1702
1703  // Conversion
1704  switch (bytecode()) {
1705  case Bytecodes::_i2l:
1706    __ movslq(rax, rax);
1707    break;
1708  case Bytecodes::_i2f:
1709    __ cvtsi2ssl(xmm0, rax);
1710    break;
1711  case Bytecodes::_i2d:
1712    __ cvtsi2sdl(xmm0, rax);
1713    break;
1714  case Bytecodes::_i2b:
1715    __ movsbl(rax, rax);
1716    break;
1717  case Bytecodes::_i2c:
1718    __ movzwl(rax, rax);
1719    break;
1720  case Bytecodes::_i2s:
1721    __ movswl(rax, rax);
1722    break;
1723  case Bytecodes::_l2i:
1724    __ movl(rax, rax);
1725    break;
1726  case Bytecodes::_l2f:
1727    __ cvtsi2ssq(xmm0, rax);
1728    break;
1729  case Bytecodes::_l2d:
1730    __ cvtsi2sdq(xmm0, rax);
1731    break;
1732  case Bytecodes::_f2i:
1733  {
1734    Label L;
1735    __ cvttss2sil(rax, xmm0);
1736    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1737    __ jcc(Assembler::notEqual, L);
1738    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1739    __ bind(L);
1740  }
1741    break;
1742  case Bytecodes::_f2l:
1743  {
1744    Label L;
1745    __ cvttss2siq(rax, xmm0);
1746    // NaN or overflow/underflow?
1747    __ cmp64(rax, ExternalAddress((address) &is_nan));
1748    __ jcc(Assembler::notEqual, L);
1749    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1750    __ bind(L);
1751  }
1752    break;
1753  case Bytecodes::_f2d:
1754    __ cvtss2sd(xmm0, xmm0);
1755    break;
1756  case Bytecodes::_d2i:
1757  {
1758    Label L;
1759    __ cvttsd2sil(rax, xmm0);
1760    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1761    __ jcc(Assembler::notEqual, L);
1762    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1763    __ bind(L);
1764  }
1765    break;
1766  case Bytecodes::_d2l:
1767  {
1768    Label L;
1769    __ cvttsd2siq(rax, xmm0);
1770    // NaN or overflow/underflow?
1771    __ cmp64(rax, ExternalAddress((address) &is_nan));
1772    __ jcc(Assembler::notEqual, L);
1773    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1774    __ bind(L);
1775  }
1776    break;
1777  case Bytecodes::_d2f:
1778    __ cvtsd2ss(xmm0, xmm0);
1779    break;
1780  default:
1781    ShouldNotReachHere();
1782  }
1783#else
1784  // Checking
1785#ifdef ASSERT
1786  { TosState tos_in  = ilgl;
1787    TosState tos_out = ilgl;
1788    switch (bytecode()) {
1789      case Bytecodes::_i2l: // fall through
1790      case Bytecodes::_i2f: // fall through
1791      case Bytecodes::_i2d: // fall through
1792      case Bytecodes::_i2b: // fall through
1793      case Bytecodes::_i2c: // fall through
1794      case Bytecodes::_i2s: tos_in = itos; break;
1795      case Bytecodes::_l2i: // fall through
1796      case Bytecodes::_l2f: // fall through
1797      case Bytecodes::_l2d: tos_in = ltos; break;
1798      case Bytecodes::_f2i: // fall through
1799      case Bytecodes::_f2l: // fall through
1800      case Bytecodes::_f2d: tos_in = ftos; break;
1801      case Bytecodes::_d2i: // fall through
1802      case Bytecodes::_d2l: // fall through
1803      case Bytecodes::_d2f: tos_in = dtos; break;
1804      default             : ShouldNotReachHere();
1805    }
1806    switch (bytecode()) {
1807      case Bytecodes::_l2i: // fall through
1808      case Bytecodes::_f2i: // fall through
1809      case Bytecodes::_d2i: // fall through
1810      case Bytecodes::_i2b: // fall through
1811      case Bytecodes::_i2c: // fall through
1812      case Bytecodes::_i2s: tos_out = itos; break;
1813      case Bytecodes::_i2l: // fall through
1814      case Bytecodes::_f2l: // fall through
1815      case Bytecodes::_d2l: tos_out = ltos; break;
1816      case Bytecodes::_i2f: // fall through
1817      case Bytecodes::_l2f: // fall through
1818      case Bytecodes::_d2f: tos_out = ftos; break;
1819      case Bytecodes::_i2d: // fall through
1820      case Bytecodes::_l2d: // fall through
1821      case Bytecodes::_f2d: tos_out = dtos; break;
1822      default             : ShouldNotReachHere();
1823    }
1824    transition(tos_in, tos_out);
1825  }
1826#endif // ASSERT
1827
1828  // Conversion
1829  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1830  switch (bytecode()) {
1831    case Bytecodes::_i2l:
1832      __ extend_sign(rdx, rax);
1833      break;
1834    case Bytecodes::_i2f:
1835      if (UseSSE >= 1) {
1836        __ cvtsi2ssl(xmm0, rax);
1837      } else {
1838        __ push(rax);          // store int on tos
1839        __ fild_s(at_rsp());   // load int to ST0
1840        __ f2ieee();           // truncate to float size
1841        __ pop(rcx);           // adjust rsp
1842      }
1843      break;
1844    case Bytecodes::_i2d:
1845      if (UseSSE >= 2) {
1846        __ cvtsi2sdl(xmm0, rax);
1847      } else {
1848        __ push(rax);          // add one slot for d2ieee()
1849        __ push(rax);          // store int on tos
1850        __ fild_s(at_rsp());   // load int to ST0
1851        __ d2ieee();           // truncate to double size
1852        __ pop(rcx);           // adjust rsp
1853        __ pop(rcx);
1854      }
1855      break;
1856    case Bytecodes::_i2b:
1857      __ shll(rax, 24);      // truncate upper 24 bits
1858      __ sarl(rax, 24);      // and sign-extend byte
1859      LP64_ONLY(__ movsbl(rax, rax));
1860      break;
1861    case Bytecodes::_i2c:
1862      __ andl(rax, 0xFFFF);  // truncate upper 16 bits
1863      LP64_ONLY(__ movzwl(rax, rax));
1864      break;
1865    case Bytecodes::_i2s:
1866      __ shll(rax, 16);      // truncate upper 16 bits
1867      __ sarl(rax, 16);      // and sign-extend short
1868      LP64_ONLY(__ movswl(rax, rax));
1869      break;
1870    case Bytecodes::_l2i:
1871      /* nothing to do */
1872      break;
1873    case Bytecodes::_l2f:
1874      // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1875      // 64-bit long values to floats. On 32-bit platforms it is not possible
1876      // to use that instruction with 64-bit operands, therefore the FPU is
1877      // used to perform the conversion.
1878      __ push(rdx);          // store long on tos
1879      __ push(rax);
1880      __ fild_d(at_rsp());   // load long to ST0
1881      __ f2ieee();           // truncate to float size
1882      __ pop(rcx);           // adjust rsp
1883      __ pop(rcx);
1884      if (UseSSE >= 1) {
1885        __ push_f();
1886        __ pop_f(xmm0);
1887      }
1888      break;
1889    case Bytecodes::_l2d:
1890      // On 32-bit platforms the FPU is used for this conversion because
1891      // it is not possible to use the cvtsi2sdq instruction with 64-bit
1892      // operands.
1893      __ push(rdx);          // store long on tos
1894      __ push(rax);
1895      __ fild_d(at_rsp());   // load long to ST0
1896      __ d2ieee();           // truncate to double size
1897      __ pop(rcx);           // adjust rsp
1898      __ pop(rcx);
1899      if (UseSSE >= 2) {
1900        __ push_d();
1901        __ pop_d(xmm0);
1902      }
1903      break;
1904    case Bytecodes::_f2i:
1905      // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
1906      // as it returns 0 for any NaN.
1907      if (UseSSE >= 1) {
1908        __ push_f(xmm0);
1909      } else {
1910        __ push(rcx);          // reserve space for argument
1911        __ fstp_s(at_rsp());   // pass float argument on stack
1912      }
1913      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1914      break;
1915    case Bytecodes::_f2l:
1916      // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
1917      // as it returns 0 for any NaN.
1918      if (UseSSE >= 1) {
1919        __ push_f(xmm0);
1920      } else {
1921        __ push(rcx);          // reserve space for argument
1922        __ fstp_s(at_rsp());   // pass float argument on stack
1923      }
1924      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1925      break;
1926    case Bytecodes::_f2d:
1927      if (UseSSE < 1) {
1928        /* nothing to do */
1929      } else if (UseSSE == 1) {
1930        __ push_f(xmm0);
1931        __ pop_f();
1932      } else { // UseSSE >= 2
1933        __ cvtss2sd(xmm0, xmm0);
1934      }
1935      break;
1936    case Bytecodes::_d2i:
1937      if (UseSSE >= 2) {
1938        __ push_d(xmm0);
1939      } else {
1940        __ push(rcx);          // reserve space for argument
1941        __ push(rcx);
1942        __ fstp_d(at_rsp());   // pass double argument on stack
1943      }
1944      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1945      break;
1946    case Bytecodes::_d2l:
1947      if (UseSSE >= 2) {
1948        __ push_d(xmm0);
1949      } else {
1950        __ push(rcx);          // reserve space for argument
1951        __ push(rcx);
1952        __ fstp_d(at_rsp());   // pass double argument on stack
1953      }
1954      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1955      break;
1956    case Bytecodes::_d2f:
1957      if (UseSSE <= 1) {
1958        __ push(rcx);          // reserve space for f2ieee()
1959        __ f2ieee();           // truncate to float size
1960        __ pop(rcx);           // adjust rsp
1961        if (UseSSE == 1) {
1962          // The cvtsd2ss instruction is not available if UseSSE==1, therefore
1963          // the conversion is performed using the FPU in this case.
1964          __ push_f();
1965          __ pop_f(xmm0);
1966        }
1967      } else { // UseSSE >= 2
1968        __ cvtsd2ss(xmm0, xmm0);
1969      }
1970      break;
1971    default             :
1972      ShouldNotReachHere();
1973  }
1974#endif // _LP64
1975}
1976
1977void TemplateTable::lcmp() {
1978  transition(ltos, itos);
1979#ifdef _LP64
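  // Branch-free three-way compare: rax starts at -1 and the jcc handles
  // x < y; otherwise setb(notEqual) leaves 0 for x == y and 1 for x > y.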
1980  Label done;
1981  __ pop_l(rdx);
1982  __ cmpq(rdx, rax);
1983  __ movl(rax, -1);
1984  __ jccb(Assembler::less, done);
1985  __ setb(Assembler::notEqual, rax);
1986  __ movzbl(rax, rax);
1987  __ bind(done);
1988#else
1989
1990  // y = rdx:rax
1991  __ pop_l(rbx, rcx);             // get x = rcx:rbx
1992  __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1993  __ mov(rax, rcx);
1994#endif
1995}
1996
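// Per the JVM spec, fcmpl/dcmpl produce -1 for an unordered (NaN) operand
// while fcmpg/dcmpg produce +1; the caller encodes that choice in
// unordered_result.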
1997void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1998  if ((is_float && UseSSE >= 1) ||
1999      (!is_float && UseSSE >= 2)) {
2000    Label done;
2001    if (is_float) {
2002      // XXX get rid of pop here, use ... reg, mem32
2003      __ pop_f(xmm1);
2004      __ ucomiss(xmm1, xmm0);
2005    } else {
2006      // XXX get rid of pop here, use ... reg, mem64
2007      __ pop_d(xmm1);
2008      __ ucomisd(xmm1, xmm0);
2009    }
2010    if (unordered_result < 0) {
2011      __ movl(rax, -1);
2012      __ jccb(Assembler::parity, done);
2013      __ jccb(Assembler::below, done);
2014      __ setb(Assembler::notEqual, rdx);
2015      __ movzbl(rax, rdx);
2016    } else {
2017      __ movl(rax, 1);
2018      __ jccb(Assembler::parity, done);
2019      __ jccb(Assembler::above, done);
2020      __ movl(rax, 0);
2021      __ jccb(Assembler::equal, done);
2022      __ decrementl(rax);
2023    }
2024    __ bind(done);
2025  } else {
2026#ifdef _LP64
2027    ShouldNotReachHere();
2028#else
2029    if (is_float) {
2030      __ fld_s(at_rsp());
2031    } else {
2032      __ fld_d(at_rsp());
2033      __ pop(rdx);
2034    }
2035    __ pop(rcx);
2036    __ fcmp2int(rax, unordered_result < 0);
2037#endif // _LP64
2038  }
2039}
2040
2041void TemplateTable::branch(bool is_jsr, bool is_wide) {
2042  __ get_method(rcx); // rcx holds method
2043  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2044                                     // holds bumped taken count
2045
2046  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2047                             InvocationCounter::counter_offset();
2048  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2049                              InvocationCounter::counter_offset();
2050
2051  // Load up edx with the branch displacement
2052  if (is_wide) {
2053    __ movl(rdx, at_bcp(1));
2054  } else {
2055    __ load_signed_short(rdx, at_bcp(1));
2056  }
2057  __ bswapl(rdx);
2058
2059  if (!is_wide) {
2060    __ sarl(rdx, 16);
2061  }
2062  LP64_ONLY(__ movl2ptr(rdx, rdx));
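  // In the narrow case the 16-bit big-endian offset occupies the two bytes
  // at bcp + 1; after byte-swapping it sits in the upper half of rdx, so
  // the arithmetic right shift by 16 both positions and sign-extends it.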
2063
2064  // Handle all the JSR stuff here, then exit.
2065  // It's much shorter and cleaner than intermingling with the non-JSR
2066  // normal-branch stuff occurring below.
2067  if (is_jsr) {
2068    // Pre-load the next target bytecode into rbx
2069    __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2070
2071    // compute return address as bci in rax
2072    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2073                        in_bytes(ConstMethod::codes_offset())));
2074    __ subptr(rax, Address(rcx, Method::const_offset()));
2075    // Adjust the bcp in r13 by the displacement in rdx
2076    __ addptr(rbcp, rdx);
2077    // jsr returns atos that is not an oop
2078    __ push_i(rax);
2079    __ dispatch_only(vtos);
2080    return;
2081  }
2082
2083  // Normal (non-jsr) branch handling
2084
2085  // Adjust the bcp in r13 by the displacement in rdx
2086  __ addptr(rbcp, rdx);
2087
2088  assert(UseLoopCounter || !UseOnStackReplacement,
2089         "on-stack-replacement requires loop counters");
2090  Label backedge_counter_overflow;
2091  Label profile_method;
2092  Label dispatch;
2093  if (UseLoopCounter) {
2094    // increment backedge counter for backward branches
2095    // rax: MDO
2096    // rbx: MDO bumped taken-count
2097    // rcx: method
2098    // rdx: target offset
2099    // r13: target bcp
2100    // r14: locals pointer
2101    __ testl(rdx, rdx);             // check if forward or backward branch
2102    __ jcc(Assembler::positive, dispatch); // count only if backward branch
2103
2104    // check if MethodCounters exists
2105    Label has_counters;
2106    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2107    __ testptr(rax, rax);
2108    __ jcc(Assembler::notZero, has_counters);
2109    __ push(rdx);
2110    __ push(rcx);
2111    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2112               rcx);
2113    __ pop(rcx);
2114    __ pop(rdx);
2115    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2116    __ testptr(rax, rax);
2117    __ jcc(Assembler::zero, dispatch);
2118    __ bind(has_counters);
2119
2120    if (TieredCompilation) {
2121      Label no_mdo;
2122      int increment = InvocationCounter::count_increment;
2123      if (ProfileInterpreter) {
2124        // Are we profiling?
2125        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2126        __ testptr(rbx, rbx);
2127        __ jccb(Assembler::zero, no_mdo);
2128        // Increment the MDO backedge counter
2129        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2130                                           in_bytes(InvocationCounter::counter_offset()));
2131        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2132        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2133                                   rax, false, Assembler::zero, &backedge_counter_overflow);
2134        __ jmp(dispatch);
2135      }
2136      __ bind(no_mdo);
2137      // Increment backedge counter in MethodCounters*
2138      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2139      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2140      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2141                                 rax, false, Assembler::zero, &backedge_counter_overflow);
2142    } else { // not TieredCompilation
2143      // increment counter
2144      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2145      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
2146      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2147      __ movl(Address(rcx, be_offset), rax);        // store counter
2148
2149      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
2150
2151      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2152      __ addl(rax, Address(rcx, be_offset));        // add both counters
2153
2154      if (ProfileInterpreter) {
2155        // Test to see if we should create a method data oop
2156        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2157        __ jcc(Assembler::less, dispatch);
2158
2159        // if no method data exists, go to profile method
2160        __ test_method_data_pointer(rax, profile_method);
2161
2162        if (UseOnStackReplacement) {
2163          // check for overflow against rbx which is the MDO taken count
2164          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2165          __ jcc(Assembler::below, dispatch);
2166
2167          // When ProfileInterpreter is on, the backedge_count comes
2168          // from the MethodData*, whose value does not get reset on
2169          // the call to frequency_counter_overflow().  To avoid
2170          // excessive calls to the overflow routine while the method is
2171          // being compiled, add a second test to make sure the overflow
2172          // function is called only once every overflow_frequency.
2173          const int overflow_frequency = 1024;
2174          __ andl(rbx, overflow_frequency - 1);
2175          __ jcc(Assembler::zero, backedge_counter_overflow);
2176
2177        }
2178      } else {
2179        if (UseOnStackReplacement) {
2180          // check for overflow against rax, which is the sum of the
2181          // counters
2182          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2183          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2184
2185        }
2186      }
2187    }
2188    __ bind(dispatch);
2189  }
2190
2191  // Pre-load the next target bytecode into rbx
2192  __ load_unsigned_byte(rbx, Address(rbcp, 0));
2193
2194  // continue with the bytecode @ target
2195  // rax: return bci for jsr's, unused otherwise
2196  // rbx: target bytecode
2197  // r13: target bcp
2198  __ dispatch_only(vtos);
2199
2200  if (UseLoopCounter) {
2201    if (ProfileInterpreter) {
2202      // Out-of-line code to allocate method data oop.
2203      __ bind(profile_method);
2204      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2205      __ load_unsigned_byte(rbx, Address(rbcp, 0));  // restore target bytecode
2206      __ set_method_data_pointer_for_bcp();
2207      __ jmp(dispatch);
2208    }
2209
2210    if (UseOnStackReplacement) {
2211      // backedge counter overflow
2212      __ bind(backedge_counter_overflow);
2213      __ negptr(rdx);
2214      __ addptr(rdx, rbcp); // branch bcp
2215      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2216      __ call_VM(noreg,
2217                 CAST_FROM_FN_PTR(address,
2218                                  InterpreterRuntime::frequency_counter_overflow),
2219                 rdx);
2220      __ load_unsigned_byte(rbx, Address(rbcp, 0));  // restore target bytecode
2221
2222      // rax: osr nmethod (osr ok) or NULL (osr not possible)
2223      // rbx: target bytecode
2224      // rdx: scratch
2225      // r14: locals pointer
2226      // r13: bcp
2227      __ testptr(rax, rax);                        // test result
2228      __ jcc(Assembler::zero, dispatch);         // no osr if null
2229      // nmethod may have been invalidated (VM may block upon call_VM return)
2230      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2231      __ jcc(Assembler::notEqual, dispatch);
2232
2233      // We have the address of an on-stack replacement routine in rax.
2234      // We need to prepare to execute the OSR method. First we must
2235      // migrate the locals and monitors off the stack.
2236
2237      LP64_ONLY(__ mov(r13, rax));                             // save the nmethod
2238      NOT_LP64(__ mov(rbx, rax));                             // save the nmethod
2239      NOT_LP64(__ get_thread(rcx));
2240
2241      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2242
2243      // rax is OSR buffer, move it to expected parameter location
2244      LP64_ONLY(__ mov(j_rarg0, rax));
2245      NOT_LP64(__ mov(rcx, rax));
2246      // We use the j_rarg definitions here so that the registers don't conflict:
2247      // parameter registers change across platforms, and since we are in the midst
2248      // of a calling sequence to the OSR nmethod we don't want a collision. These are NOT parameters.
2249
2250      const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2251      const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2252
2253
2254      // pop the interpreter frame
2255      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2256      __ leave();                                // remove frame anchor
2257      __ pop(retaddr);                           // get return address
2258      __ mov(rsp, sender_sp);                   // set sp to sender sp
2259      // Ensure compiled code always sees stack at proper alignment
2260      __ andptr(rsp, -(StackAlignmentInBytes));
2261
2262      // We need no specialized return from compiled code to the
2263      // interpreter or the call stub here.
2264
2265      // push the return address
2266      __ push(retaddr);
2267
2268      // and begin the OSR nmethod
2269      LP64_ONLY(__ jmp(Address(r13, nmethod::osr_entry_point_offset())));
2270      NOT_LP64(__ jmp(Address(rbx, nmethod::osr_entry_point_offset())));
2271    }
2272  }
2273}
2274
2275void TemplateTable::if_0cmp(Condition cc) {
2276  transition(itos, vtos);
2277  // assume branch is more often taken than not (loops use backward branches)
2278  Label not_taken;
2279  __ testl(rax, rax);
2280  __ jcc(j_not(cc), not_taken);
2281  branch(false, false);
2282  __ bind(not_taken);
2283  __ profile_not_taken_branch(rax);
2284}
2285
2286void TemplateTable::if_icmp(Condition cc) {
2287  transition(itos, vtos);
2288  // assume branch is more often taken than not (loops use backward branches)
2289  Label not_taken;
2290  __ pop_i(rdx);
2291  __ cmpl(rdx, rax);
2292  __ jcc(j_not(cc), not_taken);
2293  branch(false, false);
2294  __ bind(not_taken);
2295  __ profile_not_taken_branch(rax);
2296}
2297
2298void TemplateTable::if_nullcmp(Condition cc) {
2299  transition(atos, vtos);
2300  // assume branch is more often taken than not (loops use backward branches)
2301  Label not_taken;
2302  __ testptr(rax, rax);
2303  __ jcc(j_not(cc), not_taken);
2304  branch(false, false);
2305  __ bind(not_taken);
2306  __ profile_not_taken_branch(rax);
2307}
2308
2309void TemplateTable::if_acmp(Condition cc) {
2310  transition(atos, vtos);
2311  // assume branch is more often taken than not (loops use backward branches)
2312  Label not_taken;
2313  __ pop_ptr(rdx);
2314  __ cmpptr(rdx, rax);
2315  __ jcc(j_not(cc), not_taken);
2316  branch(false, false);
2317  __ bind(not_taken);
2318  __ profile_not_taken_branch(rax);
2319}
2320
2321void TemplateTable::ret() {
2322  transition(vtos, vtos);
2323  locals_index(rbx);
2324  LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2325  NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2326  __ profile_ret(rbx, rcx);
2327  __ get_method(rax);
2328  __ movptr(rbcp, Address(rax, Method::const_offset()));
2329  __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2330                      ConstMethod::codes_offset()));
2331  __ dispatch_next(vtos);
2332}
2333
2334void TemplateTable::wide_ret() {
2335  transition(vtos, vtos);
2336  locals_index_wide(rbx);
2337  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2338  __ profile_ret(rbx, rcx);
2339  __ get_method(rax);
2340  __ movptr(rbcp, Address(rax, Method::const_offset()));
2341  __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2342  __ dispatch_next(vtos);
2343}
2344
2345void TemplateTable::tableswitch() {
2346  Label default_case, continue_execution;
2347  transition(itos, vtos);
2348
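  // tableswitch operand layout after the opcode: 0-3 padding bytes up to a
  // 4-byte boundary, then 32-bit big-endian default, low and high values,
  // followed by (high - low + 1) 32-bit big-endian jump offsets.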
2349  // align r13/rsi
2350  __ lea(rbx, at_bcp(BytesPerInt));
2351  __ andptr(rbx, -BytesPerInt);
2352  // load lo & hi
2353  __ movl(rcx, Address(rbx, BytesPerInt));
2354  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2355  __ bswapl(rcx);
2356  __ bswapl(rdx);
2357  // check against lo & hi
2358  __ cmpl(rax, rcx);
2359  __ jcc(Assembler::less, default_case);
2360  __ cmpl(rax, rdx);
2361  __ jcc(Assembler::greater, default_case);
2362  // lookup dispatch offset
2363  __ subl(rax, rcx);
2364  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2365  __ profile_switch_case(rax, rbx, rcx);
2366  // continue execution
2367  __ bind(continue_execution);
2368  __ bswapl(rdx);
2369  LP64_ONLY(__ movl2ptr(rdx, rdx));
2370  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2371  __ addptr(rbcp, rdx);
2372  __ dispatch_only(vtos);
2373  // handle default
2374  __ bind(default_case);
2375  __ profile_switch_default(rax);
2376  __ movl(rdx, Address(rbx, 0));
2377  __ jmp(continue_execution);
2378}
2379
2380void TemplateTable::lookupswitch() {
2381  transition(itos, itos);
2382  __ stop("lookupswitch bytecode should have been rewritten");
2383}
2384
2385void TemplateTable::fast_linearswitch() {
2386  transition(itos, vtos);
2387  Label loop_entry, loop, found, continue_execution;
2388  // bswap rax so we can avoid bswapping the table entries
2389  __ bswapl(rax);
2390  // align r13
2391  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2392                                    // this instruction (change offsets
2393                                    // below)
2394  __ andptr(rbx, -BytesPerInt);
2395  // set counter
2396  __ movl(rcx, Address(rbx, BytesPerInt));
2397  __ bswapl(rcx);
2398  __ jmpb(loop_entry);
2399  // table search
2400  __ bind(loop);
2401  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2402  __ jcc(Assembler::equal, found);
2403  __ bind(loop_entry);
2404  __ decrementl(rcx);
2405  __ jcc(Assembler::greaterEqual, loop);
2406  // default case
2407  __ profile_switch_default(rax);
2408  __ movl(rdx, Address(rbx, 0));
2409  __ jmp(continue_execution);
2410  // entry found -> get offset
2411  __ bind(found);
2412  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2413  __ profile_switch_case(rcx, rax, rbx);
2414  // continue execution
2415  __ bind(continue_execution);
2416  __ bswapl(rdx);
2417  __ movl2ptr(rdx, rdx);
2418  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2419  __ addptr(rbcp, rdx);
2420  __ dispatch_only(vtos);
2421}
2422
2423void TemplateTable::fast_binaryswitch() {
2424  transition(itos, vtos);
2425  // Implementation using the following core algorithm:
2426  //
2427  // int binary_search(int key, LookupswitchPair* array, int n) {
2428  //   // Binary search according to "Methodik des Programmierens" by
2429  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2430  //   int i = 0;
2431  //   int j = n;
2432  //   while (i+1 < j) {
2433  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2434  //     // with      Q: for all i: 0 <= i < n: key < a[i]
2435  //     // where a stands for the array, assuming that the (non-existing)
2436  //     // element a[n] is infinitely big.
2437  //     int h = (i + j) >> 1;
2438  //     // i < h < j
2439  //     if (key < array[h].fast_match()) {
2440  //       j = h;
2441  //     } else {
2442  //       i = h;
2443  //     }
2444  //   }
2445  //   // R: a[i] <= key < a[i+1] or Q
2446  //   // (i.e., if key is within array, i is the correct index)
2447  //   return i;
2448  // }
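  // Each LookupswitchPair is two BytesPerInt wide (match, offset), both
  // big-endian in the bytecode stream -- hence the times_8 scaling and the
  // bswapl calls below.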
2449
2450  // Register allocation
2451  const Register key   = rax; // already set (tosca)
2452  const Register array = rbx;
2453  const Register i     = rcx;
2454  const Register j     = rdx;
2455  const Register h     = rdi;
2456  const Register temp  = rsi;
2457
2458  // Find array start
2459  NOT_LP64(__ save_bcp());
2460
2461  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2462                                          // get rid of this
2463                                          // instruction (change
2464                                          // offsets below)
2465  __ andptr(array, -BytesPerInt);
2466
2467  // Initialize i & j
2468  __ xorl(i, i);                            // i = 0;
2469  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2470
2471  // Convert j into native byteordering
2472  __ bswapl(j);
2473
2474  // And start
2475  Label entry;
2476  __ jmp(entry);
2477
2478  // binary search loop
2479  {
2480    Label loop;
2481    __ bind(loop);
2482    // int h = (i + j) >> 1;
2483    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2484    __ sarl(h, 1);                               // h = (i + j) >> 1;
2485    // if (key < array[h].fast_match()) {
2486    //   j = h;
2487    // } else {
2488    //   i = h;
2489    // }
2490    // Convert array[h].match to native byte-ordering before compare
2491    __ movl(temp, Address(array, h, Address::times_8));
2492    __ bswapl(temp);
2493    __ cmpl(key, temp);
2494    // j = h if (key <  array[h].fast_match())
2495    __ cmov32(Assembler::less, j, h);
2496    // i = h if (key >= array[h].fast_match())
2497    __ cmov32(Assembler::greaterEqual, i, h);
2498    // while (i+1 < j)
2499    __ bind(entry);
2500    __ leal(h, Address(i, 1)); // i+1
2501    __ cmpl(h, j);             // i+1 < j
2502    __ jcc(Assembler::less, loop);
2503  }
2504
2505  // end of binary search, result index is i (must check again!)
2506  Label default_case;
2507  // Convert array[i].match to native byte-ordering before compare
2508  __ movl(temp, Address(array, i, Address::times_8));
2509  __ bswapl(temp);
2510  __ cmpl(key, temp);
2511  __ jcc(Assembler::notEqual, default_case);
2512
2513  // entry found -> j = offset
2514  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2515  __ profile_switch_case(i, key, array);
2516  __ bswapl(j);
2517  LP64_ONLY(__ movslq(j, j));
2518
2519  NOT_LP64(__ restore_bcp());
2520  NOT_LP64(__ restore_locals());                           // restore rdi
2521
2522  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2523  __ addptr(rbcp, j);
2524  __ dispatch_only(vtos);
2525
2526  // default case -> j = default offset
2527  __ bind(default_case);
2528  __ profile_switch_default(i);
2529  __ movl(j, Address(array, -2 * BytesPerInt));
2530  __ bswapl(j);
2531  LP64_ONLY(__ movslq(j, j));
2532
2533  NOT_LP64(__ restore_bcp());
2534  NOT_LP64(__ restore_locals());
2535
2536  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2537  __ addptr(rbcp, j);
2538  __ dispatch_only(vtos);
2539}
2540
2541void TemplateTable::_return(TosState state) {
2542  transition(state, state);
2543
2544  Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2545
2546  assert(_desc->calls_vm(),
2547         "inconsistent calls_vm information"); // call in remove_activation
2548
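  // (_return_register_finalizer is, presumably, the rewritten form of the
  // return in Object.<init>: the receiver in local 0 is registered with the
  // runtime below iff its klass has the JVM_ACC_HAS_FINALIZER flag set.)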
2549  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2550    assert(state == vtos, "only valid state");
2551    __ movptr(robj, aaddress(0));
2552    __ load_klass(rdi, robj);
2553    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2554    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2555    Label skip_register_finalizer;
2556    __ jcc(Assembler::zero, skip_register_finalizer);
2557
2558    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2559
2560    __ bind(skip_register_finalizer);
2561  }
2562
2563  __ remove_activation(state, rbcp);
2564  __ jmp(rbcp);
2565}
2566
2567// ----------------------------------------------------------------------------
2568// Volatile variables demand their effects be made known to all CPUs
2569// in order.  Store buffers on most chips allow reads & writes to
2570// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2571// without some kind of memory barrier (i.e., it's not sufficient that
2572// the interpreter does not reorder volatile references, the hardware
2573// also must not reorder them).
2574//
2575// According to the new Java Memory Model (JMM):
2576// (1) All volatiles are serialized with respect to each other.  ALSO reads &
2577//     writes act as acquire & release, so:
2578// (2) A read cannot let unrelated NON-volatile memory refs that
2579//     happen after the read float up to before the read.  It's OK for
2580//     non-volatile memory refs that happen before the volatile read to
2581//     float down below it.
2582// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2583//     memory refs that happen BEFORE the write float down to after the
2584//     write.  It's OK for non-volatile memory refs that happen after the
2585//     volatile write to float up before it.
2586//
2587// We only put in barriers around volatile refs (they are expensive),
2588// not _between_ memory refs (that would require us to track the
2589// flavor of the previous memory refs).  Requirements (2) and (3)
2590// require some barriers before volatile stores and after volatile
2591// loads.  These nearly cover requirement (1) but miss the
2592// volatile-store-volatile-load case.  This final case is placed after
2593// volatile-stores although it could just as well go before
2594// volatile-loads.
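//
// As an illustrative sketch: x86 is TSO, so of the four orderings only
// store-load reordering can happen in hardware.  A volatile store is thus
// followed by a StoreLoad|StoreStore membar (a locked add to the top of
// stack, or mfence), while a volatile load needs no trailing barrier:
//
//   <volatile store>
//   volatile_barrier(StoreLoad | StoreStore)   // see putfield_or_static below
//   ...
//   <volatile load>                            // no explicit barrier emitted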
2595
2596void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2597  // Helper function to insert an is-volatile test and memory barrier
2598  if (!os::is_MP()) return;    // Not needed on single CPU
2599  __ membar(order_constraint);
2600}
2601
2602void TemplateTable::resolve_cache_and_index(int byte_no,
2603                                            Register Rcache,
2604                                            Register index,
2605                                            size_t index_size) {
2606  const Register temp = rbx;
2607  assert_different_registers(Rcache, index, temp);
2608
2609  Label resolved;
2610
2611  Bytecodes::Code code = bytecode();
2612  switch (code) {
2613  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2614  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2615  }
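  // (The _nofast variants resolve exactly like their originals; they exist
  // so bytecode rewriting can be suppressed, hence the mapping above before
  // the resolved-bytecode comparison.)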
2616
2617  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2618  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2619  __ cmpl(temp, code);  // have we resolved this bytecode?
2620  __ jcc(Assembler::equal, resolved);
2621
2622  // resolve first time through
2623  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2624  __ movl(temp, code);
2625  __ call_VM(noreg, entry, temp);
2626  // Update registers with resolved info
2627  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2628  __ bind(resolved);
2629}
2630
2631// The cache and index registers must be set before the call
2632void TemplateTable::load_field_cp_cache_entry(Register obj,
2633                                              Register cache,
2634                                              Register index,
2635                                              Register off,
2636                                              Register flags,
2637                                              bool is_static = false) {
2638  assert_different_registers(cache, index, flags, off);
2639
2640  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2641  // Field offset
2642  __ movptr(off, Address(cache, index, Address::times_ptr,
2643                         in_bytes(cp_base_offset +
2644                                  ConstantPoolCacheEntry::f2_offset())));
2645  // Flags
2646  __ movl(flags, Address(cache, index, Address::times_ptr,
2647                         in_bytes(cp_base_offset +
2648                                  ConstantPoolCacheEntry::flags_offset())));
2649
2650  // klass overwrite register
2651  if (is_static) {
2652    __ movptr(obj, Address(cache, index, Address::times_ptr,
2653                           in_bytes(cp_base_offset +
2654                                    ConstantPoolCacheEntry::f1_offset())));
2655    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2656    __ movptr(obj, Address(obj, mirror_offset));
2657  }
2658}
2659
2660void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2661                                               Register method,
2662                                               Register itable_index,
2663                                               Register flags,
2664                                               bool is_invokevirtual,
2665                                               bool is_invokevfinal, /*unused*/
2666                                               bool is_invokedynamic) {
2667  // setup registers
2668  const Register cache = rcx;
2669  const Register index = rdx;
2670  assert_different_registers(method, flags);
2671  assert_different_registers(method, cache, index);
2672  assert_different_registers(itable_index, flags);
2673  assert_different_registers(itable_index, cache, index);
2674  // determine constant pool cache field offsets
2675  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2676  const int method_offset = in_bytes(
2677    ConstantPoolCache::base_offset() +
2678      ((byte_no == f2_byte)
2679       ? ConstantPoolCacheEntry::f2_offset()
2680       : ConstantPoolCacheEntry::f1_offset()));
2681  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2682                                    ConstantPoolCacheEntry::flags_offset());
2683  // access constant pool cache fields
2684  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2685                                    ConstantPoolCacheEntry::f2_offset());
2686
2687  size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2688  resolve_cache_and_index(byte_no, cache, index, index_size);
2689  __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2690
2691  if (itable_index != noreg) {
2692    // pick up itable or appendix index from f2 also:
2693    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2694  }
2695  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2696}
2697
2698// The registers cache and index are expected to be set before the call.
2699// Correct values of the cache and index registers are preserved.
2700void TemplateTable::jvmti_post_field_access(Register cache,
2701                                            Register index,
2702                                            bool is_static,
2703                                            bool has_tos) {
2704  if (JvmtiExport::can_post_field_access()) {
2705    // Check to see if a field access watch has been set before we take
2706    // the time to call into the VM.
2707    Label L1;
2708    assert_different_registers(cache, index, rax);
2709    __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2710    __ testl(rax, rax);
2711    __ jcc(Assembler::zero, L1);
2712
2713    // cache entry pointer
2714    __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2715    __ shll(index, LogBytesPerWord);
2716    __ addptr(cache, index);
2717    if (is_static) {
2718      __ xorptr(rax, rax);      // NULL object reference
2719    } else {
2720      __ pop(atos);         // Get the object
2721      __ verify_oop(rax);
2722      __ push(atos);        // Restore stack state
2723    }
2724    // rax:    object pointer or NULL
2725    // cache: cache entry pointer
2726    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2727               rax, cache);
2728    __ get_cache_and_index_at_bcp(cache, index, 1);
2729    __ bind(L1);
2730  }
2731}
2732
2733void TemplateTable::pop_and_check_object(Register r) {
2734  __ pop_ptr(r);
2735  __ null_check(r);  // for field access must check obj.
2736  __ verify_oop(r);
2737}
2738
2739void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2740  transition(vtos, vtos);
2741
2742  const Register cache = rcx;
2743  const Register index = rdx;
2744  const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2745  const Register off   = rbx;
2746  const Register flags = rax;
2747  const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2748
2749  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2750  jvmti_post_field_access(cache, index, is_static, false);
2751  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2752
2753  if (!is_static) pop_and_check_object(obj);
2754
2755  const Address field(obj, off, Address::times_1, 0*wordSize);
2756  NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
2757
2758  Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2759
2760  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2761  // Make sure we don't need to mask flags after the above shift
2762  assert(btos == 0, "change code, btos != 0");
2763
2764  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2765
2766  __ jcc(Assembler::notZero, notByte);
2767  // btos
2768  __ load_signed_byte(rax, field);
2769  __ push(btos);
2770  // Rewrite bytecode to be faster
2771  if (!is_static && rc == may_rewrite) {
2772    patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2773  }
2774  __ jmp(Done);
2775
2776  __ bind(notByte);
2777  __ cmpl(flags, atos);
2778  __ jcc(Assembler::notEqual, notObj);
2779  // atos
2780  __ load_heap_oop(rax, field);
2781  __ push(atos);
2782  if (!is_static && rc == may_rewrite) {
2783    patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2784  }
2785  __ jmp(Done);
2786
2787  __ bind(notObj);
2788  __ cmpl(flags, itos);
2789  __ jcc(Assembler::notEqual, notInt);
2790  // itos
2791  __ movl(rax, field);
2792  __ push(itos);
2793  // Rewrite bytecode to be faster
2794  if (!is_static && rc == may_rewrite) {
2795    patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2796  }
2797  __ jmp(Done);
2798
2799  __ bind(notInt);
2800  __ cmpl(flags, ctos);
2801  __ jcc(Assembler::notEqual, notChar);
2802  // ctos
2803  __ load_unsigned_short(rax, field);
2804  __ push(ctos);
2805  // Rewrite bytecode to be faster
2806  if (!is_static && rc == may_rewrite) {
2807    patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2808  }
2809  __ jmp(Done);
2810
2811  __ bind(notChar);
2812  __ cmpl(flags, stos);
2813  __ jcc(Assembler::notEqual, notShort);
2814  // stos
2815  __ load_signed_short(rax, field);
2816  __ push(stos);
2817  // Rewrite bytecode to be faster
2818  if (!is_static && rc == may_rewrite) {
2819    patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2820  }
2821  __ jmp(Done);
2822
2823  __ bind(notShort);
2824  __ cmpl(flags, ltos);
2825  __ jcc(Assembler::notEqual, notLong);
2826  // ltos
2827
2828#ifndef _LP64
2829  // Generate code as if volatile.  There just aren't enough registers to
2830  // save that information and this code is faster than the test.
2831  __ fild_d(field);                // Must load atomically
2832  __ subptr(rsp,2*wordSize);    // Make space for store
2833  __ fistp_d(Address(rsp,0));
2834  __ pop(rax);
2835  __ pop(rdx);
2836#else
2837  __ movq(rax, field);
2838#endif
2839
2840  __ push(ltos);
2841  // Rewrite bytecode to be faster
2842  LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2843  __ jmp(Done);
2844
2845  __ bind(notLong);
2846  __ cmpl(flags, ftos);
2847  __ jcc(Assembler::notEqual, notFloat);
2848  // ftos
2849
2850  __ load_float(field);
2851  __ push(ftos);
2852  // Rewrite bytecode to be faster
2853  if (!is_static && rc == may_rewrite) {
2854    patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2855  }
2856  __ jmp(Done);
2857
2858  __ bind(notFloat);
2859#ifdef ASSERT
2860  __ cmpl(flags, dtos);
2861  __ jcc(Assembler::notEqual, notDouble);
2862#endif
2863  // dtos
2864  __ load_double(field);
2865  __ push(dtos);
2866  // Rewrite bytecode to be faster
2867  if (!is_static && rc == may_rewrite) {
2868    patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2869  }
2870#ifdef ASSERT
2871  __ jmp(Done);
2872
2873
2874  __ bind(notDouble);
2875  __ stop("Bad state");
2876#endif
2877
2878  __ bind(Done);
2879  // [jk] not needed currently
2880  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2881  //                                              Assembler::LoadStore));
2882}
2883
2884void TemplateTable::getfield(int byte_no) {
2885  getfield_or_static(byte_no, false);
2886}
2887
2888void TemplateTable::nofast_getfield(int byte_no) {
2889  getfield_or_static(byte_no, false, may_not_rewrite);
2890}
2891
2892void TemplateTable::getstatic(int byte_no) {
2893  getfield_or_static(byte_no, true);
2894}
2895
2896
2897// The registers cache and index are expected to be set before the call.
2898// The function may destroy various registers, just not the cache and index registers.
2899void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2900
2901  const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
2902  const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
2903  const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
2904  const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
2905
2906  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2907
2908  if (JvmtiExport::can_post_field_modification()) {
2909    // Check to see if a field modification watch has been set before
2910    // we take the time to call into the VM.
2911    Label L1;
2912    assert_different_registers(cache, index, rax);
2913    __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2914    __ testl(rax, rax);
2915    __ jcc(Assembler::zero, L1);
2916
2917    __ get_cache_and_index_at_bcp(robj, RDX, 1);
2918
2919
2920    if (is_static) {
2921      // Life is simple.  Null out the object pointer.
2922      __ xorl(RBX, RBX);
2923
2924    } else {
2925      // Life is harder. The stack holds the value on top, followed by
2926      // the object.  We don't know the size of the value, though; it
2927      // could be one or two words depending on its type. As a result,
2928      // we must find the type to determine where the object is.
2929#ifndef _LP64
2930      Label two_word, valsize_known;
2931#endif
2932      __ movl(RCX, Address(robj, RDX,
2933                           Address::times_ptr,
2934                           in_bytes(cp_base_offset +
2935                                     ConstantPoolCacheEntry::flags_offset())));
2936      NOT_LP64(__ mov(rbx, rsp));
2937      __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
2938
2939      // Make sure we don't need to mask rcx after the above shift
2940      ConstantPoolCacheEntry::verify_tos_state_shift();
2941#ifdef _LP64
2942      __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
2943      __ cmpl(c_rarg3, ltos);
2944      __ cmovptr(Assembler::equal,
2945                 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2946      __ cmpl(c_rarg3, dtos);
2947      __ cmovptr(Assembler::equal,
2948                 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2949#else
2950      __ cmpl(rcx, ltos);
2951      __ jccb(Assembler::equal, two_word);
2952      __ cmpl(rcx, dtos);
2953      __ jccb(Assembler::equal, two_word);
2954      __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2955      __ jmpb(valsize_known);
2956
2957      __ bind(two_word);
2958      __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2959
2960      __ bind(valsize_known);
2961      // setup object pointer
2962      __ movptr(rbx, Address(rbx, 0));
2963#endif
2964    }
2965    // cache entry pointer
2966    __ addptr(robj, in_bytes(cp_base_offset));
2967    __ shll(RDX, LogBytesPerWord);
2968    __ addptr(robj, RDX);
2969    // object (tos)
2970    __ mov(RCX, rsp);
2971    // c_rarg1: object pointer set up above (NULL if static)
2972    // c_rarg2: cache entry pointer
2973    // c_rarg3: jvalue object on the stack
2974    __ call_VM(noreg,
2975               CAST_FROM_FN_PTR(address,
2976                                InterpreterRuntime::post_field_modification),
2977               RBX, robj, RCX);
2978    __ get_cache_and_index_at_bcp(cache, index, 1);
2979    __ bind(L1);
2980  }
2981}
2982
2983void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2984  transition(vtos, vtos);
2985
2986  const Register cache = rcx;
2987  const Register index = rdx;
2988  const Register obj   = rcx;
2989  const Register off   = rbx;
2990  const Register flags = rax;
2991  const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2992
2993  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2994  jvmti_post_field_mod(cache, index, is_static);
2995  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2996
2997  // [jk] not needed currently
2998  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2999  //                                              Assembler::StoreStore));
3000
3001  Label notVolatile, Done;
3002  __ movl(rdx, flags);
3003  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3004  __ andl(rdx, 0x1);
3005
3006  // field addresses
3007  const Address field(obj, off, Address::times_1, 0*wordSize);
3008  NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3009
3010  Label notByte, notInt, notShort, notChar,
3011        notLong, notFloat, notObj, notDouble;
3012
3013  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3014
3015  assert(btos == 0, "change code, btos != 0");
3016  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3017  __ jcc(Assembler::notZero, notByte);
3018
3019  // btos
3020  {
3021    __ pop(btos);
3022    if (!is_static) pop_and_check_object(obj);
3023    __ movb(field, rax);
3024    if (!is_static && rc == may_rewrite) {
3025      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3026    }
3027    __ jmp(Done);
3028  }
3029
3030  __ bind(notByte);
3031  __ cmpl(flags, atos);
3032  __ jcc(Assembler::notEqual, notObj);
3033
3034  // atos
3035  {
3036    __ pop(atos);
3037    if (!is_static) pop_and_check_object(obj);
3038    // Store into the field
3039    do_oop_store(_masm, field, rax, _bs->kind(), false);
3040    if (!is_static && rc == may_rewrite) {
3041      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3042    }
3043    __ jmp(Done);
3044  }
3045
3046  __ bind(notObj);
3047  __ cmpl(flags, itos);
3048  __ jcc(Assembler::notEqual, notInt);
3049
3050  // itos
3051  {
3052    __ pop(itos);
3053    if (!is_static) pop_and_check_object(obj);
3054    __ movl(field, rax);
3055    if (!is_static && rc == may_rewrite) {
3056      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3057    }
3058    __ jmp(Done);
3059  }
3060
3061  __ bind(notInt);
3062  __ cmpl(flags, ctos);
3063  __ jcc(Assembler::notEqual, notChar);
3064
3065  // ctos
3066  {
3067    __ pop(ctos);
3068    if (!is_static) pop_and_check_object(obj);
3069    __ movw(field, rax);
3070    if (!is_static && rc == may_rewrite) {
3071      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3072    }
3073    __ jmp(Done);
3074  }
3075
3076  __ bind(notChar);
3077  __ cmpl(flags, stos);
3078  __ jcc(Assembler::notEqual, notShort);
3079
3080  // stos
3081  {
3082    __ pop(stos);
3083    if (!is_static) pop_and_check_object(obj);
3084    __ movw(field, rax);
3085    if (!is_static && rc == may_rewrite) {
3086      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3087    }
3088    __ jmp(Done);
3089  }
3090
3091  __ bind(notShort);
3092  __ cmpl(flags, ltos);
3093  __ jcc(Assembler::notEqual, notLong);
3094
3095  // ltos
3096#ifdef _LP64
3097  {
3098    __ pop(ltos);
3099    if (!is_static) pop_and_check_object(obj);
3100    __ movq(field, rax);
3101    if (!is_static && rc == may_rewrite) {
3102      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3103    }
3104    __ jmp(Done);
3105  }
3106#else
3107  {
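    // A volatile long store must appear atomic (JLS 17.7 forbids tearing
    // for volatile longs) and 32-bit x86 has no plain 64-bit integer store,
    // so the FPU fild/fistp pair below performs one atomic 64-bit access.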
3108    Label notVolatileLong;
3109    __ testl(rdx, rdx);
3110    __ jcc(Assembler::zero, notVolatileLong);
3111
3112    __ pop(ltos);  // overwrites rdx, do this after testing volatile.
3113    if (!is_static) pop_and_check_object(obj);
3114
3115    // Replace with real volatile test
3116    __ push(rdx);
3117    __ push(rax);                 // Must update atomically with FIST
3118    __ fild_d(Address(rsp,0));    // So load into FPU register
3119    __ fistp_d(field);            // and put into memory atomically
3120    __ addptr(rsp, 2*wordSize);
3121    // volatile_barrier();
3122    volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3123                                                 Assembler::StoreStore));
3124    // Don't rewrite volatile version
3125    __ jmp(notVolatile);
3126
3127    __ bind(notVolatileLong);
3128
3129    __ pop(ltos);  // overwrites rdx
3130    if (!is_static) pop_and_check_object(obj);
3131    __ movptr(hi, rdx);
3132    __ movptr(field, rax);
3133    // Don't rewrite to _fast_lputfield for potential volatile case.
3134    __ jmp(notVolatile);
3135  }
3136#endif // _LP64
3137
3138  __ bind(notLong);
3139  __ cmpl(flags, ftos);
3140  __ jcc(Assembler::notEqual, notFloat);
3141
3142  // ftos
3143  {
3144    __ pop(ftos);
3145    if (!is_static) pop_and_check_object(obj);
3146    __ store_float(field);
3147    if (!is_static && rc == may_rewrite) {
3148      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3149    }
3150    __ jmp(Done);
3151  }
3152
3153  __ bind(notFloat);
3154#ifdef ASSERT
3155  __ cmpl(flags, dtos);
3156  __ jcc(Assembler::notEqual, notDouble);
3157#endif
3158
3159  // dtos
3160  {
3161    __ pop(dtos);
3162    if (!is_static) pop_and_check_object(obj);
3163    __ store_double(field);
3164    if (!is_static && rc == may_rewrite) {
3165      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3166    }
3167  }
3168
3169#ifdef ASSERT
3170  __ jmp(Done);
3171
3172  __ bind(notDouble);
3173  __ stop("Bad state");
3174#endif
3175
3176  __ bind(Done);
3177
3178  // Check for volatile store
3179  __ testl(rdx, rdx);
3180  __ jcc(Assembler::zero, notVolatile);
3181  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3182                                               Assembler::StoreStore));
3183  __ bind(notVolatile);
3184}
3185
3186void TemplateTable::putfield(int byte_no) {
3187  putfield_or_static(byte_no, false);
3188}
3189
3190void TemplateTable::nofast_putfield(int byte_no) {
3191  putfield_or_static(byte_no, false, may_not_rewrite);
3192}
3193
3194void TemplateTable::putstatic(int byte_no) {
3195  putfield_or_static(byte_no, true);
3196}
3197
3198void TemplateTable::jvmti_post_fast_field_mod() {
3199
3200  const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3201
3202  if (JvmtiExport::can_post_field_modification()) {
3203    // Check to see if a field modification watch has been set before
3204    // we take the time to call into the VM.
3205    Label L2;
3206    __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3207    __ testl(scratch, scratch);
3208    __ jcc(Assembler::zero, L2);
3209    __ pop_ptr(rbx);                  // copy the object pointer from tos
3210    __ verify_oop(rbx);
3211    __ push_ptr(rbx);                 // put the object pointer back on tos
3212    // Save tos values before call_VM() clobbers them. Since we have
3213    // to do it for every data type, we use the saved values as the
3214    // jvalue object.
3215    switch (bytecode()) {          // load values into the jvalue object
3216    case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3217    case Bytecodes::_fast_bputfield: // fall through
3218    case Bytecodes::_fast_sputfield: // fall through
3219    case Bytecodes::_fast_cputfield: // fall through
3220    case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3221    case Bytecodes::_fast_dputfield: __ push(dtos); break;
3222    case Bytecodes::_fast_fputfield: __ push(ftos); break;
3223    case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3224
3225    default:
3226      ShouldNotReachHere();
3227    }
3228    __ mov(scratch, rsp);             // points to jvalue on the stack
3229    // access constant pool cache entry
3230    LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3231    NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3232    __ verify_oop(rbx);
3233    // rbx: object pointer copied above
3234    // c_rarg2: cache entry pointer
3235    // c_rarg3: jvalue object on the stack
3236    LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3237    NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3238
3239    switch (bytecode()) {             // restore tos values
3240    case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3241    case Bytecodes::_fast_bputfield: // fall through
3242    case Bytecodes::_fast_sputfield: // fall through
3243    case Bytecodes::_fast_cputfield: // fall through
3244    case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3245    case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3246    case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3247    case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3248    }
3249    __ bind(L2);
3250  }
3251}
3252
3253void TemplateTable::fast_storefield(TosState state) {
3254  transition(state, vtos);
3255
3256  ByteSize base = ConstantPoolCache::base_offset();
3257
3258  jvmti_post_fast_field_mod();
3259
3260  // access constant pool cache
3261  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3262
3263  // Test for volatile with rdx; note that rdx is the tos register for lputfield.
3264  __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3265                       in_bytes(base +
3266                                ConstantPoolCacheEntry::flags_offset())));
3267
3268  // replace index with field offset from cache entry
3269  __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3270                         in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3271
3272  // [jk] not needed currently
3273  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3274  //                                              Assembler::StoreStore));
3275
3276  Label notVolatile;
3277  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3278  __ andl(rdx, 0x1);
3279
3280  // Get object from stack
3281  pop_and_check_object(rcx);
3282
3283  // field address
3284  const Address field(rcx, rbx, Address::times_1);
3285
3286  // access field
3287  switch (bytecode()) {
3288  case Bytecodes::_fast_aputfield:
3289    do_oop_store(_masm, field, rax, _bs->kind(), false);
3290    break;
3291  case Bytecodes::_fast_lputfield:
3292#ifdef _LP64
3293    __ movq(field, rax);
3294#else
3295    __ stop("should not be rewritten");
3296#endif
3297    break;
3298  case Bytecodes::_fast_iputfield:
3299    __ movl(field, rax);
3300    break;
3301  case Bytecodes::_fast_bputfield:
3302    __ movb(field, rax);
3303    break;
3304  case Bytecodes::_fast_sputfield:
3305    // fall through
3306  case Bytecodes::_fast_cputfield:
3307    __ movw(field, rax);
3308    break;
3309  case Bytecodes::_fast_fputfield:
3310    __ store_float(field);
3311    break;
3312  case Bytecodes::_fast_dputfield:
3313    __ store_double(field);
3314    break;
3315  default:
3316    ShouldNotReachHere();
3317  }
3318
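  // A volatile store must not be reordered with a subsequent volatile
  // load, so a StoreLoad (plus StoreStore) barrier is required after the
  // write; non-volatile stores skip the fence. For example:
  //   writer: data = 42; ready = true;   // ready is volatile
  //   reader: if (ready) use(data);      // must then observe data == 42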
  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ bind(notVolatile);
}

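// fast_accessfield implements the rewritten _fast_Xgetfield bytecodes:
// the field is already resolved, so its offset can be loaded from the
// cache entry's f2 slot without a resolution check.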
void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
    NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
    __ verify_oop(rax);
    __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
    LP64_ONLY(__ mov(c_rarg1, rax));
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
    NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
    __ pop_ptr(rax); // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
  // replace index with field offset from cache entry
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
  //                        in_bytes(ConstantPoolCache::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ andl(rdx, 0x1);
  // }
  __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
                         in_bytes(ConstantPoolCache::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));

  // rax: object
  __ verify_oop(rax);
  __ null_check(rax);
  Address field(rax, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_agetfield:
    __ load_heap_oop(rax, field);
    __ verify_oop(rax);
    break;
  case Bytecodes::_fast_lgetfield:
#ifdef _LP64
    __ movq(rax, field);
#else
    __ stop("should not be rewritten");
#endif
    break;
  case Bytecodes::_fast_igetfield:
    __ movl(rax, field);
    break;
  case Bytecodes::_fast_bgetfield:
    __ movsbl(rax, field);
    break;
  case Bytecodes::_fast_sgetfield:
    __ load_signed_short(rax, field);
    break;
  case Bytecodes::_fast_cgetfield:
    __ load_unsigned_short(rax, field);
    break;
  case Bytecodes::_fast_fgetfield:
    __ load_float(field);
    break;
  case Bytecodes::_fast_dgetfield:
    __ load_double(field);
    break;
  default:
    ShouldNotReachHere();
  }
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ testl(rdx, rdx);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }
}

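// fast_xaccess implements the fused _fast_Xaccess_0 bytecodes, i.e. an
// aload_0 immediately followed by a fast getfield on local 0. The bcp
// is bumped across the null check below so that an implicit
// NullPointerException is reported at the getfield's bci rather than at
// the aload_0 (and restored again at the end).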
void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx,
            Address(rcx, rdx, Address::times_ptr,
                    in_bytes(ConstantPoolCache::base_offset() +
                             ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(rax);
  const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
  switch (state) {
  case itos:
    __ movl(rax, field);
    break;
  case atos:
    __ load_heap_oop(rax, field);
    __ verify_oop(rax);
    break;
  case ftos:
    __ load_float(field);
    break;
  default:
    ShouldNotReachHere();
  }

  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                        in_bytes(ConstantPoolCache::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ testl(rdx, 0x1);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }

  __ decrement(rbcp);
}

//-----------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

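// Shared setup for all invoke bytecodes. Depending on the bytecode, the
// resolved constant pool cache entry supplies (see the callers below):
//   invokespecial/invokestatic: f1 = target Method*
//   invokevirtual:              f2 = vtable index, or Method* if vfinal
//   invokeinterface:            f1 = interface Klass*, f2 = itable index
//   invokehandle/invokedynamic: the adapter Method*, plus an appendix
//     (MethodType/CallSite) from the resolved references array that is
//     pushed as a trailing argument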
void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial    = code == Bytecodes::_invokespecial;
  const bool load_receiver       = (recv  != noreg);
  const bool save_flags          = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv  == noreg || recv  == rcx, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = rcx;
  if (flags == noreg)  flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jcc(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);
    __ mov(rbx, index);
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(index, rbx);
    __ pop(rbx);
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
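  // (Sketch: the low bits of flags hold parameter_size, the number of
  // argument slots including the receiver and any pushed appendix, so
  // the receiver is the deepest slot, roughly
  // rsp + (parameter_size - 1) * Interpreter::stackElementSize. The two
  // -1 corrections below compensate argument_address() for the missing
  // return address and for indexing the last slot.)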
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }

  if (save_flags) {
    __ movl(rbcp, flags);
  }

  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    ExternalAddress table(table_addr);
    LP64_ONLY(__ lea(rscratch1, table));
    LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
    NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
  }

  // push return address
  __ push(flags);

  // Restore flags value from the constant pool cache, and restore the
  // bytecode pointer (rbcp, i.e. rsi/r13) for later null checks.
  if (save_flags) {
    __ movl(flags, rbcp);
    __ restore_bcp();
  }
}

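// Virtual dispatch: if the cache entry is marked is_vfinal, the call was
// resolved to a final (non-overridable) method and f2 already holds the
// Method*, so no vtable lookup is needed; otherwise f2 is a vtable index
// and the target is fetched from the receiver klass's vtable.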
void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv  == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "Method* must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, rbcp, true);

  __ jump_from_interpreted(method, rax);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(rax, recv);

  // profile this call
  __ profile_virtual_call(rax, rlocals, rdx);
  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);
  __ profile_called_method(method, rdx, rbcp);

  __ profile_arguments_type(rdx, method, rbcp, true);
  __ jump_from_interpreted(method, rdx);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,    // method or vtable index
                 noreg,  // unused itable index
                 rcx, rdx); // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags

  invokevirtual_helper(rbx, rcx, rdx);
}

void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
                 rcx);  // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}

void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}


void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on x86");
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 itable index
                 rcx, rdx); // recv, flags

  // rax: interface klass (from f1)
  // rbx: itable index (from f2)
  // rcx: receiver
  // rdx: flags

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCache.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
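  // Example: for "interface I {}  I i = ...; i.hashCode();" an
  // invokeinterface of hashCode() resolves to the virtual method
  // java.lang.Object::hashCode; such cache entries are marked
  // is_forced_virtual and take the vtable dispatch path below instead of
  // an itable scan.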
  Label notMethod;
  __ movl(rlocals, rdx);
  __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));

  __ jcc(Assembler::zero, notMethod);

  invokevirtual_helper(rbx, rcx, rdx);
  __ bind(notMethod);

  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore r14
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx);

  // profile this call
  __ profile_virtual_call(rdx, rbcp, rlocals);

  Label no_such_interface, no_such_method;

  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rdx, rax, rbx,
                             // outputs: method, scan temp. reg
                             rbx, rbcp,
                             no_such_interface);

  // rbx: Method* to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  //       interpreter entry point and a conditional jump to it in case of a null
  //       method.
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_called_method(rbx, rbcp, rdx);
  __ profile_arguments_type(rdx, rbx, rbcp, true);

  // do the call
  // rcx: receiver
  // rbx: Method*
  __ jump_from_interpreted(rbx, rdx);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  //       state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ pop(rbx);           // pop return address (pushed by prepare_invoke)
  __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ pop(rbx);           // pop return address (pushed by prepare_invoke)
  __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_IncompatibleClassChangeError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  const Register rbx_method = rbx;
  const Register rax_mtype  = rax;
  const Register rcx_recv   = rcx;
  const Register rdx_flags  = rdx;

  prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
  __ verify_method_ptr(rbx_method);
  __ verify_oop(rcx_recv);
  __ null_check(rcx_recv);

  // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
  // rbx: MH.invokeExact_MT method (from f2)

  // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, rbcp, true);

  __ jump_from_interpreted(rbx_method, rdx);
}

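// Example: a Java lambda such as "Runnable r = () -> {};" compiles to an
// invokedynamic; its bootstrap method (LambdaMetafactory) produces the
// CallSite, whose target is then reached through MH.linkToCallSite with
// the CallSite object passed as the trailing (appendix) argument.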
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from cpool->resolved_references[f1])
  // rbx: MH.linkToCallSite method (from f2)

  // Note:  rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(rdx, rbx_method, rbcp, false);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}

//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
  Label slow_case;
  Label slow_case_no_pop;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields
  Label allocate_shared;

  __ get_cpool_and_tags(rcx, rax);

  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, slow_case_no_pop);

  // get InstanceKlass
  __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
  __ push(rcx);  // save the klass for initializing the header

  // make sure klass is fully initialized
  __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  __ jcc(Assembler::notEqual, slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ testl(rdx, Klass::_lh_instance_slow_path_bit);
  __ jcc(Assembler::notZero, slow_case);

  //
  // Allocate the instance
  // 1) Try to allocate in the TLAB
  // 2) if that fails and the object is large, allocate in the shared Eden
  // 3) if the above fails (or is not applicable), go to a slow case
  //    (creates a new TLAB, etc.)
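  //
  // TLAB allocation is a thread-local pointer bump; no atomics are
  // needed because the TLAB is private to the allocating thread.
  // Roughly:
  //   obj     = thread->tlab_top;
  //   new_top = obj + instance_size;
  //   if (new_top > thread->tlab_end) goto shared/slow path;
  //   thread->tlab_top = new_top;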

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
#ifndef _LP64
  if (UseTLAB || allow_shared_alloc) {
    __ get_thread(thread);
  }
#endif // !_LP64

  if (UseTLAB) {
    __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
    __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ jmp(initialize_header);
    } else {
      // initialize both the header and fields
      __ jmp(initialize_object);
    }
  }

  // Allocation in the shared Eden, if allowed.
  //
  // rdx: instance size in bytes
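  //
  // The shared Eden is bump-allocated with a CAS retry loop, roughly:
  //   do {
  //     obj = *heap_top;
  //     new_top = obj + instance_size;
  //     if (new_top > heap_end) goto slow_case;
  //   } while (!CAS(heap_top, obj, new_top));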
  if (allow_shared_alloc) {
    __ bind(allocate_shared);

    ExternalAddress heap_top((address)Universe::heap()->top_addr());
    ExternalAddress heap_end((address)Universe::heap()->end_addr());

    Label retry;
    __ bind(retry);
    __ movptr(rax, heap_top);
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, heap_end);
    __ jcc(Assembler::above, slow_case);

    // Compare rax with the top addr, and if still equal, store the new
    // top addr in rbx at the address of the top addr pointer. Sets ZF if
    // it was equal, and clears it otherwise. Use lock prefix for atomicity
    // on MPs.
    //
    // rax: object begin
    // rbx: object end
    // rdx: instance size in bytes
    __ locked_cmpxchgptr(rbx, heap_top);

    // if someone beat us on the allocation, try again, otherwise continue
    __ jcc(Assembler::notEqual, retry);

    __ incr_allocated_bytes(thread, rdx, 0);
  }

  if (UseTLAB || allow_shared_alloc) {
    // The object is initialized before the header.  If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrement(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize topmost object field, divide rdx by 8, check if odd, and
    // test if zero.
    __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd

    // rdx must have been a multiple of 8
#ifdef ASSERT
    Label L;
    // Ignore partial flag stall after shrl() since it is debug VM
    __ jccb(Assembler::carryClear, L);
    __ stop("object size is not a multiple of 2 - adjust this code");
    __ bind(L);
    // rdx must be > 0, no extra check needed here
#endif

    // initialize remaining object fields: rdx was a multiple of 8
    {
      Label loop;
      __ bind(loop);
      __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
      NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
      __ decrement(rdx);
      __ jcc(Assembler::notZero, loop);
    }

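    // The mark word is set to the prototype header: with UseBiasedLocking
    // it comes from the klass (Klass::prototype_header, a potentially
    // biasable pattern); otherwise it is the default markOopDesc::prototype()
    // (unlocked, no hash, default age).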
    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ pop(rcx);   // get saved klass back in the register.
      __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t)markOopDesc::prototype()); // header
      __ pop(rcx);   // get saved klass back in the register.
    }
#ifdef _LP64
    __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
#endif
    __ store_klass(rax, rcx);  // klass

    {
      SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
      // Trigger dtrace event for fastpath
      __ push(atos);
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos);
    }

    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ pop(rcx);   // restore stack pointer to what it was when we came in.
  __ bind(slow_case_no_pop);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_constant_pool(rarg1);
  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  __ load_unsigned_byte(rarg1, at_bcp(1));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          rarg1, rax);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  __ get_constant_pool(rarg1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          rarg1, rarg2, rax);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
  __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
}

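// Once the named class has been resolved, the constant pool tag is
// JVM_CONSTANT_Class and the Klass* can be loaded straight from the
// resolved constant pool slot (the "quicked" fast path); otherwise we
// must call into the VM (quicken_io_cc), which resolves the class and
// may trigger loading and GC.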
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax); // object is in rax
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);
  __ push(atos); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));

  // vm_result_2 has metadata result
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx); // restore receiver
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rbx
  __ bind(quicked);
  __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_ptr, sizeof(ConstantPool)));

  __ bind(resolved);
  __ load_klass(rbx, rdx);

  // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  __ push_ptr(rdx);
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(rax, rdx); // Restore object from rdx

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result

#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx); // restore receiver
  __ verify_oop(rdx);
  __ load_klass(rdx, rdx);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax);
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_ptr, sizeof(ConstantPool)));

  __ bind(resolved);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
  // rax = 1: obj != NULL and obj is     an instanceof the specified klass
}


//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);

  // get the unpatched byte code
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             rarg, rbcp);
  __ mov(rbx, rax);  // why?

  // post the breakpoint event
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rarg, rbcp);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
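//
// A monitor slot is free when its obj field is NULL: unlock_object()
// clears the field on monitorexit, so back-to-back synchronized blocks
// in the same frame can reuse a slot instead of growing the monitor
// block every time.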
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label allocated;

  Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
  Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);

  // initialize entry pointer
  __ xorl(rmon, rmon); // points to free slot or NULL

  // find a free slot in the monitor block (result in rmon)
  {
    Label entry, loop, exit;
    __ movptr(rtop, monitor_block_top); // points to current entry,
                                        // starting with top-most entry
    __ lea(rbot, monitor_block_bot);    // points to word before bottom
                                        // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in rmon
    __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(rmon, rmon); // check if a slot has been found
  __ jcc(Assembler::notZero, allocated); // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers          // rsp: old expression stack top
    __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
    __ subptr(rsp, entry_size);         // move expression stack top
    __ subptr(rmon, entry_size);        // move expression stack bottom
    __ mov(rtop, rsp);                  // set start value for copy loop
    __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
                                                // word from old location
    __ movptr(Address(rtop, 0), rbot);          // and store it at new location
    __ addptr(rtop, wordSize);                  // advance to next word
    __ bind(entry);
    __ cmpptr(rtop, rmon);                      // check if bottom reached
    __ jcc(Assembler::notEqual, loop);          // if not at bottom then
                                                // copy next word
  }

  // call run-time routine
  // rmon: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(rmon);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(rtop, monitor_block_top); // points to current entry,
                                        // starting with top-most entry
    __ lea(rbot, monitor_block_bot);    // points to word before bottom
                                        // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // Error handling: unlocking was not block-structured, so throw an
  // IllegalMonitorStateException
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rtop);
  __ pop_ptr(rax); // discard object
}

// Wide instructions
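// The wide prefix widens the operand of the bytecode that follows it,
// e.g. "wide iload" takes a 2-byte local index instead of 1. The byte
// after the prefix selects the real bytecode, dispatched through the
// separate wide entry point table.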
void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rbcp increment step is part of the individual wide bytecode implementations
}

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  //   first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
  // the final -1*wordSize adjusts the lea below to the first dimension's slot.
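  // Example: for "new int[2][3]" ndims == 2, and the counts sit on the
  // expression stack with 3 (the last dimension) on top; rarg ends up
  // pointing at the first dimension's count (2).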
  __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
}
#endif /* !CC_INTERP */