macroAssembler_x86.cpp revision 8400:a6b23dd82a7e
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/assembler.hpp"
27#include "asm/assembler.inline.hpp"
28#include "compiler/disassembler.hpp"
29#include "gc_interface/collectedHeap.inline.hpp"
30#include "interpreter/interpreter.hpp"
31#include "memory/cardTableModRefBS.hpp"
32#include "memory/resourceArea.hpp"
33#include "memory/universe.hpp"
34#include "prims/methodHandles.hpp"
35#include "runtime/biasedLocking.hpp"
36#include "runtime/interfaceSupport.hpp"
37#include "runtime/objectMonitor.hpp"
38#include "runtime/os.hpp"
39#include "runtime/sharedRuntime.hpp"
40#include "runtime/stubRoutines.hpp"
41#include "utilities/macros.hpp"
42#if INCLUDE_ALL_GCS
43#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
44#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
45#include "gc_implementation/g1/heapRegion.hpp"
46#endif // INCLUDE_ALL_GCS
47
48#ifdef PRODUCT
49#define BLOCK_COMMENT(str) /* nothing */
50#define STOP(error) stop(error)
51#else
52#define BLOCK_COMMENT(str) block_comment(str)
53#define STOP(error) block_comment(error); stop(error)
54#endif
55
56#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
57
58PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
59
60#ifdef ASSERT
61bool AbstractAssembler::pd_check_instruction_mark() { return true; }
62#endif
63
64static Assembler::Condition reverse[] = {
65    Assembler::noOverflow     /* overflow      = 0x0 */ ,
66    Assembler::overflow       /* noOverflow    = 0x1 */ ,
67    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
68    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
69    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
70    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
71    Assembler::above          /* belowEqual    = 0x6 */ ,
72    Assembler::belowEqual     /* above         = 0x7 */ ,
73    Assembler::positive       /* negative      = 0x8 */ ,
74    Assembler::negative       /* positive      = 0x9 */ ,
75    Assembler::noParity       /* parity        = 0xa */ ,
76    Assembler::parity         /* noParity      = 0xb */ ,
77    Assembler::greaterEqual   /* less          = 0xc */ ,
78    Assembler::less           /* greaterEqual  = 0xd */ ,
79    Assembler::greater        /* lessEqual     = 0xe */ ,
80    Assembler::lessEqual      /* greater       = 0xf, */
81
82};
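// The table above maps each x86 condition code to its logical negation, which is
// what lets code branch around an instruction on the opposite condition. A minimal
// illustration of how such a table can be indexed (hypothetical helper name;
// assumes the enumerators carry the encodings shown in the comments above):
static Assembler::Condition negate_cc(Assembler::Condition cc) {
  return reverse[(int) cc];   // e.g. reverse[Assembler::zero] == Assembler::notZero
}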
83
84
85// Implementation of MacroAssembler
86
87// First all the versions that have distinct versions depending on 32/64 bit
88// Unless the difference is trivial (1 line or so).
89
90#ifndef _LP64
91
92// 32bit versions
93
94Address MacroAssembler::as_Address(AddressLiteral adr) {
95  return Address(adr.target(), adr.rspec());
96}
97
98Address MacroAssembler::as_Address(ArrayAddress adr) {
99  return Address::make_array(adr);
100}
101
102void MacroAssembler::call_VM_leaf_base(address entry_point,
103                                       int number_of_arguments) {
104  call(RuntimeAddress(entry_point));
105  increment(rsp, number_of_arguments * wordSize);
106}
107
108void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
109  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
110}
111
112void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
113  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
114}
115
116void MacroAssembler::cmpoop(Address src1, jobject obj) {
117  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
118}
119
120void MacroAssembler::cmpoop(Register src1, jobject obj) {
121  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
122}
123
124void MacroAssembler::extend_sign(Register hi, Register lo) {
125  // According to Intel Doc. AP-526, "Integer Divide", p.18.
126  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
127    cdql();
128  } else {
129    movl(hi, lo);
130    sarl(hi, 31);
131  }
132}
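// Both paths above compute hi:lo = sign_extend(lo): on P6 the cdq form broadcasts
// eax's sign bit into edx in a single instruction, while the fallback achieves the
// same effect with "hi = lo; hi = hi >> 31" (arithmetic shift).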
133
134void MacroAssembler::jC2(Register tmp, Label& L) {
135  // set parity bit if FPU flag C2 is set (via rax)
136  save_rax(tmp);
137  fwait(); fnstsw_ax();
138  sahf();
139  restore_rax(tmp);
140  // branch
141  jcc(Assembler::parity, L);
142}
143
144void MacroAssembler::jnC2(Register tmp, Label& L) {
145  // set parity bit if FPU flag C2 is set (via rax)
146  save_rax(tmp);
147  fwait(); fnstsw_ax();
148  sahf();
149  restore_rax(tmp);
150  // branch
151  jcc(Assembler::noParity, L);
152}
153
154// 32bit can do a case table jump in one instruction but we no longer allow the base
155// to be installed in the Address class
156void MacroAssembler::jump(ArrayAddress entry) {
157  jmp(as_Address(entry));
158}
159
160// Note: y_lo will be destroyed
161void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
162  // Long compare for Java (semantics as described in JVM spec.)
163  Label high, low, done;
164
165  cmpl(x_hi, y_hi);
166  jcc(Assembler::less, low);
167  jcc(Assembler::greater, high);
168  // x_hi is the return register
169  xorl(x_hi, x_hi);
170  cmpl(x_lo, y_lo);
171  jcc(Assembler::below, low);
172  jcc(Assembler::equal, done);
173
174  bind(high);
175  xorl(x_hi, x_hi);
176  increment(x_hi);
177  jmp(done);
178
179  bind(low);
180  xorl(x_hi, x_hi);
181  decrementl(x_hi);
182
183  bind(done);
184}
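// A stand-alone C sketch of the semantics implemented above (JVMS lcmp): the
// result is -1, 0 or +1 and is left in x_hi. Names here are illustrative only.
static int lcmp_reference(long long x, long long y) {
  if (x < y) return -1;   // "low" path: x_hi ends up as -1
  if (x > y) return  1;   // "high" path: x_hi ends up as 1
  return 0;               // equal: x_hi stays 0
}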
185
186void MacroAssembler::lea(Register dst, AddressLiteral src) {
187    mov_literal32(dst, (int32_t)src.target(), src.rspec());
188}
189
190void MacroAssembler::lea(Address dst, AddressLiteral adr) {
191  // leal(dst, as_Address(adr));
192  // see note in movl as to why we must use a move
193  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
194}
195
196void MacroAssembler::leave() {
197  mov(rsp, rbp);
198  pop(rbp);
199}
200
201void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
202  // Multiplication of two Java long values stored on the stack
203  // as illustrated below. Result is in rdx:rax.
204  //
205  // rsp ---> [  ??  ] \               \
206  //            ....    | y_rsp_offset  |
207  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
208  //          [ y_hi ]                  | (in bytes)
209  //            ....                    |
210  //          [ x_lo ]                 /
211  //          [ x_hi ]
212  //            ....
213  //
214  // Basic idea: lo(result) = lo(x_lo * y_lo)
215  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
216  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
217  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
218  Label quick;
219  // load x_hi, y_hi and check if quick
220  // multiplication is possible
221  movl(rbx, x_hi);
222  movl(rcx, y_hi);
223  movl(rax, rbx);
224  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
225  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
226  // do full multiplication
227  // 1st step
228  mull(y_lo);                                    // x_hi * y_lo
229  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
230  // 2nd step
231  movl(rax, x_lo);
232  mull(rcx);                                     // x_lo * y_hi
233  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
234  // 3rd step
235  bind(quick);                                   // note: rbx, = 0 if quick multiply!
236  movl(rax, x_lo);
237  mull(y_lo);                                    // x_lo * y_lo
238  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
239}
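// The same decomposition written out in C for reference (illustrative only).
// Only the low 64 bits of the product are kept, so the x_hi * y_hi term never
// matters and the "quick" path can skip both cross terms when the high words
// are zero:
static unsigned long long lmul_reference(unsigned int x_hi, unsigned int x_lo,
                                         unsigned int y_hi, unsigned int y_lo) {
  unsigned long long lo_prod = (unsigned long long) x_lo * y_lo; // full 64-bit x_lo * y_lo
  unsigned int       hi_corr = x_hi * y_lo + x_lo * y_hi;        // low 32 bits of each cross term
  return lo_prod + ((unsigned long long) hi_corr << 32);
}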
240
241void MacroAssembler::lneg(Register hi, Register lo) {
242  negl(lo);
243  adcl(hi, 0);
244  negl(hi);
245}
246
247void MacroAssembler::lshl(Register hi, Register lo) {
248  // Java shift left long support (semantics as described in JVM spec., p.305)
249  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
250  // shift value is in rcx !
251  assert(hi != rcx, "must not use rcx");
252  assert(lo != rcx, "must not use rcx");
253  const Register s = rcx;                        // shift count
254  const int      n = BitsPerWord;
255  Label L;
256  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
257  cmpl(s, n);                                    // if (s < n)
258  jcc(Assembler::less, L);                       // else (s >= n)
259  movl(hi, lo);                                  // x := x << n
260  xorl(lo, lo);
261  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
262  bind(L);                                       // s (mod n) < n
263  shldl(hi, lo);                                 // x := x << s
264  shll(lo);
265}
266
267
268void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
269  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
270  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
271  assert(hi != rcx, "must not use rcx");
272  assert(lo != rcx, "must not use rcx");
273  const Register s = rcx;                        // shift count
274  const int      n = BitsPerWord;
275  Label L;
276  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
277  cmpl(s, n);                                    // if (s < n)
278  jcc(Assembler::less, L);                       // else (s >= n)
279  movl(lo, hi);                                  // x := x >> n
280  if (sign_extension) sarl(hi, 31);
281  else                xorl(hi, hi);
282  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
283  bind(L);                                       // s (mod n) < n
284  shrdl(lo, hi);                                 // x := x >> s
285  if (sign_extension) sarl(hi);
286  else                shrl(hi);
287}
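// Reference semantics of the two long-shift helpers above, in C (illustrative
// only). The shift count is masked to 6 bits; for counts >= 32 the word move plus
// the hardware's implicit "count mod 32" supply the remaining shift. lshl is the
// mirror image, i.e. ((unsigned long long) x) << s after the same masking.
static long long lshr_reference(long long x, int s, bool sign_extension) {
  s &= 0x3f;                                         // Java masks long shift counts to 6 bits
  if (sign_extension) {
    return x >> s;                                   // arithmetic right shift (lshr path)
  }
  return (long long) ((unsigned long long) x >> s);  // logical right shift (lushr path)
}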
288
289void MacroAssembler::movoop(Register dst, jobject obj) {
290  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
291}
292
293void MacroAssembler::movoop(Address dst, jobject obj) {
294  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
295}
296
297void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
298  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
299}
300
301void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
302  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
303}
304
305void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
306  // scratch register is not used,
307  // it is defined to match parameters of 64-bit version of this method.
308  if (src.is_lval()) {
309    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
310  } else {
311    movl(dst, as_Address(src));
312  }
313}
314
315void MacroAssembler::movptr(ArrayAddress dst, Register src) {
316  movl(as_Address(dst), src);
317}
318
319void MacroAssembler::movptr(Register dst, ArrayAddress src) {
320  movl(dst, as_Address(src));
321}
322
323// src should NEVER be a real pointer. Use AddressLiteral for true pointers
324void MacroAssembler::movptr(Address dst, intptr_t src) {
325  movl(dst, src);
326}
327
328
329void MacroAssembler::pop_callee_saved_registers() {
330  pop(rcx);
331  pop(rdx);
332  pop(rdi);
333  pop(rsi);
334}
335
336void MacroAssembler::pop_fTOS() {
337  fld_d(Address(rsp, 0));
338  addl(rsp, 2 * wordSize);
339}
340
341void MacroAssembler::push_callee_saved_registers() {
342  push(rsi);
343  push(rdi);
344  push(rdx);
345  push(rcx);
346}
347
348void MacroAssembler::push_fTOS() {
349  subl(rsp, 2 * wordSize);
350  fstp_d(Address(rsp, 0));
351}
352
353
354void MacroAssembler::pushoop(jobject obj) {
355  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
356}
357
358void MacroAssembler::pushklass(Metadata* obj) {
359  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
360}
361
362void MacroAssembler::pushptr(AddressLiteral src) {
363  if (src.is_lval()) {
364    push_literal32((int32_t)src.target(), src.rspec());
365  } else {
366    pushl(as_Address(src));
367  }
368}
369
370void MacroAssembler::set_word_if_not_zero(Register dst) {
371  xorl(dst, dst);
372  set_byte_if_not_zero(dst);
373}
374
375static void pass_arg0(MacroAssembler* masm, Register arg) {
376  masm->push(arg);
377}
378
379static void pass_arg1(MacroAssembler* masm, Register arg) {
380  masm->push(arg);
381}
382
383static void pass_arg2(MacroAssembler* masm, Register arg) {
384  masm->push(arg);
385}
386
387static void pass_arg3(MacroAssembler* masm, Register arg) {
388  masm->push(arg);
389}
390
391#ifndef PRODUCT
392extern "C" void findpc(intptr_t x);
393#endif
394
395void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
396  // In order to get locks to work, we need to fake an in_VM state
397  JavaThread* thread = JavaThread::current();
398  JavaThreadState saved_state = thread->thread_state();
399  thread->set_thread_state(_thread_in_vm);
400  if (ShowMessageBoxOnError) {
401    JavaThread* thread = JavaThread::current();
402    JavaThreadState saved_state = thread->thread_state();
403    thread->set_thread_state(_thread_in_vm);
404    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
405      ttyLocker ttyl;
406      BytecodeCounter::print();
407    }
408    // To see where a verify_oop failed, get $ebx+40/X for this frame.
409    // This is the value of eip which points to where verify_oop will return.
410    if (os::message_box(msg, "Execution stopped, print registers?")) {
411      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
412      BREAKPOINT;
413    }
414  } else {
415    ttyLocker ttyl;
416    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
417  }
418  // Don't assert holding the ttyLock
419  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
420  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
421}
422
423void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
424  ttyLocker ttyl;
425  FlagSetting fs(Debugging, true);
426  tty->print_cr("eip = 0x%08x", eip);
427#ifndef PRODUCT
428  if ((WizardMode || Verbose) && PrintMiscellaneous) {
429    tty->cr();
430    findpc(eip);
431    tty->cr();
432  }
433#endif
434#define PRINT_REG(rax) \
435  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
436  PRINT_REG(rax);
437  PRINT_REG(rbx);
438  PRINT_REG(rcx);
439  PRINT_REG(rdx);
440  PRINT_REG(rdi);
441  PRINT_REG(rsi);
442  PRINT_REG(rbp);
443  PRINT_REG(rsp);
444#undef PRINT_REG
445  // Print some words near the top of the stack.
446  int* dump_sp = (int*) rsp;
447  for (int col1 = 0; col1 < 8; col1++) {
448    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
449    os::print_location(tty, *dump_sp++);
450  }
451  for (int row = 0; row < 16; row++) {
452    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
453    for (int col = 0; col < 8; col++) {
454      tty->print(" 0x%08x", *dump_sp++);
455    }
456    tty->cr();
457  }
458  // Print some instructions around pc:
459  Disassembler::decode((address)eip-64, (address)eip);
460  tty->print_cr("--------");
461  Disassembler::decode((address)eip, (address)eip+32);
462}
463
464void MacroAssembler::stop(const char* msg) {
465  ExternalAddress message((address)msg);
466  // push address of message
467  pushptr(message.addr());
468  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
469  pusha();                                            // push registers
470  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
471  hlt();
472}
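// The "{ Label L; call(L, relocInfo::none); bind(L); }" idiom above is the usual
// 32-bit way to materialize the current eip: the call pushes its own return address
// (the very next instruction) and falls straight through, so debug32 receives both
// the message and the code address at which execution stopped.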
473
474void MacroAssembler::warn(const char* msg) {
475  push_CPU_state();
476
477  ExternalAddress message((address) msg);
478  // push address of message
479  pushptr(message.addr());
480
481  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
482  addl(rsp, wordSize);       // discard argument
483  pop_CPU_state();
484}
485
486void MacroAssembler::print_state() {
487  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
488  pusha();                                            // push registers
489
490  push_CPU_state();
491  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
492  pop_CPU_state();
493
494  popa();
495  addl(rsp, wordSize);
496}
497
498#else // _LP64
499
500// 64 bit versions
501
502Address MacroAssembler::as_Address(AddressLiteral adr) {
503  // amd64 always does this as a pc-rel
504  // we can be absolute or disp based on the instruction type
505  // jmp/call are displacements; others are absolute
506  assert(!adr.is_lval(), "must be rval");
507  assert(reachable(adr), "must be");
508  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
509
510}
511
512Address MacroAssembler::as_Address(ArrayAddress adr) {
513  AddressLiteral base = adr.base();
514  lea(rscratch1, base);
515  Address index = adr.index();
516  assert(index._disp == 0, "must not have disp"); // maybe it can?
517  Address array(rscratch1, index._index, index._scale, index._disp);
518  return array;
519}
520
521void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
522  Label L, E;
523
524#ifdef _WIN64
525  // Windows always allocates space for its register args
526  assert(num_args <= 4, "only register arguments supported");
527  subq(rsp,  frame::arg_reg_save_area_bytes);
528#endif
529
530  // Align stack if necessary
531  testl(rsp, 15);
532  jcc(Assembler::zero, L);
533
534  subq(rsp, 8);
535  {
536    call(RuntimeAddress(entry_point));
537  }
538  addq(rsp, 8);
539  jmp(E);
540
541  bind(L);
542  {
543    call(RuntimeAddress(entry_point));
544  }
545
546  bind(E);
547
548#ifdef _WIN64
549  // restore stack pointer
550  addq(rsp, frame::arg_reg_save_area_bytes);
551#endif
552
553}
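// The testl(rsp, 15)/subq(rsp, 8) dance above keeps rsp 16-byte aligned at the
// call, as the x86-64 ABI expects on entry to the callee; since these leaf-call
// sites can only be off by a single word, one conditional 8-byte adjustment is
// enough to restore alignment.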
554
555void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
556  assert(!src2.is_lval(), "should use cmpptr");
557
558  if (reachable(src2)) {
559    cmpq(src1, as_Address(src2));
560  } else {
561    lea(rscratch1, src2);
562    Assembler::cmpq(src1, Address(rscratch1, 0));
563  }
564}
565
566int MacroAssembler::corrected_idivq(Register reg) {
567  // Full implementation of Java ldiv and lrem; checks for special
568  // case as described in JVM spec., p.243 & p.271.  The function
569  // returns the (pc) offset of the idivl instruction - may be needed
570  // for implicit exceptions.
571  //
572  //         normal case                           special case
573  //
574  // input : rax: dividend                         min_long
575  //         reg: divisor   (may not be eax/edx)   -1
576  //
577  // output: rax: quotient  (= rax idiv reg)       min_long
578  //         rdx: remainder (= rax irem reg)       0
579  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
580  static const int64_t min_long = 0x8000000000000000;
581  Label normal_case, special_case;
582
583  // check for special case
584  cmp64(rax, ExternalAddress((address) &min_long));
585  jcc(Assembler::notEqual, normal_case);
586  xorl(rdx, rdx); // prepare rdx for possible special case (where
587                  // remainder = 0)
588  cmpq(reg, -1);
589  jcc(Assembler::equal, special_case);
590
591  // handle normal case
592  bind(normal_case);
593  cdqq();
594  int idivq_offset = offset();
595  idivq(reg);
596
597  // normal and special case exit
598  bind(special_case);
599
600  return idivq_offset;
601}
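// Reference behaviour of the special case handled above, in C (illustrative only):
// on x86, min_long / -1 overflows and idivq raises #DE, so the JVM-spec result
// (quotient = min_long, remainder = 0) is produced without dividing.
static void ldiv_reference(long long dividend, long long divisor,
                           long long* quotient, long long* remainder) {
  const long long min_long = -0x7fffffffffffffffLL - 1;
  if (dividend == min_long && divisor == -1) {
    *quotient  = min_long;   // wraps around, as the JVM spec requires
    *remainder = 0;
  } else {
    *quotient  = dividend / divisor;
    *remainder = dividend % divisor;
  }
}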
602
603void MacroAssembler::decrementq(Register reg, int value) {
604  if (value == min_jint) { subq(reg, value); return; }
605  if (value <  0) { incrementq(reg, -value); return; }
606  if (value == 0) {                        ; return; }
607  if (value == 1 && UseIncDec) { decq(reg) ; return; }
608  /* else */      { subq(reg, value)       ; return; }
609}
610
611void MacroAssembler::decrementq(Address dst, int value) {
612  if (value == min_jint) { subq(dst, value); return; }
613  if (value <  0) { incrementq(dst, -value); return; }
614  if (value == 0) {                        ; return; }
615  if (value == 1 && UseIncDec) { decq(dst) ; return; }
616  /* else */      { subq(dst, value)       ; return; }
617}
618
619void MacroAssembler::incrementq(AddressLiteral dst) {
620  if (reachable(dst)) {
621    incrementq(as_Address(dst));
622  } else {
623    lea(rscratch1, dst);
624    incrementq(Address(rscratch1, 0));
625  }
626}
627
628void MacroAssembler::incrementq(Register reg, int value) {
629  if (value == min_jint) { addq(reg, value); return; }
630  if (value <  0) { decrementq(reg, -value); return; }
631  if (value == 0) {                        ; return; }
632  if (value == 1 && UseIncDec) { incq(reg) ; return; }
633  /* else */      { addq(reg, value)       ; return; }
634}
635
636void MacroAssembler::incrementq(Address dst, int value) {
637  if (value == min_jint) { addq(dst, value); return; }
638  if (value <  0) { decrementq(dst, -value); return; }
639  if (value == 0) {                        ; return; }
640  if (value == 1 && UseIncDec) { incq(dst) ; return; }
641  /* else */      { addq(dst, value)       ; return; }
642}
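// The min_jint special case in the four helpers above exists because -min_jint
// overflows a 32-bit int, so the usual "negate the value and forward to the other
// helper" trick cannot be applied to that one immediate; it is emitted directly
// as an addq/subq instead.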
643
644// 32bit can do a case table jump in one instruction but we no longer allow the base
645// to be installed in the Address class
646void MacroAssembler::jump(ArrayAddress entry) {
647  lea(rscratch1, entry.base());
648  Address dispatch = entry.index();
649  assert(dispatch._base == noreg, "must be");
650  dispatch._base = rscratch1;
651  jmp(dispatch);
652}
653
654void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
655  ShouldNotReachHere(); // 64bit doesn't use two regs
656  cmpq(x_lo, y_lo);
657}
658
659void MacroAssembler::lea(Register dst, AddressLiteral src) {
660    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
661}
662
663void MacroAssembler::lea(Address dst, AddressLiteral adr) {
664  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
665  movptr(dst, rscratch1);
666}
667
668void MacroAssembler::leave() {
669  // %%% is this really better? Why not on 32bit too?
670  emit_int8((unsigned char)0xC9); // LEAVE
671}
672
673void MacroAssembler::lneg(Register hi, Register lo) {
674  ShouldNotReachHere(); // 64bit doesn't use two regs
675  negq(lo);
676}
677
678void MacroAssembler::movoop(Register dst, jobject obj) {
679  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
680}
681
682void MacroAssembler::movoop(Address dst, jobject obj) {
683  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
684  movq(dst, rscratch1);
685}
686
687void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
688  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
689}
690
691void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
692  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
693  movq(dst, rscratch1);
694}
695
696void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
697  if (src.is_lval()) {
698    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
699  } else {
700    if (reachable(src)) {
701      movq(dst, as_Address(src));
702    } else {
703      lea(scratch, src);
704      movq(dst, Address(scratch, 0));
705    }
706  }
707}
708
709void MacroAssembler::movptr(ArrayAddress dst, Register src) {
710  movq(as_Address(dst), src);
711}
712
713void MacroAssembler::movptr(Register dst, ArrayAddress src) {
714  movq(dst, as_Address(src));
715}
716
717// src should NEVER be a real pointer. Use AddressLiteral for true pointers
718void MacroAssembler::movptr(Address dst, intptr_t src) {
719  mov64(rscratch1, src);
720  movq(dst, rscratch1);
721}
722
723// These are mostly for initializing NULL
724void MacroAssembler::movptr(Address dst, int32_t src) {
725  movslq(dst, src);
726}
727
728void MacroAssembler::movptr(Register dst, int32_t src) {
729  mov64(dst, (intptr_t)src);
730}
731
732void MacroAssembler::pushoop(jobject obj) {
733  movoop(rscratch1, obj);
734  push(rscratch1);
735}
736
737void MacroAssembler::pushklass(Metadata* obj) {
738  mov_metadata(rscratch1, obj);
739  push(rscratch1);
740}
741
742void MacroAssembler::pushptr(AddressLiteral src) {
743  lea(rscratch1, src);
744  if (src.is_lval()) {
745    push(rscratch1);
746  } else {
747    pushq(Address(rscratch1, 0));
748  }
749}
750
751void MacroAssembler::reset_last_Java_frame(bool clear_fp,
752                                           bool clear_pc) {
753  // we must set sp to zero to clear frame
754  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
755  // must clear fp, so that compiled frames are not confused; it is
756  // possible that we need it only for debugging
757  if (clear_fp) {
758    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
759  }
760
761  if (clear_pc) {
762    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
763  }
764}
765
766void MacroAssembler::set_last_Java_frame(Register last_java_sp,
767                                         Register last_java_fp,
768                                         address  last_java_pc) {
769  // determine last_java_sp register
770  if (!last_java_sp->is_valid()) {
771    last_java_sp = rsp;
772  }
773
774  // last_java_fp is optional
775  if (last_java_fp->is_valid()) {
776    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
777           last_java_fp);
778  }
779
780  // last_java_pc is optional
781  if (last_java_pc != NULL) {
782    Address java_pc(r15_thread,
783                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
784    lea(rscratch1, InternalAddress(last_java_pc));
785    movptr(java_pc, rscratch1);
786  }
787
788  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
789}
790
791static void pass_arg0(MacroAssembler* masm, Register arg) {
792  if (c_rarg0 != arg ) {
793    masm->mov(c_rarg0, arg);
794  }
795}
796
797static void pass_arg1(MacroAssembler* masm, Register arg) {
798  if (c_rarg1 != arg ) {
799    masm->mov(c_rarg1, arg);
800  }
801}
802
803static void pass_arg2(MacroAssembler* masm, Register arg) {
804  if (c_rarg2 != arg ) {
805    masm->mov(c_rarg2, arg);
806  }
807}
808
809static void pass_arg3(MacroAssembler* masm, Register arg) {
810  if (c_rarg3 != arg ) {
811    masm->mov(c_rarg3, arg);
812  }
813}
814
815void MacroAssembler::stop(const char* msg) {
816  address rip = pc();
817  pusha(); // get regs on stack
818  lea(c_rarg0, ExternalAddress((address) msg));
819  lea(c_rarg1, InternalAddress(rip));
820  movq(c_rarg2, rsp); // pass pointer to regs array
821  andq(rsp, -16); // align stack as required by ABI
822  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
823  hlt();
824}
825
826void MacroAssembler::warn(const char* msg) {
827  push(rbp);
828  movq(rbp, rsp);
829  andq(rsp, -16);     // align stack as required by push_CPU_state and call
830  push_CPU_state();   // keeps alignment at 16 bytes
831  lea(c_rarg0, ExternalAddress((address) msg));
832  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
833  pop_CPU_state();
834  mov(rsp, rbp);
835  pop(rbp);
836}
837
838void MacroAssembler::print_state() {
839  address rip = pc();
840  pusha();            // get regs on stack
841  push(rbp);
842  movq(rbp, rsp);
843  andq(rsp, -16);     // align stack as required by push_CPU_state and call
844  push_CPU_state();   // keeps alignment at 16 bytes
845
846  lea(c_rarg0, InternalAddress(rip));
847  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
848  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
849
850  pop_CPU_state();
851  mov(rsp, rbp);
852  pop(rbp);
853  popa();
854}
855
856#ifndef PRODUCT
857extern "C" void findpc(intptr_t x);
858#endif
859
860void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
861  // In order to get locks to work, we need to fake an in_VM state
862  if (ShowMessageBoxOnError) {
863    JavaThread* thread = JavaThread::current();
864    JavaThreadState saved_state = thread->thread_state();
865    thread->set_thread_state(_thread_in_vm);
866#ifndef PRODUCT
867    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
868      ttyLocker ttyl;
869      BytecodeCounter::print();
870    }
871#endif
872    // To see where a verify_oop failed, get $ebx+40/X for this frame.
873    // XXX correct this offset for amd64
874    // This is the value of eip which points to where verify_oop will return.
875    if (os::message_box(msg, "Execution stopped, print registers?")) {
876      print_state64(pc, regs);
877      BREAKPOINT;
878      assert(false, "start up GDB");
879    }
880    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
881  } else {
882    ttyLocker ttyl;
883    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
884                    msg);
885    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
886  }
887}
888
889void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
890  ttyLocker ttyl;
891  FlagSetting fs(Debugging, true);
892  tty->print_cr("rip = 0x%016lx", pc);
893#ifndef PRODUCT
894  tty->cr();
895  findpc(pc);
896  tty->cr();
897#endif
898#define PRINT_REG(rax, value) \
899  { tty->print("%s = ", #rax); os::print_location(tty, value); }
900  PRINT_REG(rax, regs[15]);
901  PRINT_REG(rbx, regs[12]);
902  PRINT_REG(rcx, regs[14]);
903  PRINT_REG(rdx, regs[13]);
904  PRINT_REG(rdi, regs[8]);
905  PRINT_REG(rsi, regs[9]);
906  PRINT_REG(rbp, regs[10]);
907  PRINT_REG(rsp, regs[11]);
908  PRINT_REG(r8 , regs[7]);
909  PRINT_REG(r9 , regs[6]);
910  PRINT_REG(r10, regs[5]);
911  PRINT_REG(r11, regs[4]);
912  PRINT_REG(r12, regs[3]);
913  PRINT_REG(r13, regs[2]);
914  PRINT_REG(r14, regs[1]);
915  PRINT_REG(r15, regs[0]);
916#undef PRINT_REG
917  // Print some words near the top of the stack.
918  int64_t* rsp = (int64_t*) regs[11];
919  int64_t* dump_sp = rsp;
920  for (int col1 = 0; col1 < 8; col1++) {
921    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
922    os::print_location(tty, *dump_sp++);
923  }
924  for (int row = 0; row < 25; row++) {
925    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
926    for (int col = 0; col < 4; col++) {
927      tty->print(" 0x%016lx", *dump_sp++);
928    }
929    tty->cr();
930  }
931  // Print some instructions around pc:
932  Disassembler::decode((address)pc-64, (address)pc);
933  tty->print_cr("--------");
934  Disassembler::decode((address)pc, (address)pc+32);
935}
936
937#endif // _LP64
938
939// Now versions that are common to 32/64 bit
940
941void MacroAssembler::addptr(Register dst, int32_t imm32) {
942  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
943}
944
945void MacroAssembler::addptr(Register dst, Register src) {
946  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
947}
948
949void MacroAssembler::addptr(Address dst, Register src) {
950  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
951}
952
953void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
954  if (reachable(src)) {
955    Assembler::addsd(dst, as_Address(src));
956  } else {
957    lea(rscratch1, src);
958    Assembler::addsd(dst, Address(rscratch1, 0));
959  }
960}
961
962void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
963  if (reachable(src)) {
964    addss(dst, as_Address(src));
965  } else {
966    lea(rscratch1, src);
967    addss(dst, Address(rscratch1, 0));
968  }
969}
970
971void MacroAssembler::align(int modulus) {
972  if (offset() % modulus != 0) {
973    nop(modulus - (offset() % modulus));
974  }
975}
976
977void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
978  // Used in sign-masking with aligned address.
979  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
980  if (reachable(src)) {
981    Assembler::andpd(dst, as_Address(src));
982  } else {
983    lea(rscratch1, src);
984    Assembler::andpd(dst, Address(rscratch1, 0));
985  }
986}
987
988void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
989  // Used in sign-masking with aligned address.
990  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
991  if (reachable(src)) {
992    Assembler::andps(dst, as_Address(src));
993  } else {
994    lea(rscratch1, src);
995    Assembler::andps(dst, Address(rscratch1, 0));
996  }
997}
998
999void MacroAssembler::andptr(Register dst, int32_t imm32) {
1000  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
1001}
1002
1003void MacroAssembler::atomic_incl(Address counter_addr) {
1004  if (os::is_MP())
1005    lock();
1006  incrementl(counter_addr);
1007}
1008
1009void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
1010  if (reachable(counter_addr)) {
1011    atomic_incl(as_Address(counter_addr));
1012  } else {
1013    lea(scr, counter_addr);
1014    atomic_incl(Address(scr, 0));
1015  }
1016}
1017
1018#ifdef _LP64
1019void MacroAssembler::atomic_incq(Address counter_addr) {
1020  if (os::is_MP())
1021    lock();
1022  incrementq(counter_addr);
1023}
1024
1025void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
1026  if (reachable(counter_addr)) {
1027    atomic_incq(as_Address(counter_addr));
1028  } else {
1029    lea(scr, counter_addr);
1030    atomic_incq(Address(scr, 0));
1031  }
1032}
1033#endif
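// What the emitted "lock; incl/incq [addr]" amounts to: a full-fence atomic
// read-modify-write. A rough equivalent using the gcc/clang builtin (sketch only;
// HotSpot's runtime code goes through its own Atomic:: wrappers elsewhere):
static void atomic_incl_reference(volatile int* counter) {
  __atomic_fetch_add(counter, 1, __ATOMIC_SEQ_CST);  // compiles to a locked add/inc on x86
}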
1034
1035// Writes to successive stack pages until the given offset is reached, to check for
1036// stack overflow + shadow pages.  This clobbers tmp.
1037void MacroAssembler::bang_stack_size(Register size, Register tmp) {
1038  movptr(tmp, rsp);
1039  // Bang stack for total size given plus shadow page size.
1040  // Bang one page at a time because large size can bang beyond yellow and
1041  // red zones.
1042  Label loop;
1043  bind(loop);
1044  movl(Address(tmp, (-os::vm_page_size())), size );
1045  subptr(tmp, os::vm_page_size());
1046  subl(size, os::vm_page_size());
1047  jcc(Assembler::greater, loop);
1048
1049  // Bang down shadow pages too.
1050  // At this point, (tmp-0) is the last address touched, so don't
1051  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
1052  // was post-decremented.)  Skip this address by starting at i=1, and
1053  // touch a few more pages below.  N.B.  It is important to touch all
1054  // the way down to and including i=StackShadowPages.
1055  for (int i = 1; i < StackShadowPages; i++) {
1056    // this could be any sized move but this can be a debugging crumb
1057    // so the bigger the better.
1058    movptr(Address(tmp, (-i*os::vm_page_size())), size );
1059  }
1060}
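// Shape of the loop emitted above, in C (names and types are illustrative only).
// Each page from sp - page down past 'size' plus the shadow pages is written once,
// so running out of stack faults here, at a known bang site, rather than somewhere
// inside the callee:
static void bang_stack_reference(char* sp, long size, long page, int shadow_pages) {
  char* tmp = sp;
  do {                                         // bang one page at a time
    *(long*) (tmp - page) = size;              // the stored value is just a debugging crumb
    tmp  -= page;
    size -= page;
  } while (size > 0);
  for (int i = 1; i < shadow_pages; i++) {     // then the shadow pages below the last bang
    *(long*) (tmp - i * page) = size;
  }
}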
1061
1062int MacroAssembler::biased_locking_enter(Register lock_reg,
1063                                         Register obj_reg,
1064                                         Register swap_reg,
1065                                         Register tmp_reg,
1066                                         bool swap_reg_contains_mark,
1067                                         Label& done,
1068                                         Label* slow_case,
1069                                         BiasedLockingCounters* counters) {
1070  assert(UseBiasedLocking, "why call this otherwise?");
1071  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
1072  assert(tmp_reg != noreg, "tmp_reg must be supplied");
1073  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
1074  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
1075  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
1076  Address saved_mark_addr(lock_reg, 0);
1077
1078  if (PrintBiasedLockingStatistics && counters == NULL) {
1079    counters = BiasedLocking::counters();
1080  }
1081  // Biased locking
1082  // See whether the lock is currently biased toward our thread and
1083  // whether the epoch is still valid
1084  // Note that the runtime guarantees sufficient alignment of JavaThread
1085  // pointers to allow age to be placed into low bits
1086  // First check to see whether biasing is even enabled for this object
1087  Label cas_label;
1088  int null_check_offset = -1;
1089  if (!swap_reg_contains_mark) {
1090    null_check_offset = offset();
1091    movptr(swap_reg, mark_addr);
1092  }
1093  movptr(tmp_reg, swap_reg);
1094  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
1095  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
1096  jcc(Assembler::notEqual, cas_label);
1097  // The bias pattern is present in the object's header. Need to check
1098  // whether the bias owner and the epoch are both still current.
1099#ifndef _LP64
1100  // Note that because there is no current thread register on x86_32 we
1101  // need to store off the mark word we read out of the object to
1102  // avoid reloading it and needing to recheck invariants below. This
1103  // store is unfortunate but it makes the overall code shorter and
1104  // simpler.
1105  movptr(saved_mark_addr, swap_reg);
1106#endif
1107  if (swap_reg_contains_mark) {
1108    null_check_offset = offset();
1109  }
1110  load_prototype_header(tmp_reg, obj_reg);
1111#ifdef _LP64
1112  orptr(tmp_reg, r15_thread);
1113  xorptr(tmp_reg, swap_reg);
1114  Register header_reg = tmp_reg;
1115#else
1116  xorptr(tmp_reg, swap_reg);
1117  get_thread(swap_reg);
1118  xorptr(swap_reg, tmp_reg);
1119  Register header_reg = swap_reg;
1120#endif
1121  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
1122  if (counters != NULL) {
1123    cond_inc32(Assembler::zero,
1124               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
1125  }
1126  jcc(Assembler::equal, done);
1127
1128  Label try_revoke_bias;
1129  Label try_rebias;
1130
1131  // At this point we know that the header has the bias pattern and
1132  // that we are not the bias owner in the current epoch. We need to
1133  // figure out more details about the state of the header in order to
1134  // know what operations can be legally performed on the object's
1135  // header.
1136
1137  // If the low three bits in the xor result aren't clear, that means
1138  // the prototype header is no longer biased and we have to revoke
1139  // the bias on this object.
1140  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
1141  jccb(Assembler::notZero, try_revoke_bias);
1142
1143  // Biasing is still enabled for this data type. See whether the
1144  // epoch of the current bias is still valid, meaning that the epoch
1145  // bits of the mark word are equal to the epoch bits of the
1146  // prototype header. (Note that the prototype header's epoch bits
1147  // only change at a safepoint.) If not, attempt to rebias the object
1148  // toward the current thread. Note that we must be absolutely sure
1149  // that the current epoch is invalid in order to do this because
1150  // otherwise the manipulations it performs on the mark word are
1151  // illegal.
1152  testptr(header_reg, markOopDesc::epoch_mask_in_place);
1153  jccb(Assembler::notZero, try_rebias);
1154
1155  // The epoch of the current bias is still valid but we know nothing
1156  // about the owner; it might be set or it might be clear. Try to
1157  // acquire the bias of the object using an atomic operation. If this
1158  // fails we will go in to the runtime to revoke the object's bias.
1159  // Note that we first construct the presumed unbiased header so we
1160  // don't accidentally blow away another thread's valid bias.
1161  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1162  andptr(swap_reg,
1163         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
1164#ifdef _LP64
1165  movptr(tmp_reg, swap_reg);
1166  orptr(tmp_reg, r15_thread);
1167#else
1168  get_thread(tmp_reg);
1169  orptr(tmp_reg, swap_reg);
1170#endif
1171  if (os::is_MP()) {
1172    lock();
1173  }
1174  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1175  // If the biasing toward our thread failed, this means that
1176  // another thread succeeded in biasing it toward itself and we
1177  // need to revoke that bias. The revocation will occur in the
1178  // interpreter runtime in the slow case.
1179  if (counters != NULL) {
1180    cond_inc32(Assembler::zero,
1181               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
1182  }
1183  if (slow_case != NULL) {
1184    jcc(Assembler::notZero, *slow_case);
1185  }
1186  jmp(done);
1187
1188  bind(try_rebias);
1189  // At this point we know the epoch has expired, meaning that the
1190  // current "bias owner", if any, is actually invalid. Under these
1191  // circumstances _only_, we are allowed to use the current header's
1192  // value as the comparison value when doing the cas to acquire the
1193  // bias in the current epoch. In other words, we allow transfer of
1194  // the bias from one thread to another directly in this situation.
1195  //
1196  // FIXME: due to a lack of registers we currently blow away the age
1197  // bits in this situation. Should attempt to preserve them.
1198  load_prototype_header(tmp_reg, obj_reg);
1199#ifdef _LP64
1200  orptr(tmp_reg, r15_thread);
1201#else
1202  get_thread(swap_reg);
1203  orptr(tmp_reg, swap_reg);
1204  movptr(swap_reg, saved_mark_addr);
1205#endif
1206  if (os::is_MP()) {
1207    lock();
1208  }
1209  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1210  // If the biasing toward our thread failed, then another thread
1211  // succeeded in biasing it toward itself and we need to revoke that
1212  // bias. The revocation will occur in the runtime in the slow case.
1213  if (counters != NULL) {
1214    cond_inc32(Assembler::zero,
1215               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
1216  }
1217  if (slow_case != NULL) {
1218    jcc(Assembler::notZero, *slow_case);
1219  }
1220  jmp(done);
1221
1222  bind(try_revoke_bias);
1223  // The prototype mark in the klass doesn't have the bias bit set any
1224  // more, indicating that objects of this data type are not supposed
1225  // to be biased any more. We are going to try to reset the mark of
1226  // this object to the prototype value and fall through to the
1227  // CAS-based locking scheme. Note that if our CAS fails, it means
1228  // that another thread raced us for the privilege of revoking the
1229  // bias of this particular object, so it's okay to continue in the
1230  // normal locking code.
1231  //
1232  // FIXME: due to a lack of registers we currently blow away the age
1233  // bits in this situation. Should attempt to preserve them.
1234  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1235  load_prototype_header(tmp_reg, obj_reg);
1236  if (os::is_MP()) {
1237    lock();
1238  }
1239  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1240  // Fall through to the normal CAS-based lock, because no matter what
1241  // the result of the above CAS, some thread must have succeeded in
1242  // removing the bias bit from the object's header.
1243  if (counters != NULL) {
1244    cond_inc32(Assembler::zero,
1245               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
1246  }
1247
1248  bind(cas_label);
1249
1250  return null_check_offset;
1251}
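// Summary of the control flow emitted above (shorthand only; mark = object header
// word, proto = klass prototype header, thread = current JavaThread):
//
//   if ((mark & biased_lock_mask) != biased_lock_pattern)  goto cas_label;       // not biased
//   x = ((proto | thread) ^ mark) & ~age_mask;
//   if (x == 0)                     goto done;              // already biased to this thread
//   if (x & biased_lock_mask)       goto try_revoke_bias;   // klass no longer allows biasing
//   if (x & epoch_mask)             goto try_rebias;        // epoch expired: CAS in a new bias
//   CAS(mark_addr, mark & (lock|age|epoch bits), same | thread);  // anonymous bias: try to claim it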
1252
1253void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
1254  assert(UseBiasedLocking, "why call this otherwise?");
1255
1256  // Check for biased locking unlock case, which is a no-op
1257  // Note: we do not have to check the thread ID for two reasons.
1258  // First, the interpreter checks for IllegalMonitorStateException at
1259  // a higher level. Second, if the bias was revoked while we held the
1260  // lock, the object could not be rebiased toward another thread, so
1261  // the bias bit would be clear.
1262  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1263  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
1264  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
1265  jcc(Assembler::equal, done);
1266}
1267
1268#ifdef COMPILER2
1269
1270#if INCLUDE_RTM_OPT
1271
1272// Update rtm_counters based on abort status
1273// input: abort_status
1274//        rtm_counters (RTMLockingCounters*)
1275// flags are killed
1276void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
1277
1278  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
1279  if (PrintPreciseRTMLockingStatistics) {
1280    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
1281      Label check_abort;
1282      testl(abort_status, (1<<i));
1283      jccb(Assembler::equal, check_abort);
1284      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
1285      bind(check_abort);
1286    }
1287  }
1288}
1289
1290// Branch if (random & (count-1) != 0), count is 2^n
1291// tmp, scr and flags are killed
1292void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
1293  assert(tmp == rax, "");
1294  assert(scr == rdx, "");
1295  rdtsc(); // modifies EDX:EAX
1296  andptr(tmp, count-1);
1297  jccb(Assembler::notZero, brLabel);
1298}
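// The helper above samples using the low bits of the time-stamp counter, so only
// about 1 in 'count' acquisitions pays for the counter update. The test it emits,
// in C (illustrative only; count must be a power of two):
static bool skip_increment_reference(unsigned long long tsc, int count) {
  return (tsc & (unsigned long long) (count - 1)) != 0;  // nonzero => branch taken, update skipped
}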
1299
1300// Perform abort ratio calculation, set no_rtm bit if high ratio
1301// input:  rtm_counters_Reg (RTMLockingCounters* address)
1302// tmpReg, rtm_counters_Reg and flags are killed
1303void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
1304                                                 Register rtm_counters_Reg,
1305                                                 RTMLockingCounters* rtm_counters,
1306                                                 Metadata* method_data) {
1307  Label L_done, L_check_always_rtm1, L_check_always_rtm2;
1308
1309  if (RTMLockingCalculationDelay > 0) {
1310    // Delay calculation
1311    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
1312    testptr(tmpReg, tmpReg);
1313    jccb(Assembler::equal, L_done);
1314  }
1315  // Abort ratio calculation only if abort_count > RTMAbortThreshold
1316  //   Aborted transactions = abort_count * 100
1317  //   All transactions = total_count *  RTMTotalCountIncrRate
1318  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
1319
1320  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
1321  cmpptr(tmpReg, RTMAbortThreshold);
1322  jccb(Assembler::below, L_check_always_rtm2);
1323  imulptr(tmpReg, tmpReg, 100);
1324
1325  Register scrReg = rtm_counters_Reg;
1326  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1327  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
1328  imulptr(scrReg, scrReg, RTMAbortRatio);
1329  cmpptr(tmpReg, scrReg);
1330  jccb(Assembler::below, L_check_always_rtm1);
1331  if (method_data != NULL) {
1332    // set rtm_state to "no rtm" in MDO
1333    mov_metadata(tmpReg, method_data);
1334    if (os::is_MP()) {
1335      lock();
1336    }
1337    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
1338  }
1339  jmpb(L_done);
1340  bind(L_check_always_rtm1);
1341  // Reload RTMLockingCounters* address
1342  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1343  bind(L_check_always_rtm2);
1344  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1345  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
1346  jccb(Assembler::below, L_done);
1347  if (method_data != NULL) {
1348    // set rtm_state to "always rtm" in MDO
1349    mov_metadata(tmpReg, method_data);
1350    if (os::is_MP()) {
1351      lock();
1352    }
1353    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
1354  }
1355  bind(L_done);
1356}
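// The comparison above implements, entirely in integer arithmetic:
//     abort_count * 100  >=  total_count * RTMTotalCountIncrRate * RTMAbortRatio
// For example, with RTMTotalCountIncrRate = 64 and RTMAbortRatio = 50 (percent),
// a sampled total_count of 1000 stands for roughly 64000 transactions, so the
// no_rtm bit is set once abort_count * 100 >= 1000 * 64 * 50, i.e. at 32000 aborts.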
1357
1358// Update counters and perform abort ratio calculation
1359// input:  abort_status_Reg
1360// rtm_counters_Reg, flags are killed
1361void MacroAssembler::rtm_profiling(Register abort_status_Reg,
1362                                   Register rtm_counters_Reg,
1363                                   RTMLockingCounters* rtm_counters,
1364                                   Metadata* method_data,
1365                                   bool profile_rtm) {
1366
1367  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1368  // update rtm counters based on rax value at abort
1369  // reads abort_status_Reg, updates flags
1370  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1371  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
1372  if (profile_rtm) {
1373    // Save abort status because abort_status_Reg is used by following code.
1374    if (RTMRetryCount > 0) {
1375      push(abort_status_Reg);
1376    }
1377    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1378    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
1379    // restore abort status
1380    if (RTMRetryCount > 0) {
1381      pop(abort_status_Reg);
1382    }
1383  }
1384}
1385
1386// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
1387// inputs: retry_count_Reg
1388//       : abort_status_Reg
1389// output: retry_count_Reg decremented by 1
1390// flags are killed
1391void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
1392  Label doneRetry;
1393  assert(abort_status_Reg == rax, "");
1394  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
1395  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
1396  // if reason is in 0x6 and retry count != 0 then retry
1397  andptr(abort_status_Reg, 0x6);
1398  jccb(Assembler::zero, doneRetry);
1399  testl(retry_count_Reg, retry_count_Reg);
1400  jccb(Assembler::zero, doneRetry);
1401  pause();
1402  decrementl(retry_count_Reg);
1403  jmp(retryLabel);
1404  bind(doneRetry);
1405}
1406
1407// Spin and retry if lock is busy,
1408// inputs: box_Reg (monitor address)
1409//       : retry_count_Reg
1410// output: retry_count_Reg decremented by 1
1411//       : clear z flag if retry count exceeded
1412// tmp_Reg, scr_Reg, flags are killed
1413void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
1414                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
1415  Label SpinLoop, SpinExit, doneRetry;
1416  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1417
1418  testl(retry_count_Reg, retry_count_Reg);
1419  jccb(Assembler::zero, doneRetry);
1420  decrementl(retry_count_Reg);
1421  movptr(scr_Reg, RTMSpinLoopCount);
1422
1423  bind(SpinLoop);
1424  pause();
1425  decrementl(scr_Reg);
1426  jccb(Assembler::lessEqual, SpinExit);
1427  movptr(tmp_Reg, Address(box_Reg, owner_offset));
1428  testptr(tmp_Reg, tmp_Reg);
1429  jccb(Assembler::notZero, SpinLoop);
1430
1431  bind(SpinExit);
1432  jmp(retryLabel);
1433  bind(doneRetry);
1434  incrementl(retry_count_Reg); // clear z flag
1435}
1436
1437// Use RTM for normal stack locks
1438// Input: objReg (object to lock)
1439void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
1440                                       Register retry_on_abort_count_Reg,
1441                                       RTMLockingCounters* stack_rtm_counters,
1442                                       Metadata* method_data, bool profile_rtm,
1443                                       Label& DONE_LABEL, Label& IsInflated) {
1444  assert(UseRTMForStackLocks, "why call this otherwise?");
1445  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1446  assert(tmpReg == rax, "");
1447  assert(scrReg == rdx, "");
1448  Label L_rtm_retry, L_decrement_retry, L_on_abort;
1449
1450  if (RTMRetryCount > 0) {
1451    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1452    bind(L_rtm_retry);
1453  }
1454  movptr(tmpReg, Address(objReg, 0));
1455  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
1456  jcc(Assembler::notZero, IsInflated);
1457
1458  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1459    Label L_noincrement;
1460    if (RTMTotalCountIncrRate > 1) {
1461      // tmpReg, scrReg and flags are killed
1462      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1463    }
1464    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1465    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1466    bind(L_noincrement);
1467  }
1468  xbegin(L_on_abort);
1469  movptr(tmpReg, Address(objReg, 0));       // fetch markword
1470  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1471  cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1472  jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
1473
1474  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1475  if (UseRTMXendForLockBusy) {
1476    xend();
1477    movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
1478    jmp(L_decrement_retry);
1479  }
1480  else {
1481    xabort(0);
1482  }
1483  bind(L_on_abort);
1484  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1485    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
1486  }
1487  bind(L_decrement_retry);
1488  if (RTMRetryCount > 0) {
1489    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1490    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1491  }
1492}
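// Shape of the transactional fast path above, written with the RTM intrinsics from
// <immintrin.h> (an illustrative stand-alone sketch, not HotSpot code; field and
// constant names are placeholders):
//
//   unsigned status = _xbegin();
//   if (status == _XBEGIN_STARTED) {
//     if ((obj->mark & lock_mask) == unlocked_value) return;  // mark word stays in the read set
//     _xabort(0);                                             // already locked: abort the transaction
//   }
//   // abort path: update RTM counters, maybe retry, else fall back to stack locking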
1493
1494// Use RTM for inflating locks
1495// inputs: objReg (object to lock)
1496//         boxReg (on-stack box address (displaced header location) - KILLED)
1497//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
1498void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
1499                                          Register scrReg, Register retry_on_busy_count_Reg,
1500                                          Register retry_on_abort_count_Reg,
1501                                          RTMLockingCounters* rtm_counters,
1502                                          Metadata* method_data, bool profile_rtm,
1503                                          Label& DONE_LABEL) {
1504  assert(UseRTMLocking, "why call this otherwise?");
1505  assert(tmpReg == rax, "");
1506  assert(scrReg == rdx, "");
1507  Label L_rtm_retry, L_decrement_retry, L_on_abort;
1508  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1509
1510  // Without cast to int32_t a movptr will destroy r10 which is typically obj
1511  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1512  movptr(boxReg, tmpReg); // Save ObjectMonitor address
1513
1514  if (RTMRetryCount > 0) {
1515    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
1516    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1517    bind(L_rtm_retry);
1518  }
1519  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1520    Label L_noincrement;
1521    if (RTMTotalCountIncrRate > 1) {
1522      // tmpReg, scrReg and flags are killed
1523      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1524    }
1525    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1526    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1527    bind(L_noincrement);
1528  }
1529  xbegin(L_on_abort);
1530  movptr(tmpReg, Address(objReg, 0));
1531  movptr(tmpReg, Address(tmpReg, owner_offset));
1532  testptr(tmpReg, tmpReg);
1533  jcc(Assembler::zero, DONE_LABEL);
1534  if (UseRTMXendForLockBusy) {
1535    xend();
1536    jmp(L_decrement_retry);
1537  }
1538  else {
1539    xabort(0);
1540  }
1541  bind(L_on_abort);
1542  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1543  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1544    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
1545  }
1546  if (RTMRetryCount > 0) {
1547    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1548    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1549  }
1550
1551  movptr(tmpReg, Address(boxReg, owner_offset));
1552  testptr(tmpReg, tmpReg);
1553  jccb(Assembler::notZero, L_decrement_retry);
1554
1555  // Appears unlocked - try to swing _owner from null to non-null.
1556  // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1557#ifdef _LP64
1558  Register threadReg = r15_thread;
1559#else
1560  get_thread(scrReg);
1561  Register threadReg = scrReg;
1562#endif
1563  if (os::is_MP()) {
1564    lock();
1565  }
1566  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
1567
1568  if (RTMRetryCount > 0) {
1569    // on success we are done, otherwise retry
1570    jccb(Assembler::equal, DONE_LABEL) ;
1571    bind(L_decrement_retry);
1572    // Spin and retry if lock is busy.
1573    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1574  }
1575  else {
1576    bind(L_decrement_retry);
1577  }
1578}
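// A rough, illustrative sketch of the inflated RTM protocol above in C-like
// pseudocode (m->_owner is shorthand for the field at owner_offset; may_retry
// is a stand-in for rtm_retry_lock_on_abort/_on_busy, not a real helper):
//
//   box->dhw = unused_mark();                  // flag the box as "monitor in use"
//   for (int retries = RTMRetryCount; ; ) {
//     if (xbegin() == started) {
//       if (m->_owner == NULL) return;         // lock elided inside the transaction
//       abort_or_xend();                       // monitor busy, leave the transaction
//     }
//     if (!may_retry(--retries)) break;
//   }
//   // fallback: CAS(&m->_owner, NULL, Self); the resulting ZF is the success flag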
1579
1580#endif //  INCLUDE_RTM_OPT
1581
1582// Fast_Lock and Fast_Unlock used by C2
1583
1584// Because the transitions from emitted code to the runtime
1585// monitorenter/exit helper stubs are so slow it's critical that
1586// we inline both the stack-locking fast-path and the inflated fast path.
1587//
1588// See also: cmpFastLock and cmpFastUnlock.
1589//
1590// What follows is a specialized inline transliteration of the code
1591// in slow_enter() and slow_exit().  If we're concerned about I$ bloat
1592// another option would be to emit TrySlowEnter and TrySlowExit methods
1593// at startup-time.  These methods would accept arguments as
1594// (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1595// indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
1596// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1597// In practice, however, the # of lock sites is bounded and is usually small.
1598// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1599// if the processor uses simple bimodal branch predictors keyed by EIP,
1600// since the helper routines would be called from multiple synchronization
1601// sites.
1602//
1603// An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1604// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1605// to those specialized methods.  That'd give us a mostly platform-independent
1606// implementation that the JITs could optimize and inline at their pleasure.
1607// Done correctly, the only time we'd need to cross to native code would be
1608// to park() or unpark() threads.  We'd also need a few more unsafe operators
1609// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1610// (b) to provide explicit barriers or fence operations.
1611//
1612// TODO:
1613//
1614// *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
1615//    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
1616//    Given TLAB allocation, Self is usually manifested in a register, so passing it into
1617//    the lock operators would typically be faster than reifying Self.
1618//
1619// *  Ideally I'd define the primitives as:
1620//       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1621//       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1622//    Unfortunately ADLC bugs prevent us from expressing the ideal form.
1623//    Instead, we're stuck with the rather awkward and brittle register assignments below.
1624//    Furthermore the register assignments are overconstrained, possibly resulting in
1625//    sub-optimal code near the synchronization site.
1626//
1627// *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
1628//    Alternately, use a better sp-proximity test.
1629//
1630// *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1631//    Either one is sufficient to uniquely identify a thread.
1632//    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1633//
1634// *  Intrinsify notify() and notifyAll() for the common cases where the
1635//    object is locked by the calling thread but the waitlist is empty.
1636//    This avoids the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
1637//
1638// *  use jccb and jmpb instead of jcc and jmp to improve code density.
1639//    But beware of excessive branch density on AMD Opterons.
1640//
1641// *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
1642//    or failure of the fast-path.  If the fast-path fails then we pass
1643//    control to the slow-path, typically in C.  In Fast_Lock and
1644//    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
1645//    will emit a conditional branch immediately after the node.
1646//    So we have branches to branches and lots of ICC.ZF games.
1647//    Instead, it might be better to have C2 pass a "FailureLabel"
1648//    into Fast_Lock and Fast_Unlock.  In the case of success, control
1649//    will drop through the node.  ICC.ZF is undefined at exit.
1650//    In the case of failure, the node will branch directly to the
1651//    FailureLabel
1652
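// For reference, the current contract can be pictured with the following assumed
// caller-side shape (illustrative only, not the actual C2 node expansion):
//
//   fast_lock(obj, box, ...)      // leaves the verdict in ICC.ZF
//   jne   slow_path_stub          // ZF == 0  -> call the runtime monitorenter
//   ...                           // ZF == 1  -> lock acquired, fall through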
1653
1654// obj: object to lock
1655// box: on-stack box address (displaced header location) - KILLED
1656// rax,: tmp -- KILLED
1657// scr: tmp -- KILLED
1658void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
1659                               Register scrReg, Register cx1Reg, Register cx2Reg,
1660                               BiasedLockingCounters* counters,
1661                               RTMLockingCounters* rtm_counters,
1662                               RTMLockingCounters* stack_rtm_counters,
1663                               Metadata* method_data,
1664                               bool use_rtm, bool profile_rtm) {
1665  // Ensure the register assignments are disjoint
1666  assert(tmpReg == rax, "");
1667
1668  if (use_rtm) {
1669    assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
1670  } else {
1671    assert(cx1Reg == noreg, "");
1672    assert(cx2Reg == noreg, "");
1673    assert_different_registers(objReg, boxReg, tmpReg, scrReg);
1674  }
1675
1676  if (counters != NULL) {
1677    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
1678  }
1679  if (EmitSync & 1) {
1680      // set box->dhw = markOopDesc::unused_mark()
1681      // Force all sync thru slow-path: slow_enter() and slow_exit()
1682      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1683      cmpptr (rsp, (int32_t)NULL_WORD);
1684  } else {
1685    // Possible cases that we'll encounter in fast_lock
1686    // ------------------------------------------------
1687    // * Inflated
1688    //    -- unlocked
1689    //    -- Locked
1690    //       = by self
1691    //       = by other
1692    // * biased
1693    //    -- by Self
1694    //    -- by other
1695    // * neutral
1696    // * stack-locked
1697    //    -- by self
1698    //       = sp-proximity test hits
1699    //       = sp-proximity test generates false-negative
1700    //    -- by other
1701    //
1702
1703    Label IsInflated, DONE_LABEL;
1704
1705    // it's stack-locked, biased or neutral
1706    // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
1707    // order to reduce the number of conditional branches in the most common cases.
1708    // Beware -- there's a subtle invariant that fetch of the markword
1709    // at [FETCH], below, will never observe a biased encoding (*101b).
1710    // If this invariant is not held we risk exclusion (safety) failure.
1711    if (UseBiasedLocking && !UseOptoBiasInlining) {
1712      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1713    }
1714
1715#if INCLUDE_RTM_OPT
1716    if (UseRTMForStackLocks && use_rtm) {
1717      rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
1718                        stack_rtm_counters, method_data, profile_rtm,
1719                        DONE_LABEL, IsInflated);
1720    }
1721#endif // INCLUDE_RTM_OPT
1722
1723    movptr(tmpReg, Address(objReg, 0));          // [FETCH]
1724    testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1725    jccb(Assembler::notZero, IsInflated);
1726
1727    // Attempt stack-locking ...
1728    orptr (tmpReg, markOopDesc::unlocked_value);
1729    movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
1730    if (os::is_MP()) {
1731      lock();
1732    }
1733    cmpxchgptr(boxReg, Address(objReg, 0));      // Updates tmpReg
1734    if (counters != NULL) {
1735      cond_inc32(Assembler::equal,
1736                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1737    }
1738    jcc(Assembler::equal, DONE_LABEL);           // Success
1739
1740    // Recursive locking.
1741    // The object is stack-locked: markword contains stack pointer to BasicLock.
1742    // Locked by current thread if difference with current SP is less than one page.
1743    subptr(tmpReg, rsp);
1744    // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
1745    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1746    movptr(Address(boxReg, 0), tmpReg);
1747    if (counters != NULL) {
1748      cond_inc32(Assembler::equal,
1749                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1750    }
1751    jmp(DONE_LABEL);
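    // The test above is, roughly, the following C-like pseudocode (the exact mask
    // also discards the low tag bits):
    //   intptr_t delta = mark - rsp;
    //   if ((delta & ~(page_size - 1)) == 0) box->dhw = 0;      // recursive, ZF == 1
    //   else                                 box->dhw = delta;  // not ours, ZF == 0 -> slow path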
1752
1753    bind(IsInflated);
1754    // The object is inflated. tmpReg contains the ObjectMonitor address plus markOopDesc::monitor_value
1755
1756#if INCLUDE_RTM_OPT
1757    // Use the same RTM locking code in 32- and 64-bit VM.
1758    if (use_rtm) {
1759      rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1760                           rtm_counters, method_data, profile_rtm, DONE_LABEL);
1761    } else {
1762#endif // INCLUDE_RTM_OPT
1763
1764#ifndef _LP64
1765    // The object is inflated.
1766
1767    // boxReg refers to the on-stack BasicLock in the current frame.
1768    // We'd like to write:
1769    //   set box->_displaced_header = markOopDesc::unused_mark().  Any non-0 value suffices.
1770    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
1771    // additional latency as we have another ST in the store buffer that must drain.
1772
1773    if (EmitSync & 8192) {
1774       movptr(Address(boxReg, 0), 3);            // results in ST-before-CAS penalty
1775       get_thread (scrReg);
1776       movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2]
1777       movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
1778       if (os::is_MP()) {
1779         lock();
1780       }
1781       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1782    } else
1783    if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
1784       movptr(scrReg, boxReg);
1785       movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
1786
1787       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1788       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1789          // prefetchw [eax + Offset(_owner)-2]
1790          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1791       }
1792
1793       if ((EmitSync & 64) == 0) {
1794         // Optimistic form: consider XORL tmpReg,tmpReg
1795         movptr(tmpReg, NULL_WORD);
1796       } else {
1797         // Can suffer RTS->RTO upgrades on shared or cold $ lines
1798         // Test-And-CAS instead of CAS
1799         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1800         testptr(tmpReg, tmpReg);                   // Locked ?
1801         jccb  (Assembler::notZero, DONE_LABEL);
1802       }
1803
1804       // Appears unlocked - try to swing _owner from null to non-null.
1805       // Ideally, I'd manifest "Self" with get_thread and then attempt
1806       // to CAS the register containing Self into m->Owner.
1807       // But we don't have enough registers, so instead we can either try to CAS
1808       // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
1809       // we later store "Self" into m->Owner.  Transiently storing a stack address
1810       // (rsp or the address of the box) into  m->owner is harmless.
1811       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1812       if (os::is_MP()) {
1813         lock();
1814       }
1815       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1816       movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
1817       jccb  (Assembler::notZero, DONE_LABEL);
1818       get_thread (scrReg);                    // beware: clobbers ICCs
1819       movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
1820       xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
1821
1822       // If the CAS fails we can either retry or pass control to the slow-path.
1823       // We use the latter tactic.
1824       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1825       // If the CAS was successful ...
1826       //   Self has acquired the lock
1827       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1828       // Intentional fall-through into DONE_LABEL ...
1829    } else {
1830       movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
1831       movptr(boxReg, tmpReg);
1832
1833       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1834       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1835          // prefetchw [eax + Offset(_owner)-2]
1836          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1837       }
1838
1839       if ((EmitSync & 64) == 0) {
1840         // Optimistic form
1841         xorptr  (tmpReg, tmpReg);
1842       } else {
1843         // Can suffer RTS->RTO upgrades on shared or cold $ lines
1844         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1845         testptr(tmpReg, tmpReg);                   // Locked ?
1846         jccb  (Assembler::notZero, DONE_LABEL);
1847       }
1848
1849       // Appears unlocked - try to swing _owner from null to non-null.
1850       // Use either "Self" (in scr) or rsp as thread identity in _owner.
1851       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1852       get_thread (scrReg);
1853       if (os::is_MP()) {
1854         lock();
1855       }
1856       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1857
1858       // If the CAS fails we can either retry or pass control to the slow-path.
1859       // We use the latter tactic.
1860       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1861       // If the CAS was successful ...
1862       //   Self has acquired the lock
1863       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1864       // Intentional fall-through into DONE_LABEL ...
1865    }
1866#else // _LP64
1867    // It's inflated
1868    movq(scrReg, tmpReg);
1869    xorq(tmpReg, tmpReg);
1870
1871    if (os::is_MP()) {
1872      lock();
1873    }
1874    cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1875    // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
1876    // Without cast to int32_t movptr will destroy r10 which is typically obj.
1877    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1878    // Intentional fall-through into DONE_LABEL ...
1879    // Propagate ICC.ZF from CAS above into DONE_LABEL.
1880#endif // _LP64
1881#if INCLUDE_RTM_OPT
1882    } // use_rtm()
1883#endif
1884    // DONE_LABEL is a hot target - we'd really like to place it at the
1885    // start of cache line by padding with NOPs.
1886    // See the AMD and Intel software optimization manuals for the
1887    // most efficient "long" NOP encodings.
1888    // Unfortunately none of our alignment mechanisms suffice.
1889    bind(DONE_LABEL);
1890
1891    // At DONE_LABEL the icc ZFlag is set as follows ...
1892    // Fast_Unlock uses the same protocol.
1893    // ZFlag == 1 -> Success
1894    // ZFlag == 0 -> Failure - force control through the slow-path
1895  }
1896}
1897
1898// obj: object to unlock
1899// box: box address (displaced header location), killed.  Must be EAX.
1900// tmp: killed, cannot be obj nor box.
1901//
1902// Some commentary on balanced locking:
1903//
1904// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
1905// Methods that don't have provably balanced locking are forced to run in the
1906// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1907// The interpreter provides two properties:
1908// I1:  At return-time the interpreter automatically and quietly unlocks any
1909//      objects acquired by the current activation (frame).  Recall that the
1910//      interpreter maintains an on-stack list of locks currently held by
1911//      a frame.
1912// I2:  If a method attempts to unlock an object that is not held by
1913//      the frame, the interpreter throws IMSX.
1914//
1915// Let's say A(), which has provably balanced locking, acquires O and then calls B().
1916// B() doesn't have provably balanced locking so it runs in the interpreter.
1917// Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
1918// is still locked by A().
1919//
1920// The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
1921// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
1922// should not be unlocked by "normal" java-level locking and vice-versa.  The specification
1923// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
1924// Arguably given that the spec legislates the JNI case as undefined our implementation
1925// could reasonably *avoid* checking owner in Fast_Unlock().
1926// In the interest of performance we elide the m->Owner==Self check in unlock.
1927// A perfectly viable alternative is to elide the owner check except when
1928// Xcheck:jni is enabled.
1929
1930void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
1931  assert(boxReg == rax, "");
1932  assert_different_registers(objReg, boxReg, tmpReg);
1933
1934  if (EmitSync & 4) {
1935    // Disable - inhibit all inlining.  Force control through the slow-path
1936    cmpptr (rsp, 0);
1937  } else {
1938    Label DONE_LABEL, Stacked, CheckSucc;
1939
1940    // Critically, the biased locking test must have precedence over
1941    // and appear before the (box->dhw == 0) recursive stack-lock test.
1942    if (UseBiasedLocking && !UseOptoBiasInlining) {
1943       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
1944    }
1945
1946#if INCLUDE_RTM_OPT
1947    if (UseRTMForStackLocks && use_rtm) {
1948      assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1949      Label L_regular_unlock;
1950      movptr(tmpReg, Address(objReg, 0));           // fetch markword
1951      andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1952      cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1953      jccb(Assembler::notEqual, L_regular_unlock);  // if !HLE RegularLock
1954      xend();                                       // otherwise end...
1955      jmp(DONE_LABEL);                              // ... and we're done
1956      bind(L_regular_unlock);
1957    }
1958#endif
1959
1960    cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
1961    jcc   (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
1962    movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
1963    testptr(tmpReg, markOopDesc::monitor_value);    // Inflated?
1964    jccb  (Assembler::zero, Stacked);
1965
1966    // It's inflated.
1967#if INCLUDE_RTM_OPT
1968    if (use_rtm) {
1969      Label L_regular_inflated_unlock;
1970      int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1971      movptr(boxReg, Address(tmpReg, owner_offset));
1972      testptr(boxReg, boxReg);
1973      jccb(Assembler::notZero, L_regular_inflated_unlock);
1974      xend();
1975      jmpb(DONE_LABEL);
1976      bind(L_regular_inflated_unlock);
1977    }
1978#endif
1979
1980    // Despite our balanced locking property we still check that m->_owner == Self
1981    // as java routines or native JNI code called by this thread might
1982    // have released the lock.
1983    // Refer to the comments in synchronizer.cpp for how we might encode extra
1984    // state in _succ so we can avoid fetching EntryList|cxq.
1985    //
1986    // I'd like to add more cases in fast_lock() and fast_unlock() --
1987    // such as recursive enter and exit -- but we have to be wary of
1988    // I$ bloat, T$ effects and BP$ effects.
1989    //
1990    // If there's no contention try a 1-0 exit.  That is, exit without
1991    // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
1992    // we detect and recover from the race that the 1-0 exit admits.
1993    //
1994    // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
1995    // before it STs null into _owner, releasing the lock.  Updates
1996    // to data protected by the critical section must be visible before
1997    // we drop the lock (and thus before any other thread could acquire
1998    // the lock and observe the fields protected by the lock).
1999    // IA32's memory-model is SPO, so STs are ordered with respect to
2000    // each other and there's no need for an explicit barrier (fence).
2001    // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
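    // The 1-0 exit emitted below corresponds, roughly, to this C-like pseudocode
    // (field names abbreviated; it mirrors one of the EmitSync variants):
    //   if (m->_recursions != 0) goto done;                  // ZF == 0 -> slow path
    //   if ((m->_EntryList | m->_cxq) == 0) {
    //     m->_owner = NULL;                                  // plain store suffices on TSO
    //     goto done;                                         // ZF == 1 -> success
    //   }
    //   goto check_succ;                                     // contended, see CheckSucc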
2002#ifndef _LP64
2003    get_thread (boxReg);
2004    if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
2005      // prefetchw [ebx + Offset(_owner)-2]
2006      prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2007    }
2008
2009    // Note that we could employ various encoding schemes to reduce
2010    // the number of loads below (currently 4) to just 2 or 3.
2011    // Refer to the comments in synchronizer.cpp.
2012    // In practice the chain of fetches doesn't seem to impact performance, however.
2013    xorptr(boxReg, boxReg);
2014    if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
2015       // Attempt to reduce branch density - AMD's branch predictor.
2016       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2017       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2018       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2019       jccb  (Assembler::notZero, DONE_LABEL);
2020       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2021       jmpb  (DONE_LABEL);
2022    } else {
2023       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2024       jccb  (Assembler::notZero, DONE_LABEL);
2025       movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2026       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2027       jccb  (Assembler::notZero, CheckSucc);
2028       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2029       jmpb  (DONE_LABEL);
2030    }
2031
2032    // The following code fragment (EmitSync & 65536) improves the performance of
2033    // contended applications and contended synchronization microbenchmarks.
2034    // Unfortunately the emission of the code - even though not executed - causes regressions
2035    // in scimark and jetstream, evidently because of $ effects.  Replacing the code
2036    // with an equal number of never-executed NOPs results in the same regression.
2037    // We leave it off by default.
2038
2039    if ((EmitSync & 65536) != 0) {
2040       Label LSuccess, LGoSlowPath ;
2041
2042       bind  (CheckSucc);
2043
2044       // Optional pre-test ... it's safe to elide this
2045       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2046       jccb(Assembler::zero, LGoSlowPath);
2047
2048       // We have a classic Dekker-style idiom:
2049       //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
2050       // There are a number of ways to implement the barrier:
2051       // (1) lock:andl &m->_owner, 0
2052       //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
2053       //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
2054       //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
2055       // (2) If supported, an explicit MFENCE is appealing.
2056       //     In older IA32 processors MFENCE is slower than lock:add or xchg
2057       //     particularly if the write-buffer is full as might be the case if
2058       //     stores closely precede the fence or fence-equivalent instruction.
2059       //     See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2060       //     as the situation has changed with Nehalem and Shanghai.
2061       // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
2062       //     The $lines underlying the top-of-stack should be in M-state.
2063       //     The locked add instruction is serializing, of course.
2064       // (4) Use xchg, which is serializing
2065       //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
2066       // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
2067       //     The integer condition codes will tell us if succ was 0.
2068       //     Since _succ and _owner should reside in the same $line and
2069       //     we just stored into _owner, it's likely that the $line
2070       //     remains in M-state for the lock:orl.
2071       //
2072       // We currently use (3), although it's likely that switching to (2)
2073       // is correct for the future.
2074
2075       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2076       if (os::is_MP()) {
2077         lock(); addptr(Address(rsp, 0), 0);
2078       }
2079       // Ratify _succ remains non-null
2080       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
2081       jccb  (Assembler::notZero, LSuccess);
2082
2083       xorptr(boxReg, boxReg);                  // box is really EAX
2084       if (os::is_MP()) { lock(); }
2085       cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2086       jccb  (Assembler::notEqual, LSuccess);
2087       // Since we're low on registers we installed rsp as a placeholder in _owner.
2088       // Now install Self over rsp.  This is safe as we're transitioning from
2089       // non-null to non-null.
2090       get_thread (boxReg);
2091       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
2092       // Intentional fall-through into LGoSlowPath ...
2093
2094       bind  (LGoSlowPath);
2095       orptr(boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2096       jmpb  (DONE_LABEL);
2097
2098       bind  (LSuccess);
2099       xorptr(boxReg, boxReg);                 // set ICC.ZF=1 to indicate success
2100       jmpb  (DONE_LABEL);
2101    }
2102
2103    bind (Stacked);
2104    // It's not inflated and it's not recursively stack-locked and it's not biased.
2105    // It must be stack-locked.
2106    // Try to reset the header to displaced header.
2107    // The "box" value on the stack is stable, so we can reload
2108    // and be assured we observe the same value as above.
2109    movptr(tmpReg, Address(boxReg, 0));
2110    if (os::is_MP()) {
2111      lock();
2112    }
2113    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2114    // Intentional fall-through into DONE_LABEL
2115
2116    // DONE_LABEL is a hot target - we'd really like to place it at the
2117    // start of cache line by padding with NOPs.
2118    // See the AMD and Intel software optimization manuals for the
2119    // most efficient "long" NOP encodings.
2120    // Unfortunately none of our alignment mechanisms suffice.
2121    if ((EmitSync & 65536) == 0) {
2122       bind (CheckSucc);
2123    }
2124#else // _LP64
2125    // It's inflated
2126    if (EmitSync & 1024) {
2127      // Emit code to check that _owner == Self
2128      // We could fold the _owner test into subsequent code more efficiently
2129      // than using a stand-alone check, but since _owner checking is off by
2130      // default we don't bother. We also might consider predicating the
2131      // _owner==Self check on Xcheck:jni or running on a debug build.
2132      movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2133      xorptr(boxReg, r15_thread);
2134    } else {
2135      xorptr(boxReg, boxReg);
2136    }
2137    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2138    jccb  (Assembler::notZero, DONE_LABEL);
2139    movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2140    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2141    jccb  (Assembler::notZero, CheckSucc);
2142    movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2143    jmpb  (DONE_LABEL);
2144
2145    if ((EmitSync & 65536) == 0) {
2146      // Try to avoid passing control into the slow_path ...
2147      Label LSuccess, LGoSlowPath ;
2148      bind  (CheckSucc);
2149
2150      // The following optional optimization can be elided if necessary
2151      // Effectively: if (succ == null) goto SlowPath
2152      // The code reduces the window for a race, however,
2153      // and thus benefits performance.
2154      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2155      jccb  (Assembler::zero, LGoSlowPath);
2156
2157      if ((EmitSync & 16) && os::is_MP()) {
2158        orptr(boxReg, boxReg);
2159        xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2160      } else {
2161        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2162        if (os::is_MP()) {
2163          // Memory barrier/fence
2164          // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
2165          // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
2166          // This is faster on Nehalem and AMD Shanghai/Barcelona.
2167          // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2168          // We might also restructure (ST Owner=0;barrier;LD _Succ) to
2169          // (mov box,0; xchgq box, &m->Owner; LD _succ) .
2170          lock(); addl(Address(rsp, 0), 0);
2171        }
2172      }
2173      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2174      jccb  (Assembler::notZero, LSuccess);
2175
2176      // Rare inopportune interleaving - race.
2177      // The successor vanished in the small window above.
2178      // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
2179      // We need to ensure progress and succession.
2180      // Try to reacquire the lock.
2181      // If that fails then the new owner is responsible for succession and this
2182      // thread needs to take no further action and can exit via the fast path (success).
2183      // If the re-acquire succeeds then pass control into the slow path.
2184      // As implemented, this latter mode is horrible because we generated more
2185      // coherence traffic on the lock *and* artificially extended the critical section
2186      // length by virtue of passing control into the slow path.
2187
2188      // box is really RAX -- the following CMPXCHG depends on that binding
2189      // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
2190      movptr(boxReg, (int32_t)NULL_WORD);
2191      if (os::is_MP()) { lock(); }
2192      cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2193      jccb  (Assembler::notEqual, LSuccess);
2194      // Intentional fall-through into slow-path
2195
2196      bind  (LGoSlowPath);
2197      orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2198      jmpb  (DONE_LABEL);
2199
2200      bind  (LSuccess);
2201      testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
2202      jmpb  (DONE_LABEL);
2203    }
2204
2205    bind  (Stacked);
2206    movptr(tmpReg, Address (boxReg, 0));      // re-fetch
2207    if (os::is_MP()) { lock(); }
2208    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2209
2210    if (EmitSync & 65536) {
2211       bind (CheckSucc);
2212    }
2213#endif
2214    bind(DONE_LABEL);
2215  }
2216}
2217#endif // COMPILER2
2218
2219void MacroAssembler::c2bool(Register x) {
2220  // implements x == 0 ? 0 : 1
2221  // note: must only look at least-significant byte of x
2222  //       since C-style booleans are stored in one byte
2223  //       only! (was bug)
2224  andl(x, 0xFF);
2225  setb(Assembler::notZero, x);
2226}
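// Illustrative effect (not emitted code): only the least-significant byte decides
// the result, e.g.
//   x = 0x00  -> 0
//   x = 0x01  -> 1
//   x = 0x100 -> 0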
2227
2228// Wouldn't need if AddressLiteral version had new name
2229void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2230  Assembler::call(L, rtype);
2231}
2232
2233void MacroAssembler::call(Register entry) {
2234  Assembler::call(entry);
2235}
2236
2237void MacroAssembler::call(AddressLiteral entry) {
2238  if (reachable(entry)) {
2239    Assembler::call_literal(entry.target(), entry.rspec());
2240  } else {
2241    lea(rscratch1, entry);
2242    Assembler::call(rscratch1);
2243  }
2244}
2245
2246void MacroAssembler::ic_call(address entry) {
2247  RelocationHolder rh = virtual_call_Relocation::spec(pc());
2248  movptr(rax, (intptr_t)Universe::non_oop_word());
2249  call(AddressLiteral(entry, rh));
2250}
2251
2252// Implementation of call_VM versions
2253
2254void MacroAssembler::call_VM(Register oop_result,
2255                             address entry_point,
2256                             bool check_exceptions) {
2257  Label C, E;
2258  call(C, relocInfo::none);
2259  jmp(E);
2260
2261  bind(C);
2262  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
2263  ret(0);
2264
2265  bind(E);
2266}
2267
2268void MacroAssembler::call_VM(Register oop_result,
2269                             address entry_point,
2270                             Register arg_1,
2271                             bool check_exceptions) {
2272  Label C, E;
2273  call(C, relocInfo::none);
2274  jmp(E);
2275
2276  bind(C);
2277  pass_arg1(this, arg_1);
2278  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
2279  ret(0);
2280
2281  bind(E);
2282}
2283
2284void MacroAssembler::call_VM(Register oop_result,
2285                             address entry_point,
2286                             Register arg_1,
2287                             Register arg_2,
2288                             bool check_exceptions) {
2289  Label C, E;
2290  call(C, relocInfo::none);
2291  jmp(E);
2292
2293  bind(C);
2294
2295  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2296
2297  pass_arg2(this, arg_2);
2298  pass_arg1(this, arg_1);
2299  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
2300  ret(0);
2301
2302  bind(E);
2303}
2304
2305void MacroAssembler::call_VM(Register oop_result,
2306                             address entry_point,
2307                             Register arg_1,
2308                             Register arg_2,
2309                             Register arg_3,
2310                             bool check_exceptions) {
2311  Label C, E;
2312  call(C, relocInfo::none);
2313  jmp(E);
2314
2315  bind(C);
2316
2317  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2318  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2319  pass_arg3(this, arg_3);
2320
2321  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2322  pass_arg2(this, arg_2);
2323
2324  pass_arg1(this, arg_1);
2325  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
2326  ret(0);
2327
2328  bind(E);
2329}
2330
2331void MacroAssembler::call_VM(Register oop_result,
2332                             Register last_java_sp,
2333                             address entry_point,
2334                             int number_of_arguments,
2335                             bool check_exceptions) {
2336  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2337  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2338}
2339
2340void MacroAssembler::call_VM(Register oop_result,
2341                             Register last_java_sp,
2342                             address entry_point,
2343                             Register arg_1,
2344                             bool check_exceptions) {
2345  pass_arg1(this, arg_1);
2346  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2347}
2348
2349void MacroAssembler::call_VM(Register oop_result,
2350                             Register last_java_sp,
2351                             address entry_point,
2352                             Register arg_1,
2353                             Register arg_2,
2354                             bool check_exceptions) {
2355
2356  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2357  pass_arg2(this, arg_2);
2358  pass_arg1(this, arg_1);
2359  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2360}
2361
2362void MacroAssembler::call_VM(Register oop_result,
2363                             Register last_java_sp,
2364                             address entry_point,
2365                             Register arg_1,
2366                             Register arg_2,
2367                             Register arg_3,
2368                             bool check_exceptions) {
2369  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2370  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2371  pass_arg3(this, arg_3);
2372  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2373  pass_arg2(this, arg_2);
2374  pass_arg1(this, arg_1);
2375  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2376}
2377
2378void MacroAssembler::super_call_VM(Register oop_result,
2379                                   Register last_java_sp,
2380                                   address entry_point,
2381                                   int number_of_arguments,
2382                                   bool check_exceptions) {
2383  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2384  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2385}
2386
2387void MacroAssembler::super_call_VM(Register oop_result,
2388                                   Register last_java_sp,
2389                                   address entry_point,
2390                                   Register arg_1,
2391                                   bool check_exceptions) {
2392  pass_arg1(this, arg_1);
2393  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2394}
2395
2396void MacroAssembler::super_call_VM(Register oop_result,
2397                                   Register last_java_sp,
2398                                   address entry_point,
2399                                   Register arg_1,
2400                                   Register arg_2,
2401                                   bool check_exceptions) {
2402
2403  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2404  pass_arg2(this, arg_2);
2405  pass_arg1(this, arg_1);
2406  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2407}
2408
2409void MacroAssembler::super_call_VM(Register oop_result,
2410                                   Register last_java_sp,
2411                                   address entry_point,
2412                                   Register arg_1,
2413                                   Register arg_2,
2414                                   Register arg_3,
2415                                   bool check_exceptions) {
2416  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2417  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2418  pass_arg3(this, arg_3);
2419  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2420  pass_arg2(this, arg_2);
2421  pass_arg1(this, arg_1);
2422  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2423}
2424
2425void MacroAssembler::call_VM_base(Register oop_result,
2426                                  Register java_thread,
2427                                  Register last_java_sp,
2428                                  address  entry_point,
2429                                  int      number_of_arguments,
2430                                  bool     check_exceptions) {
2431  // determine java_thread register
2432  if (!java_thread->is_valid()) {
2433#ifdef _LP64
2434    java_thread = r15_thread;
2435#else
2436    java_thread = rdi;
2437    get_thread(java_thread);
2438#endif // LP64
2439  }
2440  // determine last_java_sp register
2441  if (!last_java_sp->is_valid()) {
2442    last_java_sp = rsp;
2443  }
2444  // debugging support
2445  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
2446  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
2447#ifdef ASSERT
2448  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
2449  // r12 is the heapbase.
2450  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2451#endif // ASSERT
2452
2453  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
2454  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2455
2456  // push java thread (becomes first argument of C function)
2457
2458  NOT_LP64(push(java_thread); number_of_arguments++);
2459  LP64_ONLY(mov(c_rarg0, r15_thread));
2460
2461  // set last Java frame before call
2462  assert(last_java_sp != rbp, "can't use ebp/rbp");
2463
2464  // Only interpreter should have to set fp
2465  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2466
2467  // do the call, remove parameters
2468  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2469
2470  // restore the thread (cannot use the pushed argument since arguments
2471  // may be overwritten by C code generated by an optimizing compiler);
2472  // however can use the register value directly if it is callee saved.
2473  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2474    // rdi & rsi (also r15) are callee saved -> nothing to do
2475#ifdef ASSERT
2476    guarantee(java_thread != rax, "change this code");
2477    push(rax);
2478    { Label L;
2479      get_thread(rax);
2480      cmpptr(java_thread, rax);
2481      jcc(Assembler::equal, L);
2482      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2483      bind(L);
2484    }
2485    pop(rax);
2486#endif
2487  } else {
2488    get_thread(java_thread);
2489  }
2490  // reset last Java frame
2491  // Only interpreter should have to clear fp
2492  reset_last_Java_frame(java_thread, true, false);
2493
2494#ifndef CC_INTERP
2495   // C++ interp handles this in the interpreter
2496  check_and_handle_popframe(java_thread);
2497  check_and_handle_earlyret(java_thread);
2498#endif /* CC_INTERP */
2499
2500  if (check_exceptions) {
2501    // check for pending exceptions (java_thread is set upon return)
2502    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2503#ifndef _LP64
2504    jump_cc(Assembler::notEqual,
2505            RuntimeAddress(StubRoutines::forward_exception_entry()));
2506#else
2507    // This used to conditionally jump to forward_exception; however, it is
2508    // possible, if we relocate, that the branch will not reach. So we must jump
2509    // around it so we can always reach the target.
2510
2511    Label ok;
2512    jcc(Assembler::equal, ok);
2513    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2514    bind(ok);
2515#endif // LP64
2516  }
2517
2518  // get oop result if there is one and reset the value in the thread
2519  if (oop_result->is_valid()) {
2520    get_vm_result(oop_result, java_thread);
2521  }
2522}
2523
2524void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2525
2526  // Calculate the value for last_Java_sp
2527  // somewhat subtle. call_VM does an intermediate call
2528  // which places a return address on the stack just under the
2529  // stack pointer as the user finished with it. This allows
2530  // us to retrieve last_Java_pc from last_Java_sp[-1].
2531  // On 32bit we then have to push additional args on the stack to accomplish
2532  // the actual requested call. On 64bit call_VM can only use register args
2533  // so the only extra space is the return address that call_VM created.
2534  // This hopefully explains the calculations here.
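  // Illustrative stack picture for the 64-bit case (an assumption of this sketch;
  // on 32-bit the pushed arguments sit in between):
  //
  //   last_Java_sp ->  [ caller's words ... ]
  //            rsp ->  [ return address pushed by the call into the local stub ]
  //
  // hence last_Java_sp == rsp + wordSize and last_Java_pc == last_Java_sp[-1],
  // which is what the lea below computes.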
2535
2536#ifdef _LP64
2537  // We've pushed one address, correct last_Java_sp
2538  lea(rax, Address(rsp, wordSize));
2539#else
2540  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
2541#endif // LP64
2542
2543  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
2544
2545}
2546
2547void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2548  call_VM_leaf_base(entry_point, number_of_arguments);
2549}
2550
2551void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2552  pass_arg0(this, arg_0);
2553  call_VM_leaf(entry_point, 1);
2554}
2555
2556void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2557
2558  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2559  pass_arg1(this, arg_1);
2560  pass_arg0(this, arg_0);
2561  call_VM_leaf(entry_point, 2);
2562}
2563
2564void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2565  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2566  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2567  pass_arg2(this, arg_2);
2568  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2569  pass_arg1(this, arg_1);
2570  pass_arg0(this, arg_0);
2571  call_VM_leaf(entry_point, 3);
2572}
2573
2574void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2575  pass_arg0(this, arg_0);
2576  MacroAssembler::call_VM_leaf_base(entry_point, 1);
2577}
2578
2579void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2580
2581  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2582  pass_arg1(this, arg_1);
2583  pass_arg0(this, arg_0);
2584  MacroAssembler::call_VM_leaf_base(entry_point, 2);
2585}
2586
2587void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2588  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2589  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2590  pass_arg2(this, arg_2);
2591  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2592  pass_arg1(this, arg_1);
2593  pass_arg0(this, arg_0);
2594  MacroAssembler::call_VM_leaf_base(entry_point, 3);
2595}
2596
2597void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2598  LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
2599  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2600  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2601  pass_arg3(this, arg_3);
2602  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2603  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2604  pass_arg2(this, arg_2);
2605  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2606  pass_arg1(this, arg_1);
2607  pass_arg0(this, arg_0);
2608  MacroAssembler::call_VM_leaf_base(entry_point, 4);
2609}
2610
2611void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
2612  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
2613  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
2614  verify_oop(oop_result, "broken oop in call_VM_base");
2615}
2616
2617void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
2618  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
2619  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
2620}
2621
2622void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2623}
2624
2625void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2626}
2627
2628void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2629  if (reachable(src1)) {
2630    cmpl(as_Address(src1), imm);
2631  } else {
2632    lea(rscratch1, src1);
2633    cmpl(Address(rscratch1, 0), imm);
2634  }
2635}
2636
2637void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2638  assert(!src2.is_lval(), "use cmpptr");
2639  if (reachable(src2)) {
2640    cmpl(src1, as_Address(src2));
2641  } else {
2642    lea(rscratch1, src2);
2643    cmpl(src1, Address(rscratch1, 0));
2644  }
2645}
2646
2647void MacroAssembler::cmp32(Register src1, int32_t imm) {
2648  Assembler::cmpl(src1, imm);
2649}
2650
2651void MacroAssembler::cmp32(Register src1, Address src2) {
2652  Assembler::cmpl(src1, src2);
2653}
2654
2655void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2656  ucomisd(opr1, opr2);
2657
2658  Label L;
2659  if (unordered_is_less) {
2660    movl(dst, -1);
2661    jcc(Assembler::parity, L);
2662    jcc(Assembler::below , L);
2663    movl(dst, 0);
2664    jcc(Assembler::equal , L);
2665    increment(dst);
2666  } else { // unordered is greater
2667    movl(dst, 1);
2668    jcc(Assembler::parity, L);
2669    jcc(Assembler::above , L);
2670    movl(dst, 0);
2671    jcc(Assembler::equal , L);
2672    decrementl(dst);
2673  }
2674  bind(L);
2675}
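// Result of the sequence above, for reference:
//   opr1 <  opr2                      -> dst = -1
//   opr1 == opr2                      -> dst =  0
//   opr1 >  opr2                      -> dst = +1
//   unordered (either operand is NaN) -> dst = -1 if unordered_is_less, else +1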
2676
2677void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2678  ucomiss(opr1, opr2);
2679
2680  Label L;
2681  if (unordered_is_less) {
2682    movl(dst, -1);
2683    jcc(Assembler::parity, L);
2684    jcc(Assembler::below , L);
2685    movl(dst, 0);
2686    jcc(Assembler::equal , L);
2687    increment(dst);
2688  } else { // unordered is greater
2689    movl(dst, 1);
2690    jcc(Assembler::parity, L);
2691    jcc(Assembler::above , L);
2692    movl(dst, 0);
2693    jcc(Assembler::equal , L);
2694    decrementl(dst);
2695  }
2696  bind(L);
2697}
2698
2699
2700void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
2701  if (reachable(src1)) {
2702    cmpb(as_Address(src1), imm);
2703  } else {
2704    lea(rscratch1, src1);
2705    cmpb(Address(rscratch1, 0), imm);
2706  }
2707}
2708
2709void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2710#ifdef _LP64
2711  if (src2.is_lval()) {
2712    movptr(rscratch1, src2);
2713    Assembler::cmpq(src1, rscratch1);
2714  } else if (reachable(src2)) {
2715    cmpq(src1, as_Address(src2));
2716  } else {
2717    lea(rscratch1, src2);
2718    Assembler::cmpq(src1, Address(rscratch1, 0));
2719  }
2720#else
2721  if (src2.is_lval()) {
2722    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2723  } else {
2724    cmpl(src1, as_Address(src2));
2725  }
2726#endif // _LP64
2727}
2728
2729void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2730  assert(src2.is_lval(), "not a mem-mem compare");
2731#ifdef _LP64
2732  // moves src2's literal address
2733  movptr(rscratch1, src2);
2734  Assembler::cmpq(src1, rscratch1);
2735#else
2736  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2737#endif // _LP64
2738}
2739
2740void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
2741  if (reachable(adr)) {
2742    if (os::is_MP())
2743      lock();
2744    cmpxchgptr(reg, as_Address(adr));
2745  } else {
2746    lea(rscratch1, adr);
2747    if (os::is_MP())
2748      lock();
2749    cmpxchgptr(reg, Address(rscratch1, 0));
2750  }
2751}
2752
2753void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
2754  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
2755}
2756
2757void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2758  if (reachable(src)) {
2759    Assembler::comisd(dst, as_Address(src));
2760  } else {
2761    lea(rscratch1, src);
2762    Assembler::comisd(dst, Address(rscratch1, 0));
2763  }
2764}
2765
2766void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2767  if (reachable(src)) {
2768    Assembler::comiss(dst, as_Address(src));
2769  } else {
2770    lea(rscratch1, src);
2771    Assembler::comiss(dst, Address(rscratch1, 0));
2772  }
2773}
2774
2775
2776void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
2777  Condition negated_cond = negate_condition(cond);
2778  Label L;
2779  jcc(negated_cond, L);
2780  pushf(); // Preserve flags
2781  atomic_incl(counter_addr);
2782  popf();
2783  bind(L);
2784}
2785
2786int MacroAssembler::corrected_idivl(Register reg) {
2787  // Full implementation of Java idiv and irem; checks for
2788  // special case as described in JVM spec., p.243 & p.271.
2789  // The function returns the (pc) offset of the idivl
2790  // instruction - may be needed for implicit exceptions.
2791  //
2792  //         normal case                           special case
2793  //
2794  // input : rax,: dividend                         min_int
2795  //         reg: divisor   (may not be rax,/rdx)   -1
2796  //
2797  // output: rax,: quotient  (= rax, idiv reg)       min_int
2798  //         rdx: remainder (= rax, irem reg)       0
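  // Worked example of the special case: min_int / -1 would overflow (2^31 is not
  // representable in 32 bits), and idivl would raise #DE; instead we return
  //   quotient  = min_int (0x80000000), remainder = 0
  // which is what the JVM spec requires.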
2799  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
2800  const int min_int = 0x80000000;
2801  Label normal_case, special_case;
2802
2803  // check for special case
2804  cmpl(rax, min_int);
2805  jcc(Assembler::notEqual, normal_case);
2806  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
2807  cmpl(reg, -1);
2808  jcc(Assembler::equal, special_case);
2809
2810  // handle normal case
2811  bind(normal_case);
2812  cdql();
2813  int idivl_offset = offset();
2814  idivl(reg);
2815
2816  // normal and special case exit
2817  bind(special_case);
2818
2819  return idivl_offset;
2820}
2821
2822
2823
2824void MacroAssembler::decrementl(Register reg, int value) {
2825  if (value == min_jint) {subl(reg, value) ; return; }
2826  if (value <  0) { incrementl(reg, -value); return; }
2827  if (value == 0) {                        ; return; }
2828  if (value == 1 && UseIncDec) { decl(reg) ; return; }
2829  /* else */      { subl(reg, value)       ; return; }
2830}
2831
2832void MacroAssembler::decrementl(Address dst, int value) {
2833  if (value == min_jint) {subl(dst, value) ; return; }
2834  if (value <  0) { incrementl(dst, -value); return; }
2835  if (value == 0) {                        ; return; }
2836  if (value == 1 && UseIncDec) { decl(dst) ; return; }
2837  /* else */      { subl(dst, value)       ; return; }
2838}
2839
2840void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2841  assert (shift_value > 0, "illegal shift value");
2842  Label _is_positive;
2843  testl (reg, reg);
2844  jcc (Assembler::positive, _is_positive);
2845  int offset = (1 << shift_value) - 1 ;
2846
2847  if (offset == 1) {
2848    incrementl(reg);
2849  } else {
2850    addl(reg, offset);
2851  }
2852
2853  bind (_is_positive);
2854  sarl(reg, shift_value);
2855}
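
// What the correction above achieves, in plain C++ (illustrative only):
// signed division by 2^shift must round toward zero, but an arithmetic shift
// rounds toward negative infinity, so negative dividends get (2^shift)-1
// added first.
//
//   static inline int div_by_pow2(int x, int shift) {
//     if (x < 0) x += (1 << shift) - 1;  // e.g. -7/4: (-7+3) >> 2 == -1, not -2
//     return x >> shift;                 // assumes arithmetic right shift (true on x86)
//   }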
2856
2857void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2858  if (reachable(src)) {
2859    Assembler::divsd(dst, as_Address(src));
2860  } else {
2861    lea(rscratch1, src);
2862    Assembler::divsd(dst, Address(rscratch1, 0));
2863  }
2864}
2865
2866void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2867  if (reachable(src)) {
2868    Assembler::divss(dst, as_Address(src));
2869  } else {
2870    lea(rscratch1, src);
2871    Assembler::divss(dst, Address(rscratch1, 0));
2872  }
2873}
2874
2875// !defined(COMPILER2) is needed because core builds define neither COMPILER1 nor COMPILER2
2876#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
2877void MacroAssembler::empty_FPU_stack() {
2878  if (VM_Version::supports_mmx()) {
2879    emms();
2880  } else {
2881    for (int i = 8; i-- > 0; ) ffree(i);
2882  }
2883}
2884#endif // !LP64 || C1 || !C2
2885
2886
2887// Defines obj, preserves var_size_in_bytes
2888void MacroAssembler::eden_allocate(Register obj,
2889                                   Register var_size_in_bytes,
2890                                   int con_size_in_bytes,
2891                                   Register t1,
2892                                   Label& slow_case) {
2893  assert(obj == rax, "obj must be in rax, for cmpxchg");
2894  assert_different_registers(obj, var_size_in_bytes, t1);
2895  if (!Universe::heap()->supports_inline_contig_alloc()) {
2896    jmp(slow_case);
2897  } else {
2898    Register end = t1;
2899    Label retry;
2900    bind(retry);
2901    ExternalAddress heap_top((address) Universe::heap()->top_addr());
2902    movptr(obj, heap_top);
2903    if (var_size_in_bytes == noreg) {
2904      lea(end, Address(obj, con_size_in_bytes));
2905    } else {
2906      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2907    }
2908    // if end < obj then we wrapped around => object too long => slow case
2909    cmpptr(end, obj);
2910    jcc(Assembler::below, slow_case);
2911    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2912    jcc(Assembler::above, slow_case);
2913    // Compare obj with the current top addr; if they are still equal, store the new
2914    // top addr (end) at the top addr pointer. Sets ZF if they were equal, and clears
2915    // it otherwise. Use lock prefix for atomicity on MPs.
2916    locked_cmpxchgptr(end, heap_top);
2917    jcc(Assembler::notEqual, retry);
2918  }
2919}
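
// The retry loop above is the usual CAS-based bump-pointer allocation; a
// sketch of the same protocol in C++ (illustrative only, the Atomic call and
// types are shown for exposition):
//
//   static HeapWord* eden_bump_allocate(HeapWord* volatile* top_addr,
//                                       HeapWord* heap_end, size_t size_in_words) {
//     HeapWord* obj;
//     HeapWord* end;
//     do {
//       obj = *top_addr;
//       end = obj + size_in_words;
//       if (end < obj || end > heap_end) return NULL;  // wrapped around or heap full -> slow case
//     } while ((HeapWord*)Atomic::cmpxchg_ptr(end, top_addr, obj) != obj);  // lost the race -> retry
//     return obj;
//   }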
2920
2921void MacroAssembler::enter() {
2922  push(rbp);
2923  mov(rbp, rsp);
2924}
2925
2926// A 5 byte nop that is safe for patching (see patch_verified_entry)
2927void MacroAssembler::fat_nop() {
2928  if (UseAddressNop) {
2929    addr_nop_5();
2930  } else {
2931    emit_int8(0x26); // es:
2932    emit_int8(0x2e); // cs:
2933    emit_int8(0x64); // fs:
2934    emit_int8(0x65); // gs:
2935    emit_int8((unsigned char)0x90);
2936  }
2937}
2938
2939void MacroAssembler::fcmp(Register tmp) {
2940  fcmp(tmp, 1, true, true);
2941}
2942
2943void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2944  assert(!pop_right || pop_left, "usage error");
2945  if (VM_Version::supports_cmov()) {
2946    assert(tmp == noreg, "unneeded temp");
2947    if (pop_left) {
2948      fucomip(index);
2949    } else {
2950      fucomi(index);
2951    }
2952    if (pop_right) {
2953      fpop();
2954    }
2955  } else {
2956    assert(tmp != noreg, "need temp");
2957    if (pop_left) {
2958      if (pop_right) {
2959        fcompp();
2960      } else {
2961        fcomp(index);
2962      }
2963    } else {
2964      fcom(index);
2965    }
2966    // convert FPU condition into eflags condition via rax,
2967    save_rax(tmp);
2968    fwait(); fnstsw_ax();
2969    sahf();
2970    restore_rax(tmp);
2971  }
2972  // condition codes set as follows:
2973  //
2974  // CF (corresponds to C0) if x < y
2975  // PF (corresponds to C2) if unordered
2976  // ZF (corresponds to C3) if x = y
2977}
2978
2979void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
2980  fcmp2int(dst, unordered_is_less, 1, true, true);
2981}
2982
2983void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
2984  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
2985  Label L;
2986  if (unordered_is_less) {
2987    movl(dst, -1);
2988    jcc(Assembler::parity, L);
2989    jcc(Assembler::below , L);
2990    movl(dst, 0);
2991    jcc(Assembler::equal , L);
2992    increment(dst);
2993  } else { // unordered is greater
2994    movl(dst, 1);
2995    jcc(Assembler::parity, L);
2996    jcc(Assembler::above , L);
2997    movl(dst, 0);
2998    jcc(Assembler::equal , L);
2999    decrementl(dst);
3000  }
3001  bind(L);
3002}
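
// What fcmp2int computes, as plain C++ (illustrative only): a three-way
// compare where an unordered result (NaN operand) collapses to -1 or +1
// depending on unordered_is_less.
//
//   static inline int fcmp2int(double x, double y, bool unordered_is_less) {
//     if (x != x || y != y) return unordered_is_less ? -1 : 1;  // NaN => unordered
//     if (x < y)  return -1;
//     if (x == y) return  0;
//     return 1;
//   }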
3003
3004void MacroAssembler::fld_d(AddressLiteral src) {
3005  fld_d(as_Address(src));
3006}
3007
3008void MacroAssembler::fld_s(AddressLiteral src) {
3009  fld_s(as_Address(src));
3010}
3011
3012void MacroAssembler::fld_x(AddressLiteral src) {
3013  Assembler::fld_x(as_Address(src));
3014}
3015
3016void MacroAssembler::fldcw(AddressLiteral src) {
3017  Assembler::fldcw(as_Address(src));
3018}
3019
3020void MacroAssembler::pow_exp_core_encoding() {
3021  // kills rax, rcx, rdx
3022  subptr(rsp,sizeof(jdouble));
3023  // computes 2^X. Stack: X ...
3024  // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
3025  // keep it on the thread's stack to compute 2^int(X) later
3026  // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
3027  // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
3028  fld_s(0);                 // Stack: X X ...
3029  frndint();                // Stack: int(X) X ...
3030  fsuba(1);                 // Stack: int(X) X-int(X) ...
3031  fistp_s(Address(rsp,0));  // move int(X) as integer to thread's stack. Stack: X-int(X) ...
3032  f2xm1();                  // Stack: 2^(X-int(X))-1 ...
3033  fld1();                   // Stack: 1 2^(X-int(X))-1 ...
3034  faddp(1);                 // Stack: 2^(X-int(X))
3035  // computes 2^(int(X)): add exponent bias (1023) to int(X), then
3036  // shift int(X)+1023 to exponent position.
3037  // Exponent is limited to 11 bits if int(X)+1023 does not fit in 11
3038  // bits, set result to NaN. 0x000 and 0x7FF are reserved exponent
3039  // values so detect them and set result to NaN.
3040  movl(rax,Address(rsp,0));
3041  movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
3042  addl(rax, 1023);
3043  movl(rdx,rax);
3044  shll(rax,20);
3045  // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
3046  addl(rdx,1);
3047  // Check that 1 < int(X)+1023+1 < 2048
3048  // in 3 steps:
3049  // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
3050  // 2- (int(X)+1023+1) != 0
3051  // 3- (int(X)+1023+1) != 1
3052  // Do 2- first because addl just updated the flags.
3053  cmov32(Assembler::equal,rax,rcx);
3054  cmpl(rdx,1);
3055  cmov32(Assembler::equal,rax,rcx);
3056  testl(rdx,rcx);
3057  cmov32(Assembler::notEqual,rax,rcx);
3058  movl(Address(rsp,4),rax);
3059  movl(Address(rsp,0),0);
3060  fmul_d(Address(rsp,0));   // Stack: 2^X ...
3061  addptr(rsp,sizeof(jdouble));
3062}
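
// The integer part becomes 2^int(X) by building the IEEE-754 double
// 1.0 * 2^int(X) directly: the biased exponent int(X)+1023 is placed in bits
// 52..62, i.e. bits 20..30 of the high 32-bit word (hence the shll(rax, 20)
// above). The same encoding in C++ (illustrative only):
//
//   static double pow2_of_int(int n) {     // assumes -1022 <= n <= 1023 (normal range)
//     union { jlong bits; double d; } u;
//     u.bits = ((jlong)(n + 1023)) << 52;  // sign 0, mantissa 0, biased exponent n+1023
//     return u.d;                          // == 2^n
//   }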
3063
3064void MacroAssembler::increase_precision() {
3065  subptr(rsp, BytesPerWord);
3066  fnstcw(Address(rsp, 0));
3067  movl(rax, Address(rsp, 0));
3068  orl(rax, 0x300);
3069  push(rax);
3070  fldcw(Address(rsp, 0));
3071  pop(rax);
3072}
3073
3074void MacroAssembler::restore_precision() {
3075  fldcw(Address(rsp, 0));
3076  addptr(rsp, BytesPerWord);
3077}
3078
3079void MacroAssembler::fast_pow() {
3080  // computes X^Y = 2^(Y * log2(X))
3081  // if fast computation is not possible, result is NaN. Requires
3082  // fallback from user of this macro.
3083  // increase precision for intermediate steps of the computation
3084  BLOCK_COMMENT("fast_pow {");
3085  increase_precision();
3086  fyl2x();                 // Stack: (Y*log2(X)) ...
3087  pow_exp_core_encoding(); // Stack: 2^(Y*log2(X)) = X^Y ...
3088  restore_precision();
3089  BLOCK_COMMENT("} fast_pow");
3090}
3091
3092void MacroAssembler::fast_exp() {
3093  // computes exp(X) = 2^(X * log2(e))
3094  // if fast computation is not possible, result is NaN. Requires
3095  // fallback from user of this macro.
3096  // increase precision for intermediate steps of the computation
3097  increase_precision();
3098  fldl2e();                // Stack: log2(e) X ...
3099  fmulp(1);                // Stack: (X*log2(e)) ...
3100  pow_exp_core_encoding(); // Stack: exp(X) ...
3101  restore_precision();
3102}
3103
3104void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
3105  // kills rax, rcx, rdx
3106  // pow and exp need 2 extra registers on the fpu stack.
3107  Label slow_case, done;
3108  Register tmp = noreg;
3109  if (!VM_Version::supports_cmov()) {
3110    // fcmp needs a temporary so preserve rdx,
3111    tmp = rdx;
3112  }
3113  Register tmp2 = rax;
3114  Register tmp3 = rcx;
3115
3116  if (is_exp) {
3117    // Stack: X
3118    fld_s(0);                   // duplicate argument for runtime call. Stack: X X
3119    fast_exp();                 // Stack: exp(X) X
3120    fcmp(tmp, 0, false, false); // Stack: exp(X) X
3121    // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
3122    jcc(Assembler::parity, slow_case);
3123    // get rid of duplicate argument. Stack: exp(X)
3124    if (num_fpu_regs_in_use > 0) {
3125      fxch();
3126      fpop();
3127    } else {
3128      ffree(1);
3129    }
3130    jmp(done);
3131  } else {
3132    // Stack: X Y
3133    Label x_negative, y_not_2;
3134
3135    static double two = 2.0;
3136    ExternalAddress two_addr((address)&two);
3137
3138    // constant may be too far away on 64 bit
3139    lea(tmp2, two_addr);
3140    fld_d(Address(tmp2, 0));    // Stack: 2 X Y
3141    fcmp(tmp, 2, true, false);  // Stack: X Y
3142    jcc(Assembler::parity, y_not_2);
3143    jcc(Assembler::notEqual, y_not_2);
3144
3145    fxch(); fpop();             // Stack: X
3146    fmul(0);                    // Stack: X*X
3147
3148    jmp(done);
3149
3150    bind(y_not_2);
3151
3152    fldz();                     // Stack: 0 X Y
3153    fcmp(tmp, 1, true, false);  // Stack: X Y
3154    jcc(Assembler::above, x_negative);
3155
3156    // X >= 0
3157
3158    fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
3159    fld_s(1);                   // Stack: X Y X Y
3160    fast_pow();                 // Stack: X^Y X Y
3161    fcmp(tmp, 0, false, false); // Stack: X^Y X Y
3162    // X^Y not equal to itself: X^Y is NaN go to slow case.
3163    jcc(Assembler::parity, slow_case);
3164    // get rid of duplicate arguments. Stack: X^Y
3165    if (num_fpu_regs_in_use > 0) {
3166      fxch(); fpop();
3167      fxch(); fpop();
3168    } else {
3169      ffree(2);
3170      ffree(1);
3171    }
3172    jmp(done);
3173
3174    // X < 0
3175    bind(x_negative);
3176
3177    fld_s(1);                   // Stack: Y X Y
3178    frndint();                  // Stack: int(Y) X Y
3179    fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
3180    jcc(Assembler::notEqual, slow_case);
3181
3182    subptr(rsp, 8);
3183
3184    // For X^Y, when X < 0, Y has to be an integer and the final
3185    // result depends on whether it's odd or even. We just checked
3186    // that int(Y) == Y.  We move int(Y) to gp registers as a 64 bit
3187    // integer to test its parity. If int(Y) is huge and doesn't fit
3188    // in the 64 bit integer range, the integer indefinite value will
3189    // end up in the gp registers. Huge numbers are all even, the
3190    // integer indefinite number is even so it's fine.
3191
3192#ifdef ASSERT
3193    // Let's check we don't end up with an integer indefinite number
3194    // when not expected. First test for huge numbers: check whether
3195    // int(Y)+1 == int(Y) which is true for very large numbers and
3196    // those are all even. A 64 bit integer is guaranteed to not
3197    // overflow for numbers where y+1 != y (when precision is set to
3198    // double precision).
3199    Label y_not_huge;
3200
3201    fld1();                     // Stack: 1 int(Y) X Y
3202    fadd(1);                    // Stack: 1+int(Y) int(Y) X Y
3203
3204#ifdef _LP64
3205    // trip to memory to force the precision down from double extended
3206    // precision
3207    fstp_d(Address(rsp, 0));
3208    fld_d(Address(rsp, 0));
3209#endif
3210
3211    fcmp(tmp, 1, true, false);  // Stack: int(Y) X Y
3212#endif
3213
3214    // move int(Y) as 64 bit integer to thread's stack
3215    fistp_d(Address(rsp,0));    // Stack: X Y
3216
3217#ifdef ASSERT
3218    jcc(Assembler::notEqual, y_not_huge);
3219
3220    // Y is huge so we know it's even. It may not fit in a 64 bit
3221    // integer and we don't want the debug code below to see the
3222    // integer indefinite value so overwrite int(Y) on the thread's
3223    // stack with 0.
3224    movl(Address(rsp, 0), 0);
3225    movl(Address(rsp, 4), 0);
3226
3227    bind(y_not_huge);
3228#endif
3229
3230    fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
3231    fld_s(1);                   // Stack: X Y X Y
3232    fabs();                     // Stack: abs(X) Y X Y
3233    fast_pow();                 // Stack: abs(X)^Y X Y
3234    fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
3235    // abs(X)^Y not equal to itself: abs(X)^Y is NaN go to slow case.
3236
3237    pop(tmp2);
3238    NOT_LP64(pop(tmp3));
3239    jcc(Assembler::parity, slow_case);
3240
3241#ifdef ASSERT
3242    // Check that int(Y) is not integer indefinite value (int
3243    // overflow). Shouldn't happen because for values that would
3244    // overflow, 1+int(Y)==Y which was tested earlier.
3245#ifndef _LP64
3246    {
3247      Label integer;
3248      testl(tmp2, tmp2);
3249      jcc(Assembler::notZero, integer);
3250      cmpl(tmp3, 0x80000000);
3251      jcc(Assembler::notZero, integer);
3252      STOP("integer indefinite value shouldn't be seen here");
3253      bind(integer);
3254    }
3255#else
3256    {
3257      Label integer;
3258      mov(tmp3, tmp2); // preserve tmp2 for parity check below
3259      shlq(tmp3, 1);
3260      jcc(Assembler::carryClear, integer);
3261      jcc(Assembler::notZero, integer);
3262      STOP("integer indefinite value shouldn't be seen here");
3263      bind(integer);
3264    }
3265#endif
3266#endif
3267
3268    // get rid of duplicate arguments. Stack: X^Y
3269    if (num_fpu_regs_in_use > 0) {
3270      fxch(); fpop();
3271      fxch(); fpop();
3272    } else {
3273      ffree(2);
3274      ffree(1);
3275    }
3276
3277    testl(tmp2, 1);
3278    jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
3279    // X <= 0, Y even: X^Y = -abs(X)^Y
3280
3281    fchs();                     // Stack: -abs(X)^Y Y
3282    jmp(done);
3283  }
3284
3285  // slow case: runtime call
3286  bind(slow_case);
3287
3288  fpop();                       // pop incorrect result or int(Y)
3289
3290  fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
3291                      is_exp ? 1 : 2, num_fpu_regs_in_use);
3292
3293  // Come here with result in F-TOS
3294  bind(done);
3295}
3296
3297void MacroAssembler::fpop() {
3298  ffree();
3299  fincstp();
3300}
3301
3302void MacroAssembler::fremr(Register tmp) {
3303  save_rax(tmp);
3304  { Label L;
3305    bind(L);
3306    fprem();
3307    fwait(); fnstsw_ax();
3308#ifdef _LP64
3309    testl(rax, 0x400);
3310    jcc(Assembler::notEqual, L);
3311#else
3312    sahf();
3313    jcc(Assembler::parity, L);
3314#endif // _LP64
3315  }
3316  restore_rax(tmp);
3317  // Result is in ST0.
3318  // Note: fxch & fpop to get rid of ST1
3319  // (otherwise FPU stack could overflow eventually)
3320  fxch(1);
3321  fpop();
3322}
3323
3324
3325void MacroAssembler::incrementl(AddressLiteral dst) {
3326  if (reachable(dst)) {
3327    incrementl(as_Address(dst));
3328  } else {
3329    lea(rscratch1, dst);
3330    incrementl(Address(rscratch1, 0));
3331  }
3332}
3333
3334void MacroAssembler::incrementl(ArrayAddress dst) {
3335  incrementl(as_Address(dst));
3336}
3337
3338void MacroAssembler::incrementl(Register reg, int value) {
3339  if (value == min_jint) {addl(reg, value) ; return; }
3340  if (value <  0) { decrementl(reg, -value); return; }
3341  if (value == 0) {                        ; return; }
3342  if (value == 1 && UseIncDec) { incl(reg) ; return; }
3343  /* else */      { addl(reg, value)       ; return; }
3344}
3345
3346void MacroAssembler::incrementl(Address dst, int value) {
3347  if (value == min_jint) {addl(dst, value) ; return; }
3348  if (value <  0) { decrementl(dst, -value); return; }
3349  if (value == 0) {                        ; return; }
3350  if (value == 1 && UseIncDec) { incl(dst) ; return; }
3351  /* else */      { addl(dst, value)       ; return; }
3352}
3353
3354void MacroAssembler::jump(AddressLiteral dst) {
3355  if (reachable(dst)) {
3356    jmp_literal(dst.target(), dst.rspec());
3357  } else {
3358    lea(rscratch1, dst);
3359    jmp(rscratch1);
3360  }
3361}
3362
3363void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
3364  if (reachable(dst)) {
3365    InstructionMark im(this);
3366    relocate(dst.reloc());
3367    const int short_size = 2;
3368    const int long_size = 6;
3369    int offs = (intptr_t)dst.target() - ((intptr_t)pc());
3370    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
3371      // 0111 tttn #8-bit disp
3372      emit_int8(0x70 | cc);
3373      emit_int8((offs - short_size) & 0xFF);
3374    } else {
3375      // 0000 1111 1000 tttn #32-bit disp
3376      emit_int8(0x0F);
3377      emit_int8((unsigned char)(0x80 | cc));
3378      emit_int32(offs - long_size);
3379    }
3380  } else {
3381#ifdef ASSERT
3382    warning("reversing conditional branch");
3383#endif /* ASSERT */
3384    Label skip;
3385    jccb(reverse[cc], skip);
3386    lea(rscratch1, dst);
3387    Assembler::jmp(rscratch1);
3388    bind(skip);
3389  }
3390}
3391
3392void MacroAssembler::ldmxcsr(AddressLiteral src) {
3393  if (reachable(src)) {
3394    Assembler::ldmxcsr(as_Address(src));
3395  } else {
3396    lea(rscratch1, src);
3397    Assembler::ldmxcsr(Address(rscratch1, 0));
3398  }
3399}
3400
3401int MacroAssembler::load_signed_byte(Register dst, Address src) {
3402  int off;
3403  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3404    off = offset();
3405    movsbl(dst, src); // movsxb
3406  } else {
3407    off = load_unsigned_byte(dst, src);
3408    shll(dst, 24);
3409    sarl(dst, 24);
3410  }
3411  return off;
3412}
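
// The pre-P6 path uses the classic shift pair for sign extension; the
// equivalent C++ (illustrative only, assumes arithmetic right shift on
// signed int as on the x86 targets here):
//
//   static inline int32_t sign_extend_byte(int32_t v) {
//     return (v << 24) >> 24;   // e.g. 0xff -> -1, 0x7f -> 127
//   }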
3413
3414// Note: load_signed_short used to be called load_signed_word.
3415// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3416// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3417// The term "word" in HotSpot means a 32- or 64-bit machine word.
3418int MacroAssembler::load_signed_short(Register dst, Address src) {
3419  int off;
3420  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3421    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
3422    // version but this is what 64bit has always done. This seems to imply
3423    // that users are only using 32bits worth.
3424    off = offset();
3425    movswl(dst, src); // movsxw
3426  } else {
3427    off = load_unsigned_short(dst, src);
3428    shll(dst, 16);
3429    sarl(dst, 16);
3430  }
3431  return off;
3432}
3433
3434int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3435  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3436  // and "3.9 Partial Register Penalties", p. 22.
3437  int off;
3438  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3439    off = offset();
3440    movzbl(dst, src); // movzxb
3441  } else {
3442    xorl(dst, dst);
3443    off = offset();
3444    movb(dst, src);
3445  }
3446  return off;
3447}
3448
3449// Note: load_unsigned_short used to be called load_unsigned_word.
3450int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3451  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3452  // and "3.9 Partial Register Penalties", p. 22.
3453  int off;
3454  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3455    off = offset();
3456    movzwl(dst, src); // movzxw
3457  } else {
3458    xorl(dst, dst);
3459    off = offset();
3460    movw(dst, src);
3461  }
3462  return off;
3463}
3464
3465void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3466  switch (size_in_bytes) {
3467#ifndef _LP64
3468  case  8:
3469    assert(dst2 != noreg, "second dest register required");
3470    movl(dst,  src);
3471    movl(dst2, src.plus_disp(BytesPerInt));
3472    break;
3473#else
3474  case  8:  movq(dst, src); break;
3475#endif
3476  case  4:  movl(dst, src); break;
3477  case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3478  case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
3479  default:  ShouldNotReachHere();
3480  }
3481}
3482
3483void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
3484  switch (size_in_bytes) {
3485#ifndef _LP64
3486  case  8:
3487    assert(src2 != noreg, "second source register required");
3488    movl(dst,                        src);
3489    movl(dst.plus_disp(BytesPerInt), src2);
3490    break;
3491#else
3492  case  8:  movq(dst, src); break;
3493#endif
3494  case  4:  movl(dst, src); break;
3495  case  2:  movw(dst, src); break;
3496  case  1:  movb(dst, src); break;
3497  default:  ShouldNotReachHere();
3498  }
3499}
3500
3501void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3502  if (reachable(dst)) {
3503    movl(as_Address(dst), src);
3504  } else {
3505    lea(rscratch1, dst);
3506    movl(Address(rscratch1, 0), src);
3507  }
3508}
3509
3510void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3511  if (reachable(src)) {
3512    movl(dst, as_Address(src));
3513  } else {
3514    lea(rscratch1, src);
3515    movl(dst, Address(rscratch1, 0));
3516  }
3517}
3518
3519// C++ bool manipulation
3520
3521void MacroAssembler::movbool(Register dst, Address src) {
3522  if(sizeof(bool) == 1)
3523    movb(dst, src);
3524  else if(sizeof(bool) == 2)
3525    movw(dst, src);
3526  else if(sizeof(bool) == 4)
3527    movl(dst, src);
3528  else
3529    // unsupported
3530    ShouldNotReachHere();
3531}
3532
3533void MacroAssembler::movbool(Address dst, bool boolconst) {
3534  if(sizeof(bool) == 1)
3535    movb(dst, (int) boolconst);
3536  else if(sizeof(bool) == 2)
3537    movw(dst, (int) boolconst);
3538  else if(sizeof(bool) == 4)
3539    movl(dst, (int) boolconst);
3540  else
3541    // unsupported
3542    ShouldNotReachHere();
3543}
3544
3545void MacroAssembler::movbool(Address dst, Register src) {
3546  if(sizeof(bool) == 1)
3547    movb(dst, src);
3548  else if(sizeof(bool) == 2)
3549    movw(dst, src);
3550  else if(sizeof(bool) == 4)
3551    movl(dst, src);
3552  else
3553    // unsupported
3554    ShouldNotReachHere();
3555}
3556
3557void MacroAssembler::movbyte(ArrayAddress dst, int src) {
3558  movb(as_Address(dst), src);
3559}
3560
3561void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
3562  if (reachable(src)) {
3563    movdl(dst, as_Address(src));
3564  } else {
3565    lea(rscratch1, src);
3566    movdl(dst, Address(rscratch1, 0));
3567  }
3568}
3569
3570void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
3571  if (reachable(src)) {
3572    movq(dst, as_Address(src));
3573  } else {
3574    lea(rscratch1, src);
3575    movq(dst, Address(rscratch1, 0));
3576  }
3577}
3578
3579void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3580  if (reachable(src)) {
3581    if (UseXmmLoadAndClearUpper) {
3582      movsd (dst, as_Address(src));
3583    } else {
3584      movlpd(dst, as_Address(src));
3585    }
3586  } else {
3587    lea(rscratch1, src);
3588    if (UseXmmLoadAndClearUpper) {
3589      movsd (dst, Address(rscratch1, 0));
3590    } else {
3591      movlpd(dst, Address(rscratch1, 0));
3592    }
3593  }
3594}
3595
3596void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3597  if (reachable(src)) {
3598    movss(dst, as_Address(src));
3599  } else {
3600    lea(rscratch1, src);
3601    movss(dst, Address(rscratch1, 0));
3602  }
3603}
3604
3605void MacroAssembler::movptr(Register dst, Register src) {
3606  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3607}
3608
3609void MacroAssembler::movptr(Register dst, Address src) {
3610  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3611}
3612
3613// src should NEVER be a real pointer. Use AddressLiteral for true pointers
3614void MacroAssembler::movptr(Register dst, intptr_t src) {
3615  LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
3616}
3617
3618void MacroAssembler::movptr(Address dst, Register src) {
3619  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3620}
3621
3622void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
3623  if (reachable(src)) {
3624    Assembler::movdqu(dst, as_Address(src));
3625  } else {
3626    lea(rscratch1, src);
3627    Assembler::movdqu(dst, Address(rscratch1, 0));
3628  }
3629}
3630
3631void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
3632  if (reachable(src)) {
3633    Assembler::movdqa(dst, as_Address(src));
3634  } else {
3635    lea(rscratch1, src);
3636    Assembler::movdqa(dst, Address(rscratch1, 0));
3637  }
3638}
3639
3640void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
3641  if (reachable(src)) {
3642    Assembler::movsd(dst, as_Address(src));
3643  } else {
3644    lea(rscratch1, src);
3645    Assembler::movsd(dst, Address(rscratch1, 0));
3646  }
3647}
3648
3649void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3650  if (reachable(src)) {
3651    Assembler::movss(dst, as_Address(src));
3652  } else {
3653    lea(rscratch1, src);
3654    Assembler::movss(dst, Address(rscratch1, 0));
3655  }
3656}
3657
3658void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
3659  if (reachable(src)) {
3660    Assembler::mulsd(dst, as_Address(src));
3661  } else {
3662    lea(rscratch1, src);
3663    Assembler::mulsd(dst, Address(rscratch1, 0));
3664  }
3665}
3666
3667void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
3668  if (reachable(src)) {
3669    Assembler::mulss(dst, as_Address(src));
3670  } else {
3671    lea(rscratch1, src);
3672    Assembler::mulss(dst, Address(rscratch1, 0));
3673  }
3674}
3675
3676void MacroAssembler::null_check(Register reg, int offset) {
3677  if (needs_explicit_null_check(offset)) {
3678    // provoke OS NULL exception if reg = NULL by
3679    // accessing M[reg] w/o changing any (non-CC) registers
3680    // NOTE: cmpl is plenty here to provoke a segv
3681    cmpptr(rax, Address(reg, 0));
3682    // Note: should probably use testl(rax, Address(reg, 0));
3683    //       may be shorter code (however, this version of
3684    //       testl needs to be implemented first)
3685  } else {
3686    // nothing to do, (later) access of M[reg + offset]
3687    // will provoke OS NULL exception if reg = NULL
3688  }
3689}
3690
3691void MacroAssembler::os_breakpoint() {
3692  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3693  // (e.g., MSVC can't call ps() otherwise)
3694  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3695}
3696
3697void MacroAssembler::pop_CPU_state() {
3698  pop_FPU_state();
3699  pop_IU_state();
3700}
3701
3702void MacroAssembler::pop_FPU_state() {
3703  NOT_LP64(frstor(Address(rsp, 0));)
3704  LP64_ONLY(fxrstor(Address(rsp, 0));)
3705  addptr(rsp, FPUStateSizeInWords * wordSize);
3706}
3707
3708void MacroAssembler::pop_IU_state() {
3709  popa();
3710  LP64_ONLY(addq(rsp, 8));
3711  popf();
3712}
3713
3714// Save Integer and Float state
3715// Warning: Stack must be 16 byte aligned (64bit)
3716void MacroAssembler::push_CPU_state() {
3717  push_IU_state();
3718  push_FPU_state();
3719}
3720
3721void MacroAssembler::push_FPU_state() {
3722  subptr(rsp, FPUStateSizeInWords * wordSize);
3723#ifndef _LP64
3724  fnsave(Address(rsp, 0));
3725  fwait();
3726#else
3727  fxsave(Address(rsp, 0));
3728#endif // LP64
3729}
3730
3731void MacroAssembler::push_IU_state() {
3732  // Push flags first because pusha kills them
3733  pushf();
3734  // Make sure rsp stays 16-byte aligned
3735  LP64_ONLY(subq(rsp, 8));
3736  pusha();
3737}
3738
3739void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
3740  // determine java_thread register
3741  if (!java_thread->is_valid()) {
3742    java_thread = rdi;
3743    get_thread(java_thread);
3744  }
3745  // we must set sp to zero to clear frame
3746  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
3747  if (clear_fp) {
3748    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
3749  }
3750
3751  if (clear_pc)
3752    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
3753
3754}
3755
3756void MacroAssembler::restore_rax(Register tmp) {
3757  if (tmp == noreg) pop(rax);
3758  else if (tmp != rax) mov(rax, tmp);
3759}
3760
3761void MacroAssembler::round_to(Register reg, int modulus) {
3762  addptr(reg, modulus - 1);
3763  andptr(reg, -modulus);
3764}
3765
3766void MacroAssembler::save_rax(Register tmp) {
3767  if (tmp == noreg) push(rax);
3768  else if (tmp != rax) mov(tmp, rax);
3769}
3770
3771// Write serialization page so VM thread can do a pseudo remote membar.
3772// We use the current thread pointer to calculate a thread specific
3773// offset to write to within the page. This minimizes bus traffic
3774// due to cache line collision.
3775void MacroAssembler::serialize_memory(Register thread, Register tmp) {
3776  movl(tmp, thread);
3777  shrl(tmp, os::get_serialize_page_shift_count());
3778  andl(tmp, (os::vm_page_size() - sizeof(int)));
3779
3780  Address index(noreg, tmp, Address::times_1);
3781  ExternalAddress page(os::get_memory_serialize_page());
3782
3783  // Size of store must match masking code above
3784  movl(as_Address(ArrayAddress(page, index)), tmp);
3785}
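
// The offset computation above, in C++ (illustrative only): every thread
// writes to its own int-sized slot inside the serialization page, so an
// mprotect of that page by the VM thread acts as the pseudo remote membar.
//
//   static inline int serialize_page_offset(uintptr_t thread) {
//     int offset = (int)(thread >> os::get_serialize_page_shift_count());
//     return offset & (os::vm_page_size() - sizeof(int));  // stay in the page, int aligned
//   }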
3786
3787// Calls to C land
3788//
3789// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
3790// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3791// has to be reset to 0. This is required to allow proper stack traversal.
3792void MacroAssembler::set_last_Java_frame(Register java_thread,
3793                                         Register last_java_sp,
3794                                         Register last_java_fp,
3795                                         address  last_java_pc) {
3796  // determine java_thread register
3797  if (!java_thread->is_valid()) {
3798    java_thread = rdi;
3799    get_thread(java_thread);
3800  }
3801  // determine last_java_sp register
3802  if (!last_java_sp->is_valid()) {
3803    last_java_sp = rsp;
3804  }
3805
3806  // last_java_fp is optional
3807
3808  if (last_java_fp->is_valid()) {
3809    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3810  }
3811
3812  // last_java_pc is optional
3813
3814  if (last_java_pc != NULL) {
3815    lea(Address(java_thread,
3816                 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3817        InternalAddress(last_java_pc));
3818
3819  }
3820  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3821}
3822
3823void MacroAssembler::shlptr(Register dst, int imm8) {
3824  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
3825}
3826
3827void MacroAssembler::shrptr(Register dst, int imm8) {
3828  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
3829}
3830
3831void MacroAssembler::sign_extend_byte(Register reg) {
3832  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
3833    movsbl(reg, reg); // movsxb
3834  } else {
3835    shll(reg, 24);
3836    sarl(reg, 24);
3837  }
3838}
3839
3840void MacroAssembler::sign_extend_short(Register reg) {
3841  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3842    movswl(reg, reg); // movsxw
3843  } else {
3844    shll(reg, 16);
3845    sarl(reg, 16);
3846  }
3847}
3848
3849void MacroAssembler::testl(Register dst, AddressLiteral src) {
3850  assert(reachable(src), "Address should be reachable");
3851  testl(dst, as_Address(src));
3852}
3853
3854void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3855  if (reachable(src)) {
3856    Assembler::sqrtsd(dst, as_Address(src));
3857  } else {
3858    lea(rscratch1, src);
3859    Assembler::sqrtsd(dst, Address(rscratch1, 0));
3860  }
3861}
3862
3863void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3864  if (reachable(src)) {
3865    Assembler::sqrtss(dst, as_Address(src));
3866  } else {
3867    lea(rscratch1, src);
3868    Assembler::sqrtss(dst, Address(rscratch1, 0));
3869  }
3870}
3871
3872void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3873  if (reachable(src)) {
3874    Assembler::subsd(dst, as_Address(src));
3875  } else {
3876    lea(rscratch1, src);
3877    Assembler::subsd(dst, Address(rscratch1, 0));
3878  }
3879}
3880
3881void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3882  if (reachable(src)) {
3883    Assembler::subss(dst, as_Address(src));
3884  } else {
3885    lea(rscratch1, src);
3886    Assembler::subss(dst, Address(rscratch1, 0));
3887  }
3888}
3889
3890void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
3891  if (reachable(src)) {
3892    Assembler::ucomisd(dst, as_Address(src));
3893  } else {
3894    lea(rscratch1, src);
3895    Assembler::ucomisd(dst, Address(rscratch1, 0));
3896  }
3897}
3898
3899void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
3900  if (reachable(src)) {
3901    Assembler::ucomiss(dst, as_Address(src));
3902  } else {
3903    lea(rscratch1, src);
3904    Assembler::ucomiss(dst, Address(rscratch1, 0));
3905  }
3906}
3907
3908void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3909  // Used in sign-bit flipping with aligned address.
3910  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3911  if (reachable(src)) {
3912    Assembler::xorpd(dst, as_Address(src));
3913  } else {
3914    lea(rscratch1, src);
3915    Assembler::xorpd(dst, Address(rscratch1, 0));
3916  }
3917}
3918
3919void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
3920  // Used in sign-bit flipping with aligned address.
3921  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3922  if (reachable(src)) {
3923    Assembler::xorps(dst, as_Address(src));
3924  } else {
3925    lea(rscratch1, src);
3926    Assembler::xorps(dst, Address(rscratch1, 0));
3927  }
3928}
3929
3930void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
3931  // Used in sign-bit flipping with aligned address.
3932  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
3933  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
3934  if (reachable(src)) {
3935    Assembler::pshufb(dst, as_Address(src));
3936  } else {
3937    lea(rscratch1, src);
3938    Assembler::pshufb(dst, Address(rscratch1, 0));
3939  }
3940}
3941
3942// AVX 3-operands instructions
3943
3944void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3945  if (reachable(src)) {
3946    vaddsd(dst, nds, as_Address(src));
3947  } else {
3948    lea(rscratch1, src);
3949    vaddsd(dst, nds, Address(rscratch1, 0));
3950  }
3951}
3952
3953void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3954  if (reachable(src)) {
3955    vaddss(dst, nds, as_Address(src));
3956  } else {
3957    lea(rscratch1, src);
3958    vaddss(dst, nds, Address(rscratch1, 0));
3959  }
3960}
3961
3962void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
3963  if (reachable(src)) {
3964    vandpd(dst, nds, as_Address(src), vector_len);
3965  } else {
3966    lea(rscratch1, src);
3967    vandpd(dst, nds, Address(rscratch1, 0), vector_len);
3968  }
3969}
3970
3971void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
3972  if (reachable(src)) {
3973    vandps(dst, nds, as_Address(src), vector_len);
3974  } else {
3975    lea(rscratch1, src);
3976    vandps(dst, nds, Address(rscratch1, 0), vector_len);
3977  }
3978}
3979
3980void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3981  if (reachable(src)) {
3982    vdivsd(dst, nds, as_Address(src));
3983  } else {
3984    lea(rscratch1, src);
3985    vdivsd(dst, nds, Address(rscratch1, 0));
3986  }
3987}
3988
3989void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3990  if (reachable(src)) {
3991    vdivss(dst, nds, as_Address(src));
3992  } else {
3993    lea(rscratch1, src);
3994    vdivss(dst, nds, Address(rscratch1, 0));
3995  }
3996}
3997
3998void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3999  if (reachable(src)) {
4000    vmulsd(dst, nds, as_Address(src));
4001  } else {
4002    lea(rscratch1, src);
4003    vmulsd(dst, nds, Address(rscratch1, 0));
4004  }
4005}
4006
4007void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4008  if (reachable(src)) {
4009    vmulss(dst, nds, as_Address(src));
4010  } else {
4011    lea(rscratch1, src);
4012    vmulss(dst, nds, Address(rscratch1, 0));
4013  }
4014}
4015
4016void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4017  if (reachable(src)) {
4018    vsubsd(dst, nds, as_Address(src));
4019  } else {
4020    lea(rscratch1, src);
4021    vsubsd(dst, nds, Address(rscratch1, 0));
4022  }
4023}
4024
4025void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4026  if (reachable(src)) {
4027    vsubss(dst, nds, as_Address(src));
4028  } else {
4029    lea(rscratch1, src);
4030    vsubss(dst, nds, Address(rscratch1, 0));
4031  }
4032}
4033
4034void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4035  if (reachable(src)) {
4036    vxorpd(dst, nds, as_Address(src), vector_len);
4037  } else {
4038    lea(rscratch1, src);
4039    vxorpd(dst, nds, Address(rscratch1, 0), vector_len);
4040  }
4041}
4042
4043void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4044  if (reachable(src)) {
4045    vxorps(dst, nds, as_Address(src), vector_len);
4046  } else {
4047    lea(rscratch1, src);
4048    vxorps(dst, nds, Address(rscratch1, 0), vector_len);
4049  }
4050}
4051
4052
4053//////////////////////////////////////////////////////////////////////////////////
4054#if INCLUDE_ALL_GCS
4055
4056void MacroAssembler::g1_write_barrier_pre(Register obj,
4057                                          Register pre_val,
4058                                          Register thread,
4059                                          Register tmp,
4060                                          bool tosca_live,
4061                                          bool expand_call) {
4062
4063  // If expand_call is true then we expand the call_VM_leaf macro
4064  // directly to skip generating the check by
4065  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
4066
4067#ifdef _LP64
4068  assert(thread == r15_thread, "must be");
4069#endif // _LP64
4070
4071  Label done;
4072  Label runtime;
4073
4074  assert(pre_val != noreg, "check this code");
4075
4076  if (obj != noreg) {
4077    assert_different_registers(obj, pre_val, tmp);
4078    assert(pre_val != rax, "check this code");
4079  }
4080
4081  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4082                                       PtrQueue::byte_offset_of_active()));
4083  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4084                                       PtrQueue::byte_offset_of_index()));
4085  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4086                                       PtrQueue::byte_offset_of_buf()));
4087
4088
4089  // Is marking active?
4090  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4091    cmpl(in_progress, 0);
4092  } else {
4093    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
4094    cmpb(in_progress, 0);
4095  }
4096  jcc(Assembler::equal, done);
4097
4098  // Do we need to load the previous value?
4099  if (obj != noreg) {
4100    load_heap_oop(pre_val, Address(obj, 0));
4101  }
4102
4103  // Is the previous value null?
4104  cmpptr(pre_val, (int32_t) NULL_WORD);
4105  jcc(Assembler::equal, done);
4106
4107  // Can we store original value in the thread's buffer?
4108  // Is index == 0?
4109  // (The index field is typed as size_t.)
4110
4111  movptr(tmp, index);                   // tmp := *index_adr
4112  cmpptr(tmp, 0);                       // tmp == 0?
4113  jcc(Assembler::equal, runtime);       // If yes, goto runtime
4114
4115  subptr(tmp, wordSize);                // tmp := tmp - wordSize
4116  movptr(index, tmp);                   // *index_adr := tmp
4117  addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr
4118
4119  // Record the previous value
4120  movptr(Address(tmp, 0), pre_val);
4121  jmp(done);
4122
4123  bind(runtime);
4124  // save the live input values
4125  if(tosca_live) push(rax);
4126
4127  if (obj != noreg && obj != rax)
4128    push(obj);
4129
4130  if (pre_val != rax)
4131    push(pre_val);
4132
4133  // Calling the runtime using the regular call_VM_leaf mechanism generates
4134  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
4135  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
4136  //
4137  // If we care generating the pre-barrier without a frame (e.g. in the
4138  // intrinsified Reference.get() routine) then ebp might be pointing to
4139  // the caller frame and so this check will most likely fail at runtime.
4140  //
4141  // Expanding the call directly bypasses the generation of the check.
4142  // So when we do not have have a full interpreter frame on the stack
4143  // expand_call should be passed true.
4144
4145  NOT_LP64( push(thread); )
4146
4147  if (expand_call) {
4148    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
4149    pass_arg1(this, thread);
4150    pass_arg0(this, pre_val);
4151    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
4152  } else {
4153    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
4154  }
4155
4156  NOT_LP64( pop(thread); )
4157
4158  // save the live input values
4159  if (pre_val != rax)
4160    pop(pre_val);
4161
4162  if (obj != noreg && obj != rax)
4163    pop(obj);
4164
4165  if(tosca_live) pop(rax);
4166
4167  bind(done);
4168}
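
// Shape of the SATB pre-barrier generated above, as pseudo-C++ (illustrative
// only; the index is a byte offset into the thread-local SATB buffer, as in
// the code):
//
//   if (thread->satb_mark_queue().is_active()) {      // concurrent marking in progress?
//     if (obj != NULL) pre_val = *obj;                // load previous value (oop decoding omitted)
//     if (pre_val != NULL) {                          // NULL previous values need no logging
//       if (index == 0) {
//         SharedRuntime::g1_wb_pre(pre_val, thread);  // buffer full -> runtime call
//       } else {
//         index -= wordSize;
//         *(oop*)(buffer + index) = pre_val;          // enqueue previous value
//       }
//     }
//   }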
4169
4170void MacroAssembler::g1_write_barrier_post(Register store_addr,
4171                                           Register new_val,
4172                                           Register thread,
4173                                           Register tmp,
4174                                           Register tmp2) {
4175#ifdef _LP64
4176  assert(thread == r15_thread, "must be");
4177#endif // _LP64
4178
4179  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4180                                       PtrQueue::byte_offset_of_index()));
4181  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4182                                       PtrQueue::byte_offset_of_buf()));
4183
4184  CardTableModRefBS* ct =
4185    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
4186  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4187
4188  Label done;
4189  Label runtime;
4190
4191  // Does store cross heap regions?
4192
4193  movptr(tmp, store_addr);
4194  xorptr(tmp, new_val);
4195  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
4196  jcc(Assembler::equal, done);
4197
4198  // crosses regions, storing NULL?
4199
4200  cmpptr(new_val, (int32_t) NULL_WORD);
4201  jcc(Assembler::equal, done);
4202
4203  // storing region crossing non-NULL, is card already dirty?
4204
4205  const Register card_addr = tmp;
4206  const Register cardtable = tmp2;
4207
4208  movptr(card_addr, store_addr);
4209  shrptr(card_addr, CardTableModRefBS::card_shift);
4210  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
4211  // a valid address and therefore is not properly handled by the relocation code.
4212  movptr(cardtable, (intptr_t)ct->byte_map_base);
4213  addptr(card_addr, cardtable);
4214
4215  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
4216  jcc(Assembler::equal, done);
4217
4218  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4219  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4220  jcc(Assembler::equal, done);
4221
4222
4223  // storing a region crossing, non-NULL oop, card is clean.
4224  // dirty card and log.
4225
4226  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4227
4228  cmpl(queue_index, 0);
4229  jcc(Assembler::equal, runtime);
4230  subl(queue_index, wordSize);
4231  movptr(tmp2, buffer);
4232#ifdef _LP64
4233  movslq(rscratch1, queue_index);
4234  addq(tmp2, rscratch1);
4235  movq(Address(tmp2, 0), card_addr);
4236#else
4237  addl(tmp2, queue_index);
4238  movl(Address(tmp2, 0), card_addr);
4239#endif
4240  jmp(done);
4241
4242  bind(runtime);
4243  // save the live input values
4244  push(store_addr);
4245  push(new_val);
4246#ifdef _LP64
4247  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
4248#else
4249  push(thread);
4250  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
4251  pop(thread);
4252#endif
4253  pop(new_val);
4254  pop(store_addr);
4255
4256  bind(done);
4257}
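
// Shape of the G1 post-barrier generated above, as pseudo-C++ (illustrative
// only):
//
//   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val) >> HeapRegion::LogOfHRGrainBytes) == 0) return;
//   if (new_val == NULL) return;                       // region-crossing store of NULL: nothing to do
//   jbyte* card = ct->byte_map_base + ((uintptr_t)store_addr >> CardTableModRefBS::card_shift);
//   if (*card == g1_young_card_val()) return;          // young regions are never refined
//   OrderAccess::storeload();
//   if (*card == dirty_card_val()) return;             // already dirty, already logged
//   *card = dirty_card_val();                          // dirty the card ...
//   if (index == 0) SharedRuntime::g1_wb_post(card, thread);       // ... buffer full -> runtime
//   else { index -= wordSize; *(jbyte**)(buffer + index) = card; } // ... or enqueue in the DCQ buffer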
4258
4259#endif // INCLUDE_ALL_GCS
4260//////////////////////////////////////////////////////////////////////////////////
4261
4262
4263void MacroAssembler::store_check(Register obj) {
4264  // Does a store check for the oop in register obj. The content of
4265  // register obj is destroyed afterwards.
4266  store_check_part_1(obj);
4267  store_check_part_2(obj);
4268}
4269
4270void MacroAssembler::store_check(Register obj, Address dst) {
4271  store_check(obj);
4272}
4273
4274
4275// split the store check operation so that other instructions can be scheduled in between
4276void MacroAssembler::store_check_part_1(Register obj) {
4277  BarrierSet* bs = Universe::heap()->barrier_set();
4278  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4279  shrptr(obj, CardTableModRefBS::card_shift);
4280}
4281
4282void MacroAssembler::store_check_part_2(Register obj) {
4283  BarrierSet* bs = Universe::heap()->barrier_set();
4284  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4285  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
4286  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4287
4288  // The calculation for byte_map_base is as follows:
4289  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
4290  // So this essentially converts an address to a displacement and it will
4291  // never need to be relocated. On 64bit however the value may be too
4292  // large for a 32bit displacement.
4293  intptr_t disp = (intptr_t) ct->byte_map_base;
4294  if (is_simm32(disp)) {
4295    Address cardtable(noreg, obj, Address::times_1, disp);
4296    movb(cardtable, 0);
4297  } else {
4298    // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
4299    // displacement and done in a single instruction given favorable mapping and a
4300    // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
4301    // entry and that entry is not properly handled by the relocation code.
4302    AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
4303    Address index(noreg, obj, Address::times_1);
4304    movb(as_Address(ArrayAddress(cardtable, index)), 0);
4305  }
4306}
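
// Parts 1 and 2 together form the plain card-table write barrier; as C++
// (illustrative only):
//
//   static inline void card_table_store_check(CardTableModRefBS* ct, void* obj) {
//     jbyte* card = ct->byte_map_base + ((uintptr_t)obj >> CardTableModRefBS::card_shift);
//     *card = 0;  // CardTableModRefBS::dirty_card_val()
//   }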
4307
4308void MacroAssembler::subptr(Register dst, int32_t imm32) {
4309  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
4310}
4311
4312// Force generation of a 4 byte immediate value even if it fits into 8bit
4313void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
4314  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
4315}
4316
4317void MacroAssembler::subptr(Register dst, Register src) {
4318  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
4319}
4320
4321// C++ bool manipulation
4322void MacroAssembler::testbool(Register dst) {
4323  if(sizeof(bool) == 1)
4324    testb(dst, 0xff);
4325  else if(sizeof(bool) == 2) {
4326    // testw implementation needed for two byte bools
4327    ShouldNotReachHere();
4328  } else if(sizeof(bool) == 4)
4329    testl(dst, dst);
4330  else
4331    // unsupported
4332    ShouldNotReachHere();
4333}
4334
4335void MacroAssembler::testptr(Register dst, Register src) {
4336  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4337}
4338
4339// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4340void MacroAssembler::tlab_allocate(Register obj,
4341                                   Register var_size_in_bytes,
4342                                   int con_size_in_bytes,
4343                                   Register t1,
4344                                   Register t2,
4345                                   Label& slow_case) {
4346  assert_different_registers(obj, t1, t2);
4347  assert_different_registers(obj, var_size_in_bytes, t1);
4348  Register end = t2;
4349  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
4350
4351  verify_tlab();
4352
4353  NOT_LP64(get_thread(thread));
4354
4355  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
4356  if (var_size_in_bytes == noreg) {
4357    lea(end, Address(obj, con_size_in_bytes));
4358  } else {
4359    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
4360  }
4361  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
4362  jcc(Assembler::above, slow_case);
4363
4364  // update the tlab top pointer
4365  movptr(Address(thread, JavaThread::tlab_top_offset()), end);
4366
4367  // recover var_size_in_bytes if necessary
4368  if (var_size_in_bytes == end) {
4369    subptr(var_size_in_bytes, obj);
4370  }
4371  verify_tlab();
4372}
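
// The TLAB fast path above, as a C++ fragment (illustrative only); no atomics
// are needed because the TLAB is thread-local:
//
//   HeapWord* top = thread->tlab().top();
//   HeapWord* end = top + size_in_words;
//   if (end > thread->tlab().end()) goto slow_case;  // does not fit -> refill or shared allocation
//   thread->tlab().set_top(end);                     // bump the thread-local top pointer
//   return top;                                      // newly allocated object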
4373
4374// Preserves rbx, and rdx.
4375Register MacroAssembler::tlab_refill(Label& retry,
4376                                     Label& try_eden,
4377                                     Label& slow_case) {
4378  Register top = rax;
4379  Register t1  = rcx;
4380  Register t2  = rsi;
4381  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
4382  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
4383  Label do_refill, discard_tlab;
4384
4385  if (!Universe::heap()->supports_inline_contig_alloc()) {
4386    // No allocation in the shared eden.
4387    jmp(slow_case);
4388  }
4389
4390  NOT_LP64(get_thread(thread_reg));
4391
4392  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4393  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4394
4395  // calculate amount of free space
4396  subptr(t1, top);
4397  shrptr(t1, LogHeapWordSize);
4398
4399  // Retain tlab and allocate object in shared space if
4400  // the amount free in the tlab is too large to discard.
4401  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
4402  jcc(Assembler::lessEqual, discard_tlab);
4403
4404  // Retain
4405  // %%% yuck as movptr...
4406  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
4407  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
4408  if (TLABStats) {
4409    // increment number of slow_allocations
4410    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
4411  }
4412  jmp(try_eden);
4413
4414  bind(discard_tlab);
4415  if (TLABStats) {
4416    // increment number of refills
4417    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
4418    // accumulate wastage -- t1 is amount free in tlab
4419    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
4420  }
4421
4422  // if tlab is currently allocated (top or end != null) then
4423  // fill [top, end + alignment_reserve) with array object
4424  testptr(top, top);
4425  jcc(Assembler::zero, do_refill);
4426
4427  // set up the mark word
4428  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
4429  // set the length to the remaining space
4430  subptr(t1, typeArrayOopDesc::header_size(T_INT));
4431  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
4432  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
4433  movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
4434  // set klass to intArrayKlass
4435  // dubious reloc why not an oop reloc?
4436  movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
4437  // store klass last.  concurrent gcs assume klass length is valid if
4438  // klass field is not null.
4439  store_klass(top, t1);
4440
4441  movptr(t1, top);
4442  subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4443  incr_allocated_bytes(thread_reg, t1, 0);
4444
4445  // refill the tlab with an eden allocation
4446  bind(do_refill);
4447  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4448  shlptr(t1, LogHeapWordSize);
4449  // allocate new tlab, address returned in top
4450  eden_allocate(top, t1, 0, t2, slow_case);
4451
4452  // Check that t1 was preserved in eden_allocate.
4453#ifdef ASSERT
4454  if (UseTLAB) {
4455    Label ok;
4456    Register tsize = rsi;
4457    assert_different_registers(tsize, thread_reg, t1);
4458    push(tsize);
4459    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4460    shlptr(tsize, LogHeapWordSize);
4461    cmpptr(t1, tsize);
4462    jcc(Assembler::equal, ok);
4463    STOP("assert(t1 != tlab size)");
4464    should_not_reach_here();
4465
4466    bind(ok);
4467    pop(tsize);
4468  }
4469#endif
4470  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
4471  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
4472  addptr(top, t1);
4473  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
4474  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
4475  verify_tlab();
4476  jmp(retry);
4477
4478  return thread_reg; // for use by caller
4479}
4480
4481void MacroAssembler::incr_allocated_bytes(Register thread,
4482                                          Register var_size_in_bytes,
4483                                          int con_size_in_bytes,
4484                                          Register t1) {
4485  if (!thread->is_valid()) {
4486#ifdef _LP64
4487    thread = r15_thread;
4488#else
4489    assert(t1->is_valid(), "need temp reg");
4490    thread = t1;
4491    get_thread(thread);
4492#endif
4493  }
4494
4495#ifdef _LP64
4496  if (var_size_in_bytes->is_valid()) {
4497    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4498  } else {
4499    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4500  }
4501#else
4502  if (var_size_in_bytes->is_valid()) {
4503    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4504  } else {
4505    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4506  }
4507  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
4508#endif
4509}
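
// On 32-bit platforms the per-thread allocated_bytes counter is still 64 bits
// wide, so the addl/adcl pair above adds into the low word and then folds the
// carry into the high word.  A rough C-level sketch of the same update
// (field name illustrative only):
//
//   uint64_t bytes = thread->allocated_bytes;   // low word at offset, high word at offset + 4
//   bytes += size_in_bytes;                     // addl on the low half, adcl propagates the carry
//   thread->allocated_bytes = bytes;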
4510
4511void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
4512  pusha();
4513
4514  // if we are coming from c1, xmm registers may be live
4515  int off = 0;
4516  if (UseSSE == 1)  {
4517    subptr(rsp, sizeof(jdouble)*8);
4518    movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
4519    movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
4520    movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
4521    movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
4522    movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
4523    movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
4524    movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
4525    movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
4526  } else if (UseSSE >= 2)  {
4527    if (UseAVX > 2) {
4528      movl(rbx, 0xffff);
4529#ifdef _LP64
4530      kmovql(k1, rbx);
4531#else
4532      kmovdl(k1, rbx);
4533#endif
4534    }
4535#ifdef COMPILER2
4536    if (MaxVectorSize > 16) {
4537      assert(UseAVX > 0, "256bit vectors are supported only with AVX");
4538      // Save upper half of YMM registers
4539      subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4540      vextractf128h(Address(rsp,  0),xmm0);
4541      vextractf128h(Address(rsp, 16),xmm1);
4542      vextractf128h(Address(rsp, 32),xmm2);
4543      vextractf128h(Address(rsp, 48),xmm3);
4544      vextractf128h(Address(rsp, 64),xmm4);
4545      vextractf128h(Address(rsp, 80),xmm5);
4546      vextractf128h(Address(rsp, 96),xmm6);
4547      vextractf128h(Address(rsp,112),xmm7);
4548#ifdef _LP64
4549      vextractf128h(Address(rsp,128),xmm8);
4550      vextractf128h(Address(rsp,144),xmm9);
4551      vextractf128h(Address(rsp,160),xmm10);
4552      vextractf128h(Address(rsp,176),xmm11);
4553      vextractf128h(Address(rsp,192),xmm12);
4554      vextractf128h(Address(rsp,208),xmm13);
4555      vextractf128h(Address(rsp,224),xmm14);
4556      vextractf128h(Address(rsp,240),xmm15);
4557#endif
4558    }
4559#endif
4560    // Save whole 128-bit (16 bytes) XMM registers
4561    subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4562    movdqu(Address(rsp,off++*16),xmm0);
4563    movdqu(Address(rsp,off++*16),xmm1);
4564    movdqu(Address(rsp,off++*16),xmm2);
4565    movdqu(Address(rsp,off++*16),xmm3);
4566    movdqu(Address(rsp,off++*16),xmm4);
4567    movdqu(Address(rsp,off++*16),xmm5);
4568    movdqu(Address(rsp,off++*16),xmm6);
4569    movdqu(Address(rsp,off++*16),xmm7);
4570#ifdef _LP64
4571    movdqu(Address(rsp,off++*16),xmm8);
4572    movdqu(Address(rsp,off++*16),xmm9);
4573    movdqu(Address(rsp,off++*16),xmm10);
4574    movdqu(Address(rsp,off++*16),xmm11);
4575    movdqu(Address(rsp,off++*16),xmm12);
4576    movdqu(Address(rsp,off++*16),xmm13);
4577    movdqu(Address(rsp,off++*16),xmm14);
4578    movdqu(Address(rsp,off++*16),xmm15);
4579#endif
4580  }
4581
4582  // Preserve registers across runtime call
4583  int incoming_argument_and_return_value_offset = -1;
4584  if (num_fpu_regs_in_use > 1) {
4585    // Must preserve all other FPU regs (could alternatively convert
4586    // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
4587    // FPU state, but we cannot trust the C compiler)
4588    NEEDS_CLEANUP;
4589    // NOTE that in this case we also push the incoming argument(s) to
4590    // the stack and restore it later; we also use this stack slot to
4591    // hold the return value from dsin, dcos etc.
4592    for (int i = 0; i < num_fpu_regs_in_use; i++) {
4593      subptr(rsp, sizeof(jdouble));
4594      fstp_d(Address(rsp, 0));
4595    }
4596    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
4597    for (int i = nb_args-1; i >= 0; i--) {
4598      fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
4599    }
4600  }
4601
4602  subptr(rsp, nb_args*sizeof(jdouble));
4603  for (int i = 0; i < nb_args; i++) {
4604    fstp_d(Address(rsp, i*sizeof(jdouble)));
4605  }
4606
4607#ifdef _LP64
4608  if (nb_args > 0) {
4609    movdbl(xmm0, Address(rsp, 0));
4610  }
4611  if (nb_args > 1) {
4612    movdbl(xmm1, Address(rsp, sizeof(jdouble)));
4613  }
4614  assert(nb_args <= 2, "unsupported number of args");
4615#endif // _LP64
4616
4617  // NOTE: we must not use call_VM_leaf here because that requires a
4618  // complete interpreter frame in debug mode -- same bug as 4387334
4619  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
4620  // handle the 64-bit ABI properly
4621
4622  NEEDS_CLEANUP;
4623  // Need to add stack banging before this runtime call if it needs to
4624  // be taken; however, there is no generic stack banging routine at
4625  // the MacroAssembler level
4626
4627  MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
4628
4629#ifdef _LP64
4630  movsd(Address(rsp, 0), xmm0);
4631  fld_d(Address(rsp, 0));
4632#endif // _LP64
4633  addptr(rsp, sizeof(jdouble) * nb_args);
4634  if (num_fpu_regs_in_use > 1) {
4635    // Must save return value to stack and then restore entire FPU
4636    // stack except incoming arguments
4637    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
4638    for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
4639      fld_d(Address(rsp, 0));
4640      addptr(rsp, sizeof(jdouble));
4641    }
4642    fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
4643    addptr(rsp, sizeof(jdouble) * nb_args);
4644  }
4645
4646  off = 0;
4647  if (UseSSE == 1)  {
4648    movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
4649    movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
4650    movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
4651    movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
4652    movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
4653    movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
4654    movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
4655    movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
4656    addptr(rsp, sizeof(jdouble)*8);
4657  } else if (UseSSE >= 2)  {
4658    // Restore whole 128-bit (16 bytes) XMM registers
4659    movdqu(xmm0, Address(rsp,off++*16));
4660    movdqu(xmm1, Address(rsp,off++*16));
4661    movdqu(xmm2, Address(rsp,off++*16));
4662    movdqu(xmm3, Address(rsp,off++*16));
4663    movdqu(xmm4, Address(rsp,off++*16));
4664    movdqu(xmm5, Address(rsp,off++*16));
4665    movdqu(xmm6, Address(rsp,off++*16));
4666    movdqu(xmm7, Address(rsp,off++*16));
4667#ifdef _LP64
4668    movdqu(xmm8, Address(rsp,off++*16));
4669    movdqu(xmm9, Address(rsp,off++*16));
4670    movdqu(xmm10, Address(rsp,off++*16));
4671    movdqu(xmm11, Address(rsp,off++*16));
4672    movdqu(xmm12, Address(rsp,off++*16));
4673    movdqu(xmm13, Address(rsp,off++*16));
4674    movdqu(xmm14, Address(rsp,off++*16));
4675    movdqu(xmm15, Address(rsp,off++*16));
4676#endif
4677    addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4678#ifdef COMPILER2
4679    if (MaxVectorSize > 16) {
4680      // Restore upper half of YMM registers.
4681      vinsertf128h(xmm0, Address(rsp,  0));
4682      vinsertf128h(xmm1, Address(rsp, 16));
4683      vinsertf128h(xmm2, Address(rsp, 32));
4684      vinsertf128h(xmm3, Address(rsp, 48));
4685      vinsertf128h(xmm4, Address(rsp, 64));
4686      vinsertf128h(xmm5, Address(rsp, 80));
4687      vinsertf128h(xmm6, Address(rsp, 96));
4688      vinsertf128h(xmm7, Address(rsp,112));
4689#ifdef _LP64
4690      vinsertf128h(xmm8, Address(rsp,128));
4691      vinsertf128h(xmm9, Address(rsp,144));
4692      vinsertf128h(xmm10, Address(rsp,160));
4693      vinsertf128h(xmm11, Address(rsp,176));
4694      vinsertf128h(xmm12, Address(rsp,192));
4695      vinsertf128h(xmm13, Address(rsp,208));
4696      vinsertf128h(xmm14, Address(rsp,224));
4697      vinsertf128h(xmm15, Address(rsp,240));
4698#endif
4699      addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4700    }
4701#endif
4702  }
4703  popa();
4704}
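
// fp_runtime_fallback above follows a simple protocol for calling a C runtime
// routine from code that may have live FPU/XMM state: save the integer
// registers (pusha), spill any live XMM/YMM state, spill the rest of the x87
// stack while keeping the nb_args incoming arguments, pass those arguments on
// the stack (and in xmm0/xmm1 on 64-bit), call the routine, and restore
// everything in reverse order with the result left on the x87 stack.
// trigfunc() below, for example, calls it with nb_args == 1.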
4705
4706static const double     pi_4 =  0.7853981633974483;
4707
4708void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
4709  // A hand-coded argument reduction for values with fabs(x) in (pi/4, pi/2)
4710  // was attempted in this code; unfortunately it appears that the
4711  // switch to 80-bit precision and back causes this to be
4712  // unprofitable compared with simply performing a runtime call if
4713  // the argument is out of the (-pi/4, pi/4) range.
4714
4715  Register tmp = noreg;
4716  if (!VM_Version::supports_cmov()) {
4717    // fcmp needs a temporary, so preserve rbx
4718    tmp = rbx;
4719    push(tmp);
4720  }
4721
4722  Label slow_case, done;
4723
4724  ExternalAddress pi4_adr = (address)&pi_4;
4725  if (reachable(pi4_adr)) {
4726    // x ?<= pi/4
4727    fld_d(pi4_adr);
4728    fld_s(1);                // Stack:  X  PI/4  X
4729    fabs();                  // Stack: |X| PI/4  X
4730    fcmp(tmp);
4731    jcc(Assembler::above, slow_case);
4732
4733    // fastest case: -pi/4 <= x <= pi/4
4734    switch(trig) {
4735    case 's':
4736      fsin();
4737      break;
4738    case 'c':
4739      fcos();
4740      break;
4741    case 't':
4742      ftan();
4743      break;
4744    default:
4745      assert(false, "bad intrinsic");
4746      break;
4747    }
4748    jmp(done);
4749  }
4750
4751  // slow case: runtime call
4752  bind(slow_case);
4753
4754  switch(trig) {
4755  case 's':
4756    {
4757      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
4758    }
4759    break;
4760  case 'c':
4761    {
4762      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
4763    }
4764    break;
4765  case 't':
4766    {
4767      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
4768    }
4769    break;
4770  default:
4771    assert(false, "bad intrinsic");
4772    break;
4773  }
4774
4775  // Come here with result in F-TOS
4776  bind(done);
4777
4778  if (tmp != noreg) {
4779    pop(tmp);
4780  }
4781}
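
// The fast path above uses fsin/fcos/ftan directly only when fabs(x) <= pi/4,
// the range in which the x87 instructions are trusted without argument
// reduction; everything else is routed through fp_runtime_fallback to the
// SharedRuntime::dsin/dcos/dtan C implementations.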
4782
4783
4784// Look up the method for a megamorphic invokeinterface call.
4785// The target method is determined by <intf_klass, itable_index>.
4786// The receiver klass is in recv_klass.
4787// On success, the result will be in method_result, and execution falls through.
4788// On failure, execution transfers to the given label.
4789void MacroAssembler::lookup_interface_method(Register recv_klass,
4790                                             Register intf_klass,
4791                                             RegisterOrConstant itable_index,
4792                                             Register method_result,
4793                                             Register scan_temp,
4794                                             Label& L_no_such_interface) {
4795  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
4796  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4797         "caller must use same register for non-constant itable index as for method");
4798
4799  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
4800  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
4801  int itentry_off = itableMethodEntry::method_offset_in_bytes();
4802  int scan_step   = itableOffsetEntry::size() * wordSize;
4803  int vte_size    = vtableEntry::size() * wordSize;
4804  Address::ScaleFactor times_vte_scale = Address::times_ptr;
4805  assert(vte_size == wordSize, "else adjust times_vte_scale");
4806
4807  movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
4808
4809  // %%% Could store the aligned, prescaled offset in the klassoop.
4810  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
4811  if (HeapWordsPerLong > 1) {
4812    // Round up to align_object_offset boundary
4813    // see code for InstanceKlass::start_of_itable!
4814    round_to(scan_temp, BytesPerLong);
4815  }
4816
4817  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4818  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4819  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
4820
4821  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
4822  //   if (scan->interface() == intf) {
4823  //     result = (klass + scan->offset() + itable_index);
4824  //   }
4825  // }
4826  Label search, found_method;
4827
4828  for (int peel = 1; peel >= 0; peel--) {
4829    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
4830    cmpptr(intf_klass, method_result);
4831
4832    if (peel) {
4833      jccb(Assembler::equal, found_method);
4834    } else {
4835      jccb(Assembler::notEqual, search);
4836      // (invert the test to fall through to found_method...)
4837    }
4838
4839    if (!peel)  break;
4840
4841    bind(search);
4842
4843    // Check that the previous entry is non-null.  A null entry means that
4844    // the receiver class doesn't implement the interface, and wasn't the
4845    // same as when the caller was compiled.
4846    testptr(method_result, method_result);
4847    jcc(Assembler::zero, L_no_such_interface);
4848    addptr(scan_temp, scan_step);
4849  }
4850
4851  bind(found_method);
4852
4853  // Got a hit.
4854  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
4855  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
4856}
4857
4858
4859// virtual method calling
4860void MacroAssembler::lookup_virtual_method(Register recv_klass,
4861                                           RegisterOrConstant vtable_index,
4862                                           Register method_result) {
4863  const int base = InstanceKlass::vtable_start_offset() * wordSize;
4864  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4865  Address vtable_entry_addr(recv_klass,
4866                            vtable_index, Address::times_ptr,
4867                            base + vtableEntry::method_offset_in_bytes());
4868  movptr(method_result, vtable_entry_addr);
4869}
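
// A sketch of the load performed above for the register-index case, using
// illustrative C-style names:
//
//   method_result = *(Method**)((address)recv_klass
//                               + InstanceKlass::vtable_start_offset() * wordSize
//                               + vtable_index * wordSize
//                               + vtableEntry::method_offset_in_bytes());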
4870
4871
4872void MacroAssembler::check_klass_subtype(Register sub_klass,
4873                           Register super_klass,
4874                           Register temp_reg,
4875                           Label& L_success) {
4876  Label L_failure;
4877  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
4878  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
4879  bind(L_failure);
4880}
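
// check_klass_subtype composes the two helpers defined below: the fast path
// decides most queries from the supertype display, and anything it cannot
// decide falls through into the slow path's linear scan of the secondary
// supers.  Success branches to L_success; failure simply falls out past the
// local L_failure label bound above.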
4881
4882
4883void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4884                                                   Register super_klass,
4885                                                   Register temp_reg,
4886                                                   Label* L_success,
4887                                                   Label* L_failure,
4888                                                   Label* L_slow_path,
4889                                        RegisterOrConstant super_check_offset) {
4890  assert_different_registers(sub_klass, super_klass, temp_reg);
4891  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4892  if (super_check_offset.is_register()) {
4893    assert_different_registers(sub_klass, super_klass,
4894                               super_check_offset.as_register());
4895  } else if (must_load_sco) {
4896    assert(temp_reg != noreg, "supply either a temp or a register offset");
4897  }
4898
4899  Label L_fallthrough;
4900  int label_nulls = 0;
4901  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
4902  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
4903  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
4904  assert(label_nulls <= 1, "at most one NULL in the batch");
4905
4906  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4907  int sco_offset = in_bytes(Klass::super_check_offset_offset());
4908  Address super_check_offset_addr(super_klass, sco_offset);
4909
4910  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4911  // range of a jccb.  If this routine grows larger, reconsider at
4912  // least some of these.
4913#define local_jcc(assembler_cond, label)                                \
4914  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
4915  else                             jcc( assembler_cond, label) /*omit semi*/
4916
4917  // Hacked jmp, which may only be used just before L_fallthrough.
4918#define final_jmp(label)                                                \
4919  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
4920  else                            jmp(label)                /*omit semi*/
4921
4922  // If the pointers are equal, we are done (e.g., String[] elements).
4923  // This self-check enables sharing of secondary supertype arrays among
4924  // non-primary types such as array-of-interface.  Otherwise, each such
4925  // type would need its own customized SSA.
4926  // We move this check to the front of the fast path because many
4927  // type checks are in fact trivially successful in this manner,
4928  // so we get a nicely predicted branch right at the start of the check.
4929  cmpptr(sub_klass, super_klass);
4930  local_jcc(Assembler::equal, *L_success);
4931
4932  // Check the supertype display:
4933  if (must_load_sco) {
4934    // Positive movl does right thing on LP64.
4935    movl(temp_reg, super_check_offset_addr);
4936    super_check_offset = RegisterOrConstant(temp_reg);
4937  }
4938  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4939  cmpptr(super_klass, super_check_addr); // load displayed supertype
4940
4941  // This check has worked decisively for primary supers.
4942  // Secondary supers are sought in the super_cache ('super_cache_addr').
4943  // (Secondary supers are interfaces and very deeply nested subtypes.)
4944  // This works with the same check above because of a tricky aliasing
4945  // between the super_cache and the primary super display elements.
4946  // (The 'super_check_addr' can address either, as the case requires.)
4947  // Note that the cache is updated below if it does not help us find
4948  // what we need immediately.
4949  // So if it was a primary super, we can just fail immediately.
4950  // Otherwise, it's the slow path for us (no success at this point).
4951
4952  if (super_check_offset.is_register()) {
4953    local_jcc(Assembler::equal, *L_success);
4954    cmpl(super_check_offset.as_register(), sc_offset);
4955    if (L_failure == &L_fallthrough) {
4956      local_jcc(Assembler::equal, *L_slow_path);
4957    } else {
4958      local_jcc(Assembler::notEqual, *L_failure);
4959      final_jmp(*L_slow_path);
4960    }
4961  } else if (super_check_offset.as_constant() == sc_offset) {
4962    // Need a slow path; fast failure is impossible.
4963    if (L_slow_path == &L_fallthrough) {
4964      local_jcc(Assembler::equal, *L_success);
4965    } else {
4966      local_jcc(Assembler::notEqual, *L_slow_path);
4967      final_jmp(*L_success);
4968    }
4969  } else {
4970    // No slow path; it's a fast decision.
4971    if (L_failure == &L_fallthrough) {
4972      local_jcc(Assembler::equal, *L_success);
4973    } else {
4974      local_jcc(Assembler::notEqual, *L_failure);
4975      final_jmp(*L_success);
4976    }
4977  }
4978
4979  bind(L_fallthrough);
4980
4981#undef local_jcc
4982#undef final_jmp
4983}
4984
4985
4986void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4987                                                   Register super_klass,
4988                                                   Register temp_reg,
4989                                                   Register temp2_reg,
4990                                                   Label* L_success,
4991                                                   Label* L_failure,
4992                                                   bool set_cond_codes) {
4993  assert_different_registers(sub_klass, super_klass, temp_reg);
4994  if (temp2_reg != noreg)
4995    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4996#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4997
4998  Label L_fallthrough;
4999  int label_nulls = 0;
5000  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
5001  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
5002  assert(label_nulls <= 1, "at most one NULL in the batch");
5003
5004  // a couple of useful fields in sub_klass:
5005  int ss_offset = in_bytes(Klass::secondary_supers_offset());
5006  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5007  Address secondary_supers_addr(sub_klass, ss_offset);
5008  Address super_cache_addr(     sub_klass, sc_offset);
5009
5010  // Do a linear scan of the secondary super-klass chain.
5011  // This code is rarely used, so simplicity is a virtue here.
5012  // The repne_scan instruction uses fixed registers, which we must spill.
5013  // Don't worry too much about pre-existing connections with the input regs.
5014
5015  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
5016  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
5017
5018  // Get super_klass value into rax (even if it was in rdi or rcx).
5019  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
5020  if (super_klass != rax || UseCompressedOops) {
5021    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
5022    mov(rax, super_klass);
5023  }
5024  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
5025  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
5026
5027#ifndef PRODUCT
5028  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
5029  ExternalAddress pst_counter_addr((address) pst_counter);
5030  NOT_LP64(  incrementl(pst_counter_addr) );
5031  LP64_ONLY( lea(rcx, pst_counter_addr) );
5032  LP64_ONLY( incrementl(Address(rcx, 0)) );
5033#endif //PRODUCT
5034
5035  // We will consult the secondary-super array.
5036  movptr(rdi, secondary_supers_addr);
5037  // Load the array length.  (Positive movl does right thing on LP64.)
5038  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
5039  // Skip to start of data.
5040  addptr(rdi, Array<Klass*>::base_offset_in_bytes());
5041
5042  // Scan RCX words at [RDI] for an occurrence of RAX.
5043  // Set NZ/Z based on last compare.
5044  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
5045  // not change flags (only the repeated scas instruction sets flags).
5046  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
5047
5048  testptr(rax,rax); // Set Z = 0
5049  repne_scan();
5050
5051  // Unspill the temp. registers:
5052  if (pushed_rdi)  pop(rdi);
5053  if (pushed_rcx)  pop(rcx);
5054  if (pushed_rax)  pop(rax);
5055
5056  if (set_cond_codes) {
5057    // Special hack for the AD files:  rdi is guaranteed non-zero.
5058    assert(!pushed_rdi, "rdi must be left non-NULL");
5059    // Also, the condition codes are properly set Z/NZ on succeed/failure.
5060  }
5061
5062  if (L_failure == &L_fallthrough)
5063        jccb(Assembler::notEqual, *L_failure);
5064  else  jcc(Assembler::notEqual, *L_failure);
5065
5066  // Success.  Cache the super we found and proceed in triumph.
5067  movptr(super_cache_addr, super_klass);
5068
5069  if (L_success != &L_fallthrough) {
5070    jmp(*L_success);
5071  }
5072
5073#undef IS_A_TEMP
5074
5075  bind(L_fallthrough);
5076}
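
// The scan above relies on the fixed-register semantics of repne scas: it
// compares rax against successive words at [rdi], decrementing rcx and
// advancing rdi, stopping early with ZF set on a match.  If rcx was zero the
// string instruction does not touch flags, which is why ZF is cleared with
// testptr(rax, rax) beforehand.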
5077
5078
5079void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
5080  if (VM_Version::supports_cmov()) {
5081    cmovl(cc, dst, src);
5082  } else {
5083    Label L;
5084    jccb(negate_condition(cc), L);
5085    movl(dst, src);
5086    bind(L);
5087  }
5088}
5089
5090void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5091  if (VM_Version::supports_cmov()) {
5092    cmovl(cc, dst, src);
5093  } else {
5094    Label L;
5095    jccb(negate_condition(cc), L);
5096    movl(dst, src);
5097    bind(L);
5098  }
5099}
5100
5101void MacroAssembler::verify_oop(Register reg, const char* s) {
5102  if (!VerifyOops) return;
5103
5104  // Pass register number to verify_oop_subroutine
5105  const char* b = NULL;
5106  {
5107    ResourceMark rm;
5108    stringStream ss;
5109    ss.print("verify_oop: %s: %s", reg->name(), s);
5110    b = code_string(ss.as_string());
5111  }
5112  BLOCK_COMMENT("verify_oop {");
5113#ifdef _LP64
5114  push(rscratch1);                    // save r10, trashed by movptr()
5115#endif
5116  push(rax);                          // save rax
5117  push(reg);                          // pass register argument
5118  ExternalAddress buffer((address) b);
5119  // avoid using pushptr, as it modifies scratch registers
5120  // and our contract is not to modify anything
5121  movptr(rax, buffer.addr());
5122  push(rax);
5123  // call indirectly to solve generation ordering problem
5124  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5125  call(rax);
5126  // Caller pops the arguments (oop, message) and restores rax, r10
5127  BLOCK_COMMENT("} verify_oop");
5128}
5129
5130
5131RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
5132                                                      Register tmp,
5133                                                      int offset) {
5134  intptr_t value = *delayed_value_addr;
5135  if (value != 0)
5136    return RegisterOrConstant(value + offset);
5137
5138  // load indirectly to solve generation ordering problem
5139  movptr(tmp, ExternalAddress((address) delayed_value_addr));
5140
5141#ifdef ASSERT
5142  { Label L;
5143    testptr(tmp, tmp);
5144    if (WizardMode) {
5145      const char* buf = NULL;
5146      {
5147        ResourceMark rm;
5148        stringStream ss;
5149        ss.print("DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
5150        buf = code_string(ss.as_string());
5151      }
5152      jcc(Assembler::notZero, L);
5153      STOP(buf);
5154    } else {
5155      jccb(Assembler::notZero, L);
5156      hlt();
5157    }
5158    bind(L);
5159  }
5160#endif
5161
5162  if (offset != 0)
5163    addptr(tmp, offset);
5164
5165  return RegisterOrConstant(tmp);
5166}
5167
5168
5169Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5170                                         int extra_slot_offset) {
5171  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5172  int stackElementSize = Interpreter::stackElementSize;
5173  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5174#ifdef ASSERT
5175  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5176  assert(offset1 - offset == stackElementSize, "correct arithmetic");
5177#endif
5178  Register             scale_reg    = noreg;
5179  Address::ScaleFactor scale_factor = Address::no_scale;
5180  if (arg_slot.is_constant()) {
5181    offset += arg_slot.as_constant() * stackElementSize;
5182  } else {
5183    scale_reg    = arg_slot.as_register();
5184    scale_factor = Address::times(stackElementSize);
5185  }
5186  offset += wordSize;           // return PC is on stack
5187  return Address(rsp, scale_reg, scale_factor, offset);
5188}
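
// The returned Address expands to the following offset from rsp (illustrative
// form, names as used above):
//
//   rsp + wordSize                                      // skip the return PC
//       + Interpreter::expr_offset_in_bytes(extra_slot_offset)
//       + arg_slot * Interpreter::stackElementSize      // constant, or a scaled register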
5189
5190
5191void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
5192  if (!VerifyOops) return;
5193
5194  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
5195  // Pass register number to verify_oop_subroutine
5196  const char* b = NULL;
5197  {
5198    ResourceMark rm;
5199    stringStream ss;
5200    ss.print("verify_oop_addr: %s", s);
5201    b = code_string(ss.as_string());
5202  }
5203#ifdef _LP64
5204  push(rscratch1);                    // save r10, trashed by movptr()
5205#endif
5206  push(rax);                          // save rax
5207  // addr may contain rsp so we will have to adjust it based on the push
5208  // we just did (and on 64 bit we do two pushes)
5209  // NOTE: the 64-bit code seemed to have had a bug here: it did movq(addr, rax),
5210  // which stores rax into addr, the reverse of what was intended.
5211  if (addr.uses(rsp)) {
5212    lea(rax, addr);
5213    pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5214  } else {
5215    pushptr(addr);
5216  }
5217
5218  ExternalAddress buffer((address) b);
5219  // pass msg argument
5220  // avoid using pushptr, as it modifies scratch registers
5221  // and our contract is not to modify anything
5222  movptr(rax, buffer.addr());
5223  push(rax);
5224
5225  // call indirectly to solve generation ordering problem
5226  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5227  call(rax);
5228  // Caller pops the arguments (addr, message) and restores rax, r10.
5229}
5230
5231void MacroAssembler::verify_tlab() {
5232#ifdef ASSERT
5233  if (UseTLAB && VerifyOops) {
5234    Label next, ok;
5235    Register t1 = rsi;
5236    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
5237
5238    push(t1);
5239    NOT_LP64(push(thread_reg));
5240    NOT_LP64(get_thread(thread_reg));
5241
5242    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5243    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5244    jcc(Assembler::aboveEqual, next);
5245    STOP("assert(top >= start)");
5246    should_not_reach_here();
5247
5248    bind(next);
5249    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
5250    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5251    jcc(Assembler::aboveEqual, ok);
5252    STOP("assert(top <= end)");
5253    should_not_reach_here();
5254
5255    bind(ok);
5256    NOT_LP64(pop(thread_reg));
5257    pop(t1);
5258  }
5259#endif
5260}
5261
5262class ControlWord {
5263 public:
5264  int32_t _value;
5265
5266  int  rounding_control() const        { return  (_value >> 10) & 3      ; }
5267  int  precision_control() const       { return  (_value >>  8) & 3      ; }
5268  bool precision() const               { return ((_value >>  5) & 1) != 0; }
5269  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
5270  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
5271  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
5272  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
5273  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
5274
5275  void print() const {
5276    // rounding control
5277    const char* rc;
5278    switch (rounding_control()) {
5279      case 0: rc = "round near"; break;
5280      case 1: rc = "round down"; break;
5281      case 2: rc = "round up  "; break;
5282      case 3: rc = "chop      "; break;
5283    };
5284    // precision control
5285    const char* pc;
5286    switch (precision_control()) {
5287      case 0: pc = "24 bits "; break;
5288      case 1: pc = "reserved"; break;
5289      case 2: pc = "53 bits "; break;
5290      case 3: pc = "64 bits "; break;
5291    };
5292    // flags
5293    char f[9];
5294    f[0] = ' ';
5295    f[1] = ' ';
5296    f[2] = (precision   ()) ? 'P' : 'p';
5297    f[3] = (underflow   ()) ? 'U' : 'u';
5298    f[4] = (overflow    ()) ? 'O' : 'o';
5299    f[5] = (zero_divide ()) ? 'Z' : 'z';
5300    f[6] = (denormalized()) ? 'D' : 'd';
5301    f[7] = (invalid     ()) ? 'I' : 'i';
5302    f[8] = '\x0';
5303    // output
5304    printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5305  }
5306
5307};
5308
5309class StatusWord {
5310 public:
5311  int32_t _value;
5312
5313  bool busy() const                    { return ((_value >> 15) & 1) != 0; }
5314  bool C3() const                      { return ((_value >> 14) & 1) != 0; }
5315  bool C2() const                      { return ((_value >> 10) & 1) != 0; }
5316  bool C1() const                      { return ((_value >>  9) & 1) != 0; }
5317  bool C0() const                      { return ((_value >>  8) & 1) != 0; }
5318  int  top() const                     { return  (_value >> 11) & 7      ; }
5319  bool error_status() const            { return ((_value >>  7) & 1) != 0; }
5320  bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
5321  bool precision() const               { return ((_value >>  5) & 1) != 0; }
5322  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
5323  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
5324  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
5325  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
5326  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
5327
5328  void print() const {
5329    // condition codes
5330    char c[5];
5331    c[0] = (C3()) ? '3' : '-';
5332    c[1] = (C2()) ? '2' : '-';
5333    c[2] = (C1()) ? '1' : '-';
5334    c[3] = (C0()) ? '0' : '-';
5335    c[4] = '\x0';
5336    // flags
5337    char f[9];
5338    f[0] = (error_status()) ? 'E' : '-';
5339    f[1] = (stack_fault ()) ? 'S' : '-';
5340    f[2] = (precision   ()) ? 'P' : '-';
5341    f[3] = (underflow   ()) ? 'U' : '-';
5342    f[4] = (overflow    ()) ? 'O' : '-';
5343    f[5] = (zero_divide ()) ? 'Z' : '-';
5344    f[6] = (denormalized()) ? 'D' : '-';
5345    f[7] = (invalid     ()) ? 'I' : '-';
5346    f[8] = '\x0';
5347    // output
5348    printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
5349  }
5350
5351};
5352
5353class TagWord {
5354 public:
5355  int32_t _value;
5356
5357  int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
5358
5359  void print() const {
5360    printf("%04x", _value & 0xFFFF);
5361  }
5362
5363};
5364
5365class FPU_Register {
5366 public:
5367  int32_t _m0;
5368  int32_t _m1;
5369  int16_t _ex;
5370
5371  bool is_indefinite() const           {
5372    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5373  }
5374
5375  void print() const {
5376    char  sign = (_ex < 0) ? '-' : '+';
5377    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
5378    printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
5379  };
5380
5381};
5382
5383class FPU_State {
5384 public:
5385  enum {
5386    register_size       = 10,
5387    number_of_registers =  8,
5388    register_mask       =  7
5389  };
5390
5391  ControlWord  _control_word;
5392  StatusWord   _status_word;
5393  TagWord      _tag_word;
5394  int32_t      _error_offset;
5395  int32_t      _error_selector;
5396  int32_t      _data_offset;
5397  int32_t      _data_selector;
5398  int8_t       _register[register_size * number_of_registers];
5399
5400  int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5401  FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
5402
5403  const char* tag_as_string(int tag) const {
5404    switch (tag) {
5405      case 0: return "valid";
5406      case 1: return "zero";
5407      case 2: return "special";
5408      case 3: return "empty";
5409    }
5410    ShouldNotReachHere();
5411    return NULL;
5412  }
5413
5414  void print() const {
5415    // print computation registers
5416    { int t = _status_word.top();
5417      for (int i = 0; i < number_of_registers; i++) {
5418        int j = (i - t) & register_mask;
5419        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5420        st(j)->print();
5421        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5422      }
5423    }
5424    printf("\n");
5425    // print control registers
5426    printf("ctrl = "); _control_word.print(); printf("\n");
5427    printf("stat = "); _status_word .print(); printf("\n");
5428    printf("tags = "); _tag_word    .print(); printf("\n");
5429  }
5430
5431};
5432
5433class Flag_Register {
5434 public:
5435  int32_t _value;
5436
5437  bool overflow() const                { return ((_value >> 11) & 1) != 0; }
5438  bool direction() const               { return ((_value >> 10) & 1) != 0; }
5439  bool sign() const                    { return ((_value >>  7) & 1) != 0; }
5440  bool zero() const                    { return ((_value >>  6) & 1) != 0; }
5441  bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
5442  bool parity() const                  { return ((_value >>  2) & 1) != 0; }
5443  bool carry() const                   { return ((_value >>  0) & 1) != 0; }
5444
5445  void print() const {
5446    // flags
5447    char f[8];
5448    f[0] = (overflow       ()) ? 'O' : '-';
5449    f[1] = (direction      ()) ? 'D' : '-';
5450    f[2] = (sign           ()) ? 'S' : '-';
5451    f[3] = (zero           ()) ? 'Z' : '-';
5452    f[4] = (auxiliary_carry()) ? 'A' : '-';
5453    f[5] = (parity         ()) ? 'P' : '-';
5454    f[6] = (carry          ()) ? 'C' : '-';
5455    f[7] = '\x0';
5456    // output
5457    printf("%08x  flags = %s", _value, f);
5458  }
5459
5460};
5461
5462class IU_Register {
5463 public:
5464  int32_t _value;
5465
5466  void print() const {
5467    printf("%08x  %11d", _value, _value);
5468  }
5469
5470};
5471
5472class IU_State {
5473 public:
5474  Flag_Register _eflags;
5475  IU_Register   _rdi;
5476  IU_Register   _rsi;
5477  IU_Register   _rbp;
5478  IU_Register   _rsp;
5479  IU_Register   _rbx;
5480  IU_Register   _rdx;
5481  IU_Register   _rcx;
5482  IU_Register   _rax;
5483
5484  void print() const {
5485    // computation registers
5486    printf("rax,  = "); _rax.print(); printf("\n");
5487    printf("rbx,  = "); _rbx.print(); printf("\n");
5488    printf("rcx  = "); _rcx.print(); printf("\n");
5489    printf("rdx  = "); _rdx.print(); printf("\n");
5490    printf("rdi  = "); _rdi.print(); printf("\n");
5491    printf("rsi  = "); _rsi.print(); printf("\n");
5492    printf("rbp,  = "); _rbp.print(); printf("\n");
5493    printf("rsp  = "); _rsp.print(); printf("\n");
5494    printf("\n");
5495    // control registers
5496    printf("flgs = "); _eflags.print(); printf("\n");
5497  }
5498};
5499
5500
5501class CPU_State {
5502 public:
5503  FPU_State _fpu_state;
5504  IU_State  _iu_state;
5505
5506  void print() const {
5507    printf("--------------------------------------------------\n");
5508    _iu_state .print();
5509    printf("\n");
5510    _fpu_state.print();
5511    printf("--------------------------------------------------\n");
5512  }
5513
5514};
5515
5516
5517static void _print_CPU_state(CPU_State* state) {
5518  state->print();
5519};
5520
5521
5522void MacroAssembler::print_CPU_state() {
5523  push_CPU_state();
5524  push(rsp);                // pass CPU state
5525  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5526  addptr(rsp, wordSize);       // discard argument
5527  pop_CPU_state();
5528}
5529
5530
5531static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
5532  static int counter = 0;
5533  FPU_State* fs = &state->_fpu_state;
5534  counter++;
5535  // For leaf calls, only verify that the top few elements remain empty.
5536  // We only need 1 empty at the top for C2 code.
5537  if( stack_depth < 0 ) {
5538    if( fs->tag_for_st(7) != 3 ) {
5539      printf("FPR7 not empty\n");
5540      state->print();
5541      assert(false, "error");
5542      return false;
5543    }
5544    return true;                // All other stack states do not matter
5545  }
5546
5547  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
5548         "bad FPU control word");
5549
5550  // compute stack depth
5551  int i = 0;
5552  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
5553  int d = i;
5554  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
5555  // verify findings
5556  if (i != FPU_State::number_of_registers) {
5557    // stack not contiguous
5558    printf("%s: stack not contiguous at ST%d\n", s, i);
5559    state->print();
5560    assert(false, "error");
5561    return false;
5562  }
5563  // check if computed stack depth corresponds to expected stack depth
5564  if (stack_depth < 0) {
5565    // expected stack depth is -stack_depth or less
5566    if (d > -stack_depth) {
5567      // too many elements on the stack
5568      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
5569      state->print();
5570      assert(false, "error");
5571      return false;
5572    }
5573  } else {
5574    // expected stack depth is stack_depth
5575    if (d != stack_depth) {
5576      // wrong stack depth
5577      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
5578      state->print();
5579      assert(false, "error");
5580      return false;
5581    }
5582  }
5583  // everything is cool
5584  return true;
5585}
5586
5587
5588void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
5589  if (!VerifyFPU) return;
5590  push_CPU_state();
5591  push(rsp);                // pass CPU state
5592  ExternalAddress msg((address) s);
5593  // pass message string s
5594  pushptr(msg.addr());
5595  push(stack_depth);        // pass stack depth
5596  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
5597  addptr(rsp, 3 * wordSize);   // discard arguments
5598  // check for error
5599  { Label L;
5600    testl(rax, rax);
5601    jcc(Assembler::notZero, L);
5602    int3();                  // break if error condition
5603    bind(L);
5604  }
5605  pop_CPU_state();
5606}
5607
5608void MacroAssembler::restore_cpu_control_state_after_jni() {
5609  // Either restore the MXCSR register after returning from the JNI Call
5610  // or verify that it wasn't changed (with -Xcheck:jni flag).
5611  if (VM_Version::supports_sse()) {
5612    if (RestoreMXCSROnJNICalls) {
5613      ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
5614    } else if (CheckJNICalls) {
5615      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5616    }
5617  }
5618  if (VM_Version::supports_avx()) {
5619    // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5620    vzeroupper();
5621  }
5622
5623#ifndef _LP64
5624  // Either restore the x87 floating-point control word after returning
5625  // from the JNI call or verify that it wasn't changed.
5626  if (CheckJNICalls) {
5627    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
5628  }
5629#endif // _LP64
5630}
5631
5632
5633void MacroAssembler::load_klass(Register dst, Register src) {
5634#ifdef _LP64
5635  if (UseCompressedClassPointers) {
5636    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5637    decode_klass_not_null(dst);
5638  } else
5639#endif
5640    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5641}
5642
5643void MacroAssembler::load_prototype_header(Register dst, Register src) {
5644  load_klass(dst, src);
5645  movptr(dst, Address(dst, Klass::prototype_header_offset()));
5646}
5647
5648void MacroAssembler::store_klass(Register dst, Register src) {
5649#ifdef _LP64
5650  if (UseCompressedClassPointers) {
5651    encode_klass_not_null(src);
5652    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5653  } else
5654#endif
5655    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5656}
5657
5658void MacroAssembler::load_heap_oop(Register dst, Address src) {
5659#ifdef _LP64
5660  // FIXME: Must change all places where we try to load the klass.
5661  if (UseCompressedOops) {
5662    movl(dst, src);
5663    decode_heap_oop(dst);
5664  } else
5665#endif
5666    movptr(dst, src);
5667}
5668
5669  // Does not do verification; generates fixed-size code
5670void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
5671#ifdef _LP64
5672  if (UseCompressedOops) {
5673    movl(dst, src);
5674    decode_heap_oop_not_null(dst);
5675  } else
5676#endif
5677    movptr(dst, src);
5678}
5679
5680void MacroAssembler::store_heap_oop(Address dst, Register src) {
5681#ifdef _LP64
5682  if (UseCompressedOops) {
5683    assert(!dst.uses(src), "not enough registers");
5684    encode_heap_oop(src);
5685    movl(dst, src);
5686  } else
5687#endif
5688    movptr(dst, src);
5689}
5690
5691void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
5692  assert_different_registers(src1, tmp);
5693#ifdef _LP64
5694  if (UseCompressedOops) {
5695    bool did_push = false;
5696    if (tmp == noreg) {
5697      tmp = rax;
5698      push(tmp);
5699      did_push = true;
5700      assert(!src2.uses(rsp), "can't push");
5701    }
5702    load_heap_oop(tmp, src2);
5703    cmpptr(src1, tmp);
5704    if (did_push)  pop(tmp);
5705  } else
5706#endif
5707    cmpptr(src1, src2);
5708}
5709
5710// Used for storing NULLs.
5711void MacroAssembler::store_heap_oop_null(Address dst) {
5712#ifdef _LP64
5713  if (UseCompressedOops) {
5714    movl(dst, (int32_t)NULL_WORD);
5715  } else {
5716    movslq(dst, (int32_t)NULL_WORD);
5717  }
5718#else
5719  movl(dst, (int32_t)NULL_WORD);
5720#endif
5721}
5722
5723#ifdef _LP64
5724void MacroAssembler::store_klass_gap(Register dst, Register src) {
5725  if (UseCompressedClassPointers) {
5726    // Store to klass gap in destination
5727    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5728  }
5729}
5730
5731#ifdef ASSERT
5732void MacroAssembler::verify_heapbase(const char* msg) {
5733  assert (UseCompressedOops, "should be compressed");
5734  assert (Universe::heap() != NULL, "java heap should be initialized");
5735  if (CheckCompressedOops) {
5736    Label ok;
5737    push(rscratch1); // cmpptr trashes rscratch1
5738    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
5739    jcc(Assembler::equal, ok);
5740    STOP(msg);
5741    bind(ok);
5742    pop(rscratch1);
5743  }
5744}
5745#endif
5746
5747// Algorithm must match oop.inline.hpp encode_heap_oop.
5748void MacroAssembler::encode_heap_oop(Register r) {
5749#ifdef ASSERT
5750  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5751#endif
5752  verify_oop(r, "broken oop in encode_heap_oop");
5753  if (Universe::narrow_oop_base() == NULL) {
5754    if (Universe::narrow_oop_shift() != 0) {
5755      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5756      shrq(r, LogMinObjAlignmentInBytes);
5757    }
5758    return;
5759  }
5760  testq(r, r);
5761  cmovq(Assembler::equal, r, r12_heapbase);
5762  subq(r, r12_heapbase);
5763  shrq(r, LogMinObjAlignmentInBytes);
5764}
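
// A sketch of the transformation above, matching the C++ encode_heap_oop in
// oop.inline.hpp, with base/shift taken from Universe::narrow_oop_base() and
// Universe::narrow_oop_shift():
//
//   narrow = (oop == NULL) ? 0 : (uintptr_t)(oop - base) >> shift;
//
// The cmovq substitutes the heap base for a NULL oop so the subtraction
// yields zero.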
5765
5766void MacroAssembler::encode_heap_oop_not_null(Register r) {
5767#ifdef ASSERT
5768  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5769  if (CheckCompressedOops) {
5770    Label ok;
5771    testq(r, r);
5772    jcc(Assembler::notEqual, ok);
5773    STOP("null oop passed to encode_heap_oop_not_null");
5774    bind(ok);
5775  }
5776#endif
5777  verify_oop(r, "broken oop in encode_heap_oop_not_null");
5778  if (Universe::narrow_oop_base() != NULL) {
5779    subq(r, r12_heapbase);
5780  }
5781  if (Universe::narrow_oop_shift() != 0) {
5782    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5783    shrq(r, LogMinObjAlignmentInBytes);
5784  }
5785}
5786
5787void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5788#ifdef ASSERT
5789  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5790  if (CheckCompressedOops) {
5791    Label ok;
5792    testq(src, src);
5793    jcc(Assembler::notEqual, ok);
5794    STOP("null oop passed to encode_heap_oop_not_null2");
5795    bind(ok);
5796  }
5797#endif
5798  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
5799  if (dst != src) {
5800    movq(dst, src);
5801  }
5802  if (Universe::narrow_oop_base() != NULL) {
5803    subq(dst, r12_heapbase);
5804  }
5805  if (Universe::narrow_oop_shift() != 0) {
5806    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5807    shrq(dst, LogMinObjAlignmentInBytes);
5808  }
5809}
5810
5811void  MacroAssembler::decode_heap_oop(Register r) {
5812#ifdef ASSERT
5813  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5814#endif
5815  if (Universe::narrow_oop_base() == NULL) {
5816    if (Universe::narrow_oop_shift() != 0) {
5817      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5818      shlq(r, LogMinObjAlignmentInBytes);
5819    }
5820  } else {
5821    Label done;
5822    shlq(r, LogMinObjAlignmentInBytes);
5823    jccb(Assembler::equal, done);
5824    addq(r, r12_heapbase);
5825    bind(done);
5826  }
5827  verify_oop(r, "broken oop in decode_heap_oop");
5828}
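
// Decoding reverses the encoding above; a sketch with the same base/shift:
//
//   oop = (narrow == 0) ? NULL : (oop)((narrow << shift) + base);
//
// The jccb(equal, done) skips the base addition when the shifted value is
// zero, so a NULL narrow oop decodes back to NULL.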
5829
5830void  MacroAssembler::decode_heap_oop_not_null(Register r) {
5831  // Note: it will change flags
5832  assert (UseCompressedOops, "should only be used for compressed headers");
5833  assert (Universe::heap() != NULL, "java heap should be initialized");
5834  // Cannot assert, unverified entry point counts instructions (see .ad file)
5835  // vtableStubs also counts instructions in pd_code_size_limit.
5836  // Also do not verify_oop as this is called by verify_oop.
5837  if (Universe::narrow_oop_shift() != 0) {
5838    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5839    shlq(r, LogMinObjAlignmentInBytes);
5840    if (Universe::narrow_oop_base() != NULL) {
5841      addq(r, r12_heapbase);
5842    }
5843  } else {
5844    assert (Universe::narrow_oop_base() == NULL, "sanity");
5845  }
5846}
5847
5848void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5849  // Note: it will change flags
5850  assert (UseCompressedOops, "should only be used for compressed headers");
5851  assert (Universe::heap() != NULL, "java heap should be initialized");
5852  // Cannot assert, unverified entry point counts instructions (see .ad file)
5853  // vtableStubs also counts instructions in pd_code_size_limit.
5854  // Also do not verify_oop as this is called by verify_oop.
5855  if (Universe::narrow_oop_shift() != 0) {
5856    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5857    if (LogMinObjAlignmentInBytes == Address::times_8) {
5858      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5859    } else {
5860      if (dst != src) {
5861        movq(dst, src);
5862      }
5863      shlq(dst, LogMinObjAlignmentInBytes);
5864      if (Universe::narrow_oop_base() != NULL) {
5865        addq(dst, r12_heapbase);
5866      }
5867    }
5868  } else {
5869    assert (Universe::narrow_oop_base() == NULL, "sanity");
5870    if (dst != src) {
5871      movq(dst, src);
5872    }
5873  }
5874}
5875
5876void MacroAssembler::encode_klass_not_null(Register r) {
5877  if (Universe::narrow_klass_base() != NULL) {
5878    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
5879    assert(r != r12_heapbase, "Encoding a klass in r12");
5880    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
5881    subq(r, r12_heapbase);
5882  }
5883  if (Universe::narrow_klass_shift() != 0) {
5884    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5885    shrq(r, LogKlassAlignmentInBytes);
5886  }
5887  if (Universe::narrow_klass_base() != NULL) {
5888    reinit_heapbase();
5889  }
5890}
5891
5892void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5893  if (dst == src) {
5894    encode_klass_not_null(src);
5895  } else {
5896    if (Universe::narrow_klass_base() != NULL) {
5897      mov64(dst, (int64_t)Universe::narrow_klass_base());
5898      negq(dst);
5899      addq(dst, src);
5900    } else {
5901      movptr(dst, src);
5902    }
5903    if (Universe::narrow_klass_shift() != 0) {
5904      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5905      shrq(dst, LogKlassAlignmentInBytes);
5906    }
5907  }
5908}
5909
5910// Function instr_size_for_decode_klass_not_null() counts the instructions
5911// generated by decode_klass_not_null(register r) and reinit_heapbase(),
5912// when (Universe::heap() != NULL).  Hence, if the instructions they
5913// generate change, then this method needs to be updated.
5914int MacroAssembler::instr_size_for_decode_klass_not_null() {
5915  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
5916  if (Universe::narrow_klass_base() != NULL) {
5917    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
5918    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
5919  } else {
5920    // longest load decode klass function, mov64, leaq
5921    return 16;
5922  }
5923}
5924
5925// !!! If the instructions that get generated here change then function
5926// instr_size_for_decode_klass_not_null() needs to get updated.
5927void  MacroAssembler::decode_klass_not_null(Register r) {
5928  // Note: it will change flags
5929  assert (UseCompressedClassPointers, "should only be used for compressed headers");
5930  assert(r != r12_heapbase, "Decoding a klass in r12");
5931  // Cannot assert, unverified entry point counts instructions (see .ad file)
5932  // vtableStubs also counts instructions in pd_code_size_limit.
5933  // Also do not verify_oop as this is called by verify_oop.
5934  if (Universe::narrow_klass_shift() != 0) {
5935    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5936    shlq(r, LogKlassAlignmentInBytes);
5937  }
5938  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
5939  if (Universe::narrow_klass_base() != NULL) {
5940    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
5941    addq(r, r12_heapbase);
5942    reinit_heapbase();
5943  }
5944}
5945
5946void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5947  // Note: it will change flags
5948  assert (UseCompressedClassPointers, "should only be used for compressed headers");
5949  if (dst == src) {
5950    decode_klass_not_null(dst);
5951  } else {
5952    // Cannot assert, unverified entry point counts instructions (see .ad file)
5953    // vtableStubs also counts instructions in pd_code_size_limit.
5954    // Also do not verify_oop as this is called by verify_oop.
5955    mov64(dst, (int64_t)Universe::narrow_klass_base());
5956    if (Universe::narrow_klass_shift() != 0) {
5957      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5958      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
5959      leaq(dst, Address(dst, src, Address::times_8, 0));
5960    } else {
5961      addq(dst, src);
5962    }
5963  }
5964}
5965
5966void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5967  assert (UseCompressedOops, "should only be used for compressed headers");
5968  assert (Universe::heap() != NULL, "java heap should be initialized");
5969  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5970  int oop_index = oop_recorder()->find_index(obj);
5971  RelocationHolder rspec = oop_Relocation::spec(oop_index);
5972  mov_narrow_oop(dst, oop_index, rspec);
5973}
5974
5975void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5976  assert (UseCompressedOops, "should only be used for compressed headers");
5977  assert (Universe::heap() != NULL, "java heap should be initialized");
5978  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5979  int oop_index = oop_recorder()->find_index(obj);
5980  RelocationHolder rspec = oop_Relocation::spec(oop_index);
5981  mov_narrow_oop(dst, oop_index, rspec);
5982}
5983
5984void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5985  assert (UseCompressedClassPointers, "should only be used for compressed headers");
5986  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5987  int klass_index = oop_recorder()->find_index(k);
5988  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5989  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
5990}
5991
5992void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
5993  assert (UseCompressedClassPointers, "should only be used for compressed headers");
5994  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5995  int klass_index = oop_recorder()->find_index(k);
5996  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5997  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
5998}
5999
6000void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6001  assert (UseCompressedOops, "should only be used for compressed headers");
6002  assert (Universe::heap() != NULL, "java heap should be initialized");
6003  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6004  int oop_index = oop_recorder()->find_index(obj);
6005  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6006  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6007}
6008
6009void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6010  assert (UseCompressedOops, "should only be used for compressed headers");
6011  assert (Universe::heap() != NULL, "java heap should be initialized");
6012  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6013  int oop_index = oop_recorder()->find_index(obj);
6014  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6015  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6016}
6017
6018void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6019  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6020  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6021  int klass_index = oop_recorder()->find_index(k);
6022  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6023  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6024}
6025
6026void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6027  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6028  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6029  int klass_index = oop_recorder()->find_index(k);
6030  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6031  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6032}
6033
6034void MacroAssembler::reinit_heapbase() {
6035  if (UseCompressedOops || UseCompressedClassPointers) {
6036    if (Universe::heap() != NULL) {
6037      if (Universe::narrow_oop_base() == NULL) {
6038        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6039      } else {
6040        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
6041      }
6042    } else {
6043      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
6044    }
6045  }
6046}
6047
6048#endif // _LP64
6049
6050
6051// C2 compiled method's prolog code.
6052void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
6053
6054  // WARNING: Initial instruction MUST be 5 bytes or longer so that
6055  // NativeJump::patch_verified_entry will be able to patch out the entry
6056  // code safely. The push to verify stack depth is ok at 5 bytes,
6057  // the frame allocation can be either 3 or 6 bytes. So if we don't do
6058  // stack bang then we must use the 6 byte frame allocation even if
6059  // we have no frame. :-(
6060  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
6061
6062  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6063  // Remove word for return addr
6064  framesize -= wordSize;
6065  stack_bang_size -= wordSize;
6066
6067  // Calls to C2R adapters often do not accept exceptional returns.
6068  // We require that their callers bang the stack for them.  But be careful, because
6069  // some VM calls (such as call site linkage) can use several kilobytes of
6070  // stack.  The stack safety zone should account for that, though.
6071  // See bugs 4446381, 4468289, 4497237.
6072  if (stack_bang_size > 0) {
6073    generate_stack_overflow_check(stack_bang_size);
6074
6075    // We always push rbp, so that on return to interpreter rbp, will be
6076    // restored correctly and we can correct the stack.
6077    push(rbp);
6078    // Save caller's stack pointer into RBP if the frame pointer is preserved.
6079    if (PreserveFramePointer) {
6080      mov(rbp, rsp);
6081    }
6082    // Remove word for ebp
6083    framesize -= wordSize;
6084
6085    // Create frame
6086    if (framesize) {
6087      subptr(rsp, framesize);
6088    }
6089  } else {
6090    // Create frame (force generation of a 4 byte immediate value)
6091    subptr_imm32(rsp, framesize);
6092
6093    // Save RBP register now.
6094    framesize -= wordSize;
6095    movptr(Address(rsp, framesize), rbp);
6096    // Save caller's stack pointer into RBP if the frame pointer is preserved.
6097    if (PreserveFramePointer) {
6098      movptr(rbp, rsp);
6099      addptr(rbp, framesize + wordSize);
6100    }
6101  }
6102
6103  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
6104    framesize -= wordSize;
6105    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
6106  }
6107
6108#ifndef _LP64
6109  // If method sets FPU control word do it now
6110  if (fp_mode_24b) {
6111    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
6112  }
6113  if (UseSSE >= 2 && VerifyFPU) {
6114    verify_FPU(0, "FPU stack must be clean on entry");
6115  }
6116#endif
6117
6118#ifdef ASSERT
6119  if (VerifyStackAtCalls) {
6120    Label L;
6121    push(rax);
6122    mov(rax, rsp);
6123    andptr(rax, StackAlignmentInBytes-1);
6124    cmpptr(rax, StackAlignmentInBytes-wordSize);
6125    pop(rax);
6126    jcc(Assembler::equal, L);
6127    STOP("Stack is not properly aligned!");
6128    bind(L);
6129  }
6130#endif
6131
6132}
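// Rough sketch (not generated code) of the frame laid out by verified_entry() above,
// stack growing downward, for the stack-bang path:
//
//   caller frame
//   [ return address ]                  <- pushed by the call instruction
//   [ saved rbp      ]                  <- push(rbp)
//   [ framesize bytes of spill/locals ] <- rsp after subptr(rsp, framesize)
//
// The no-bang path allocates the same slots with a single subptr_imm32 and stores
// rbp with movptr instead of pushing it.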
6133
6134void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
6135  // cnt - number of qwords (8-byte words).
6136  // base - start address, qword aligned.
6137  assert(base==rdi, "base register must be edi for rep stos");
6138  assert(tmp==rax,   "tmp register must be eax for rep stos");
6139  assert(cnt==rcx,   "cnt register must be ecx for rep stos");
6140
6141  xorptr(tmp, tmp);
6142  if (UseFastStosb) {
6143    shlptr(cnt,3); // convert to number of bytes
6144    rep_stosb();
6145  } else {
6146    NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
6147    rep_stos();
6148  }
6149}
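// Illustrative only: apart from the rep stos encodings, clear_mem() above behaves
// like the scalar loop
//   for (size_t i = 0; i < cnt; i++) {
//     ((julong*)base)[i] = 0;            // cnt counts 8-byte words
//   }
// UseFastStosb just converts cnt to a byte count and uses rep stosb instead.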
6150
6151// IndexOf for constant substrings with size >= 8 chars
6152// which don't need to be loaded through stack.
6153void MacroAssembler::string_indexofC8(Register str1, Register str2,
6154                                      Register cnt1, Register cnt2,
6155                                      int int_cnt2,  Register result,
6156                                      XMMRegister vec, Register tmp) {
6157  ShortBranchVerifier sbv(this);
6158  assert(UseSSE42Intrinsics, "SSE4.2 is required");
6159
6160  // This method uses pcmpestri instruction with bound registers
6161  //   inputs:
6162  //     xmm - substring
6163  //     rax - substring length (elements count)
6164  //     mem - scanned string
6165  //     rdx - string length (elements count)
6166  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6167  //   outputs:
6168  //     rcx - matched index in string
6169  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6170
6171  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
6172        RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
6173        MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
6174
6175  // Note, inline_string_indexOf() generates checks:
6176  // if (substr.count > string.count) return -1;
6177  // if (substr.count == 0) return 0;
6178  assert(int_cnt2 >= 8, "this code isused only for cnt2 >= 8 chars");
6179
6180  // Load substring.
6181  movdqu(vec, Address(str2, 0));
6182  movl(cnt2, int_cnt2);
6183  movptr(result, str1); // string addr
6184
6185  if (int_cnt2 > 8) {
6186    jmpb(SCAN_TO_SUBSTR);
6187
6188    // Reload substr for rescan, this code
6189    // is executed only for large substrings (> 8 chars)
6190    bind(RELOAD_SUBSTR);
6191    movdqu(vec, Address(str2, 0));
6192    negptr(cnt2); // Jumped here with negative cnt2, convert to positive
6193
6194    bind(RELOAD_STR);
6195    // We came here after the beginning of the substring was
6196    // matched but the rest of it was not, so we need to search
6197    // again. Start from the next element after the previous match.
6198
6199    // cnt2 is the number of remaining substring elements and
6200    // cnt1 is the number of remaining string elements when cmp failed.
6201    // Restored cnt1 = cnt1 - cnt2 + int_cnt2
6202    subl(cnt1, cnt2);
6203    addl(cnt1, int_cnt2);
6204    movl(cnt2, int_cnt2); // Now restore cnt2
6205
6206    decrementl(cnt1);     // Shift to next element
6207    cmpl(cnt1, cnt2);
6208  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6209
6210    addptr(result, 2);
6211
6212  } // (int_cnt2 > 8)
6213
6214  // Scan string for start of substr in 16-byte vectors
6215  bind(SCAN_TO_SUBSTR);
6216  pcmpestri(vec, Address(result, 0), 0x0d);
6217  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
6218  subl(cnt1, 8);
6219  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6220  cmpl(cnt1, cnt2);
6221  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6222  addptr(result, 16);
6223  jmpb(SCAN_TO_SUBSTR);
6224
6225  // Found a potential substr
6226  bind(FOUND_CANDIDATE);
6227  // Matched whole vector if first element matched (tmp(rcx) == 0).
6228  if (int_cnt2 == 8) {
6229    jccb(Assembler::overflow, RET_FOUND);    // OF == 1
6230  } else { // int_cnt2 > 8
6231    jccb(Assembler::overflow, FOUND_SUBSTR);
6232  }
6233  // After pcmpestri tmp(rcx) contains matched element index
6234  // Compute start addr of substr
6235  lea(result, Address(result, tmp, Address::times_2));
6236
6237  // Make sure string is still long enough
6238  subl(cnt1, tmp);
6239  cmpl(cnt1, cnt2);
6240  if (int_cnt2 == 8) {
6241    jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6242  } else { // int_cnt2 > 8
6243    jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
6244  }
6245  // Left less than substring.
6246
6247  bind(RET_NOT_FOUND);
6248  movl(result, -1);
6249  jmpb(EXIT);
6250
6251  if (int_cnt2 > 8) {
6252    // This code is optimized for the case when the whole substring
6253    // is matched once its head is matched.
6254    bind(MATCH_SUBSTR_HEAD);
6255    pcmpestri(vec, Address(result, 0), 0x0d);
6256    // Reload only the string if it does not match
6257    jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
6258
6259    Label CONT_SCAN_SUBSTR;
6260    // Compare the rest of substring (> 8 chars).
6261    bind(FOUND_SUBSTR);
6262    // First 8 chars are already matched.
6263    negptr(cnt2);
6264    addptr(cnt2, 8);
6265
6266    bind(SCAN_SUBSTR);
6267    subl(cnt1, 8);
6268    cmpl(cnt2, -8); // Do not read beyond substring
6269    jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
6270    // Back-up strings to avoid reading beyond substring:
6271    // cnt1 = cnt1 - cnt2 + 8
6272    addl(cnt1, cnt2); // cnt2 is negative
6273    addl(cnt1, 8);
6274    movl(cnt2, 8); negptr(cnt2);
6275    bind(CONT_SCAN_SUBSTR);
6276    if (int_cnt2 < (int)G) {
6277      movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
6278      pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
6279    } else {
6280      // calculate index in register to avoid integer overflow (int_cnt2*2)
6281      movl(tmp, int_cnt2);
6282      addptr(tmp, cnt2);
6283      movdqu(vec, Address(str2, tmp, Address::times_2, 0));
6284      pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
6285    }
6286    // Need to reload string pointers if the whole vector did not match
6287    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6288    addptr(cnt2, 8);
6289    jcc(Assembler::negative, SCAN_SUBSTR);
6290    // Fall through if found full substring
6291
6292  } // (int_cnt2 > 8)
6293
6294  bind(RET_FOUND);
6295  // Found result if we matched full small substring.
6296  // Compute substr offset
6297  subptr(result, str1);
6298  shrl(result, 1); // index
6299  bind(EXIT);
6300
6301} // string_indexofC8
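// Illustrative only: for a constant substring of int_cnt2 >= 8 chars the intrinsic
// above returns the same index as a scalar search, roughly
//   for (int i = 0; i + int_cnt2 <= cnt1; i++) {
//     if (rangesEqual(str1, i, str2, 0, int_cnt2)) return i;   // rangesEqual is a
//   }                                                          // hypothetical helper
//   return -1;
// vectorized with pcmpestri over 8-char chunks.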
6302
6303// Small strings are loaded through the stack if they cross a page boundary.
6304void MacroAssembler::string_indexof(Register str1, Register str2,
6305                                    Register cnt1, Register cnt2,
6306                                    int int_cnt2,  Register result,
6307                                    XMMRegister vec, Register tmp) {
6308  ShortBranchVerifier sbv(this);
6309  assert(UseSSE42Intrinsics, "SSE4.2 is required");
6310  //
6311  // int_cnt2 is length of small (< 8 chars) constant substring
6312  // or (-1) for non constant substring in which case its length
6313  // is in cnt2 register.
6314  //
6315  // Note, inline_string_indexOf() generates checks:
6316  // if (substr.count > string.count) return -1;
6317  // if (substr.count == 0) return 0;
6318  //
6319  assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
6320
6321  // This method uses pcmpestri instruction with bound registers
6322  //   inputs:
6323  //     xmm - substring
6324  //     rax - substring length (elements count)
6325  //     mem - scanned string
6326  //     rdx - string length (elements count)
6327  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6328  //   outputs:
6329  //     rcx - matched index in string
6330  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6331
6332  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
6333        RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
6334        FOUND_CANDIDATE;
6335
6336  { //========================================================
6337    // We don't know where these strings are located
6338    // and we can't read beyond them. Load them through stack.
6339    Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
6340
6341    movptr(tmp, rsp); // save old SP
6342
6343    if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
6344      if (int_cnt2 == 1) {  // One char
6345        load_unsigned_short(result, Address(str2, 0));
6346        movdl(vec, result); // move 32 bits
6347      } else if (int_cnt2 == 2) { // Two chars
6348        movdl(vec, Address(str2, 0)); // move 32 bits
6349      } else if (int_cnt2 == 4) { // Four chars
6350        movq(vec, Address(str2, 0));  // move 64 bits
6351      } else { // cnt2 = { 3, 5, 6, 7 }
6352        // Array header size is 12 bytes in 32-bit VM
6353        // + 6 bytes for 3 chars == 18 bytes,
6354        // enough space to load vec and shift.
6355        assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
6356        movdqu(vec, Address(str2, (int_cnt2*2)-16));
6357        psrldq(vec, 16-(int_cnt2*2));
6358      }
6359    } else { // not constant substring
6360      cmpl(cnt2, 8);
6361      jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
6362
6363      // We can read beyond the string if str2+16 does not cross a page boundary
6364      // since heaps are aligned and mapped by pages.
6365      assert(os::vm_page_size() < (int)G, "default page should be small");
6366      movl(result, str2); // We need only low 32 bits
6367      andl(result, (os::vm_page_size()-1));
6368      cmpl(result, (os::vm_page_size()-16));
6369      jccb(Assembler::belowEqual, CHECK_STR);
6370
6371      // Move small strings to the stack to allow loading 16 bytes into vec.
6372      subptr(rsp, 16);
6373      int stk_offset = wordSize-2;
6374      push(cnt2);
6375
6376      bind(COPY_SUBSTR);
6377      load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
6378      movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6379      decrement(cnt2);
6380      jccb(Assembler::notZero, COPY_SUBSTR);
6381
6382      pop(cnt2);
6383      movptr(str2, rsp);  // New substring address
6384    } // non constant
6385
6386    bind(CHECK_STR);
6387    cmpl(cnt1, 8);
6388    jccb(Assembler::aboveEqual, BIG_STRINGS);
6389
6390    // Check cross page boundary.
6391    movl(result, str1); // We need only low 32 bits
6392    andl(result, (os::vm_page_size()-1));
6393    cmpl(result, (os::vm_page_size()-16));
6394    jccb(Assembler::belowEqual, BIG_STRINGS);
6395
6396    subptr(rsp, 16);
6397    int stk_offset = -2;
6398    if (int_cnt2 < 0) { // not constant
6399      push(cnt2);
6400      stk_offset += wordSize;
6401    }
6402    movl(cnt2, cnt1);
6403
6404    bind(COPY_STR);
6405    load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
6406    movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6407    decrement(cnt2);
6408    jccb(Assembler::notZero, COPY_STR);
6409
6410    if (int_cnt2 < 0) { // not constant
6411      pop(cnt2);
6412    }
6413    movptr(str1, rsp);  // New string address
6414
6415    bind(BIG_STRINGS);
6416    // Load substring.
6417    if (int_cnt2 < 0) { // -1
6418      movdqu(vec, Address(str2, 0));
6419      push(cnt2);       // substr count
6420      push(str2);       // substr addr
6421      push(str1);       // string addr
6422    } else {
6423      // Small (< 8 chars) constant substrings are loaded already.
6424      movl(cnt2, int_cnt2);
6425    }
6426    push(tmp);  // original SP
6427
6428  } // Finished loading
6429
6430  //========================================================
6431  // Start search
6432  //
6433
6434  movptr(result, str1); // string addr
6435
6436  if (int_cnt2  < 0) {  // Only for non constant substring
6437    jmpb(SCAN_TO_SUBSTR);
6438
6439    // SP saved at sp+0
6440    // String saved at sp+1*wordSize
6441    // Substr saved at sp+2*wordSize
6442    // Substr count saved at sp+3*wordSize
6443
6444    // Reload substr for rescan, this code
6445    // is executed only for large substrings (> 8 chars)
6446    bind(RELOAD_SUBSTR);
6447    movptr(str2, Address(rsp, 2*wordSize));
6448    movl(cnt2, Address(rsp, 3*wordSize));
6449    movdqu(vec, Address(str2, 0));
6450    // We came here after the beginning of the substring was
6451    // matched but the rest of it was not, so we need to search
6452    // again. Start from the next element after the previous match.
6453    subptr(str1, result); // Restore counter
6454    shrl(str1, 1);
6455    addl(cnt1, str1);
6456    decrementl(cnt1);   // Shift to next element
6457    cmpl(cnt1, cnt2);
6458    jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6459
6460    addptr(result, 2);
6461  } // non constant
6462
6463  // Scan string for start of substr in 16-byte vectors
6464  bind(SCAN_TO_SUBSTR);
6465  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6466  pcmpestri(vec, Address(result, 0), 0x0d);
6467  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
6468  subl(cnt1, 8);
6469  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6470  cmpl(cnt1, cnt2);
6471  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6472  addptr(result, 16);
6473
6474  bind(ADJUST_STR);
6475  cmpl(cnt1, 8); // Do not read beyond string
6476  jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6477  // Back-up string to avoid reading beyond string.
6478  lea(result, Address(result, cnt1, Address::times_2, -16));
6479  movl(cnt1, 8);
6480  jmpb(SCAN_TO_SUBSTR);
6481
6482  // Found a potential substr
6483  bind(FOUND_CANDIDATE);
6484  // After pcmpestri tmp(rcx) contains matched element index
6485
6486  // Make sure string is still long enough
6487  subl(cnt1, tmp);
6488  cmpl(cnt1, cnt2);
6489  jccb(Assembler::greaterEqual, FOUND_SUBSTR);
6490  // Left less than substring.
6491
6492  bind(RET_NOT_FOUND);
6493  movl(result, -1);
6494  jmpb(CLEANUP);
6495
6496  bind(FOUND_SUBSTR);
6497  // Compute start addr of substr
6498  lea(result, Address(result, tmp, Address::times_2));
6499
6500  if (int_cnt2 > 0) { // Constant substring
6501    // Repeat search for small substring (< 8 chars)
6502    // from new point without reloading substring.
6503    // Have to check that we don't read beyond string.
6504    cmpl(tmp, 8-int_cnt2);
6505    jccb(Assembler::greater, ADJUST_STR);
6506    // Fall through if matched whole substring.
6507  } else { // non constant
6508    assert(int_cnt2 == -1, "should be != 0");
6509
6510    addl(tmp, cnt2);
6511    // Found result if we matched whole substring.
6512    cmpl(tmp, 8);
6513    jccb(Assembler::lessEqual, RET_FOUND);
6514
6515    // Repeat search for small substring (<= 8 chars)
6516    // from new point 'str1' without reloading substring.
6517    cmpl(cnt2, 8);
6518    // Have to check that we don't read beyond string.
6519    jccb(Assembler::lessEqual, ADJUST_STR);
6520
6521    Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
6522    // Compare the rest of substring (> 8 chars).
6523    movptr(str1, result);
6524
6525    cmpl(tmp, cnt2);
6526    // First 8 chars are already matched.
6527    jccb(Assembler::equal, CHECK_NEXT);
6528
6529    bind(SCAN_SUBSTR);
6530    pcmpestri(vec, Address(str1, 0), 0x0d);
6531    // Need to reload string pointers if the whole vector did not match
6532    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6533
6534    bind(CHECK_NEXT);
6535    subl(cnt2, 8);
6536    jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
6537    addptr(str1, 16);
6538    addptr(str2, 16);
6539    subl(cnt1, 8);
6540    cmpl(cnt2, 8); // Do not read beyond substring
6541    jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
6542    // Back-up strings to avoid reading beyond substring.
6543    lea(str2, Address(str2, cnt2, Address::times_2, -16));
6544    lea(str1, Address(str1, cnt2, Address::times_2, -16));
6545    subl(cnt1, cnt2);
6546    movl(cnt2, 8);
6547    addl(cnt1, 8);
6548    bind(CONT_SCAN_SUBSTR);
6549    movdqu(vec, Address(str2, 0));
6550    jmpb(SCAN_SUBSTR);
6551
6552    bind(RET_FOUND_LONG);
6553    movptr(str1, Address(rsp, wordSize));
6554  } // non constant
6555
6556  bind(RET_FOUND);
6557  // Compute substr offset
6558  subptr(result, str1);
6559  shrl(result, 1); // index
6560
6561  bind(CLEANUP);
6562  pop(rsp); // restore SP
6563
6564} // string_indexof
6565
6566// Compare strings.
6567void MacroAssembler::string_compare(Register str1, Register str2,
6568                                    Register cnt1, Register cnt2, Register result,
6569                                    XMMRegister vec1) {
6570  ShortBranchVerifier sbv(this);
6571  Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
6572
6573  // Compute the minimum of the string lengths and the
6574  // difference of the string lengths (stack).
6575  // Do the conditional move stuff
6576  movl(result, cnt1);
6577  subl(cnt1, cnt2);
6578  push(cnt1);
6579  cmov32(Assembler::lessEqual, cnt2, result);
6580
6581  // Is the minimum length zero?
6582  testl(cnt2, cnt2);
6583  jcc(Assembler::zero, LENGTH_DIFF_LABEL);
6584
6585  // Compare first characters
6586  load_unsigned_short(result, Address(str1, 0));
6587  load_unsigned_short(cnt1, Address(str2, 0));
6588  subl(result, cnt1);
6589  jcc(Assembler::notZero,  POP_LABEL);
6590  cmpl(cnt2, 1);
6591  jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6592
6593  // Check if the strings start at the same location.
6594  cmpptr(str1, str2);
6595  jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6596
6597  Address::ScaleFactor scale = Address::times_2;
6598  int stride = 8;
6599
6600  if (UseAVX >= 2 && UseSSE42Intrinsics) {
6601    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
6602    Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
6603    Label COMPARE_TAIL_LONG;
6604    int pcmpmask = 0x19;
6605
6606    // Setup to compare 16-char (32-byte) vectors,
6607    // starting from the first character again because it has an aligned address.
6608    int stride2 = 16;
6609    int adr_stride  = stride  << scale;
6610
6611    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6612    // rax and rdx are used by pcmpestri as elements counters
6613    movl(result, cnt2);
6614    andl(cnt2, ~(stride2-1));   // cnt2 holds the vector count
6615    jcc(Assembler::zero, COMPARE_TAIL_LONG);
6616
6617    // fast path: compare the first two 8-char vectors.
6618    bind(COMPARE_16_CHARS);
6619    movdqu(vec1, Address(str1, 0));
6620    pcmpestri(vec1, Address(str2, 0), pcmpmask);
6621    jccb(Assembler::below, COMPARE_INDEX_CHAR);
6622
6623    movdqu(vec1, Address(str1, adr_stride));
6624    pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
6625    jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
6626    addl(cnt1, stride);
6627
6628    // Compare the characters at index in cnt1
6629    bind(COMPARE_INDEX_CHAR); //cnt1 has the offset of the mismatching character
6630    load_unsigned_short(result, Address(str1, cnt1, scale));
6631    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6632    subl(result, cnt2);
6633    jmp(POP_LABEL);
6634
6635    // Setup the registers to start vector comparison loop
6636    bind(COMPARE_WIDE_VECTORS);
6637    lea(str1, Address(str1, result, scale));
6638    lea(str2, Address(str2, result, scale));
6639    subl(result, stride2);
6640    subl(cnt2, stride2);
6641    jccb(Assembler::zero, COMPARE_WIDE_TAIL);
6642    negptr(result);
6643
6644    //  In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
6645    bind(COMPARE_WIDE_VECTORS_LOOP);
6646    vmovdqu(vec1, Address(str1, result, scale));
6647    vpxor(vec1, Address(str2, result, scale));
6648    vptest(vec1, vec1);
6649    jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
6650    addptr(result, stride2);
6651    subl(cnt2, stride2);
6652    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
6653    // clean upper bits of YMM registers
6654    vpxor(vec1, vec1);
6655
6656    // compare wide vectors tail
6657    bind(COMPARE_WIDE_TAIL);
6658    testptr(result, result);
6659    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6660
6661    movl(result, stride2);
6662    movl(cnt2, result);
6663    negptr(result);
6664    jmpb(COMPARE_WIDE_VECTORS_LOOP);
6665
6666    // Identifies the mismatching (higher or lower) 16 bytes in the 32-byte vectors.
6667    bind(VECTOR_NOT_EQUAL);
6668    // clean upper bits of YMM registers
6669    vpxor(vec1, vec1);
6670    lea(str1, Address(str1, result, scale));
6671    lea(str2, Address(str2, result, scale));
6672    jmp(COMPARE_16_CHARS);
6673
6674    // Compare tail chars, length between 1 and 15 chars
6675    bind(COMPARE_TAIL_LONG);
6676    movl(cnt2, result);
6677    cmpl(cnt2, stride);
6678    jccb(Assembler::less, COMPARE_SMALL_STR);
6679
6680    movdqu(vec1, Address(str1, 0));
6681    pcmpestri(vec1, Address(str2, 0), pcmpmask);
6682    jcc(Assembler::below, COMPARE_INDEX_CHAR);
6683    subptr(cnt2, stride);
6684    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6685    lea(str1, Address(str1, result, scale));
6686    lea(str2, Address(str2, result, scale));
6687    negptr(cnt2);
6688    jmpb(WHILE_HEAD_LABEL);
6689
6690    bind(COMPARE_SMALL_STR);
6691  } else if (UseSSE42Intrinsics) {
6692    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
6693    int pcmpmask = 0x19;
6694    // Setup to compare 8-char (16-byte) vectors,
6695    // starting from the first character again because it has an aligned address.
6696    movl(result, cnt2);
6697    andl(cnt2, ~(stride - 1));   // cnt2 holds the vector count
6698    jccb(Assembler::zero, COMPARE_TAIL);
6699
6700    lea(str1, Address(str1, result, scale));
6701    lea(str2, Address(str2, result, scale));
6702    negptr(result);
6703
6704    // pcmpestri
6705    //   inputs:
6706    //     vec1- substring
6707    //     rax - negative string length (elements count)
6708    //     mem - scanned string
6709    //     rdx - string length (elements count)
6710    //     pcmpmask - cmp mode: 11000 (string compare with negated result)
6711    //               + 00 (unsigned bytes) or  + 01 (unsigned shorts)
6712    //   outputs:
6713    //     rcx - first mismatched element index
6714    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6715
6716    bind(COMPARE_WIDE_VECTORS);
6717    movdqu(vec1, Address(str1, result, scale));
6718    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6719    // After pcmpestri cnt1(rcx) contains mismatched element index
6720
6721    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
6722    addptr(result, stride);
6723    subptr(cnt2, stride);
6724    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
6725
6726    // compare wide vectors tail
6727    testptr(result, result);
6728    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6729
6730    movl(cnt2, stride);
6731    movl(result, stride);
6732    negptr(result);
6733    movdqu(vec1, Address(str1, result, scale));
6734    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6735    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
6736
6737    // Mismatched characters in the vectors
6738    bind(VECTOR_NOT_EQUAL);
6739    addptr(cnt1, result);
6740    load_unsigned_short(result, Address(str1, cnt1, scale));
6741    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6742    subl(result, cnt2);
6743    jmpb(POP_LABEL);
6744
6745    bind(COMPARE_TAIL); // limit is zero
6746    movl(cnt2, result);
6747    // Fallthru to tail compare
6748  }
6749  // Shift str2 and str1 to the end of the arrays, negate min
6750  lea(str1, Address(str1, cnt2, scale));
6751  lea(str2, Address(str2, cnt2, scale));
6752  decrementl(cnt2);  // first character was compared already
6753  negptr(cnt2);
6754
6755  // Compare the rest of the elements
6756  bind(WHILE_HEAD_LABEL);
6757  load_unsigned_short(result, Address(str1, cnt2, scale, 0));
6758  load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
6759  subl(result, cnt1);
6760  jccb(Assembler::notZero, POP_LABEL);
6761  increment(cnt2);
6762  jccb(Assembler::notZero, WHILE_HEAD_LABEL);
6763
6764  // Strings are equal up to min length.  Return the length difference.
6765  bind(LENGTH_DIFF_LABEL);
6766  pop(result);
6767  jmpb(DONE_LABEL);
6768
6769  // Discard the stored length difference
6770  bind(POP_LABEL);
6771  pop(cnt1);
6772
6773  // That's it
6774  bind(DONE_LABEL);
6775}
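// Illustrative only: whichever vector path is taken above, string_compare() returns
// the value of the scalar reference
//   int min = (cnt1 < cnt2) ? cnt1 : cnt2;
//   for (int i = 0; i < min; i++) {
//     int diff = str1[i] - str2[i];        // jchar elements, zero-extended
//     if (diff != 0) return diff;
//   }
//   return cnt1 - cnt2;                    // lengths decide when one is a prefix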
6776
6777// Compare char[] arrays aligned to 4 bytes or substrings.
6778void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
6779                                        Register limit, Register result, Register chr,
6780                                        XMMRegister vec1, XMMRegister vec2) {
6781  ShortBranchVerifier sbv(this);
6782  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
6783
6784  int length_offset  = arrayOopDesc::length_offset_in_bytes();
6785  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
6786
6787  // Check the input args
6788  cmpptr(ary1, ary2);
6789  jcc(Assembler::equal, TRUE_LABEL);
6790
6791  if (is_array_equ) {
6792    // Need additional checks for arrays_equals.
6793    testptr(ary1, ary1);
6794    jcc(Assembler::zero, FALSE_LABEL);
6795    testptr(ary2, ary2);
6796    jcc(Assembler::zero, FALSE_LABEL);
6797
6798    // Check the lengths
6799    movl(limit, Address(ary1, length_offset));
6800    cmpl(limit, Address(ary2, length_offset));
6801    jcc(Assembler::notEqual, FALSE_LABEL);
6802  }
6803
6804  // count == 0
6805  testl(limit, limit);
6806  jcc(Assembler::zero, TRUE_LABEL);
6807
6808  if (is_array_equ) {
6809    // Load array address
6810    lea(ary1, Address(ary1, base_offset));
6811    lea(ary2, Address(ary2, base_offset));
6812  }
6813
6814  shll(limit, 1);      // byte count != 0
6815  movl(result, limit); // copy
6816
6817  if (UseAVX >= 2) {
6818    // With AVX2, use 32-byte vector compare
6819    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6820
6821    // Compare 32-byte vectors
6822    andl(result, 0x0000001e);  //   tail count (in bytes)
6823    andl(limit, 0xffffffe0);   // vector count (in bytes)
6824    jccb(Assembler::zero, COMPARE_TAIL);
6825
6826    lea(ary1, Address(ary1, limit, Address::times_1));
6827    lea(ary2, Address(ary2, limit, Address::times_1));
6828    negptr(limit);
6829
6830    bind(COMPARE_WIDE_VECTORS);
6831    vmovdqu(vec1, Address(ary1, limit, Address::times_1));
6832    vmovdqu(vec2, Address(ary2, limit, Address::times_1));
6833    vpxor(vec1, vec2);
6834
6835    vptest(vec1, vec1);
6836    jccb(Assembler::notZero, FALSE_LABEL);
6837    addptr(limit, 32);
6838    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6839
6840    testl(result, result);
6841    jccb(Assembler::zero, TRUE_LABEL);
6842
6843    vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
6844    vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
6845    vpxor(vec1, vec2);
6846
6847    vptest(vec1, vec1);
6848    jccb(Assembler::notZero, FALSE_LABEL);
6849    jmpb(TRUE_LABEL);
6850
6851    bind(COMPARE_TAIL); // limit is zero
6852    movl(limit, result);
6853    // Fallthru to tail compare
6854  } else if (UseSSE42Intrinsics) {
6855    // With SSE4.2, use double quad vector compare
6856    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6857
6858    // Compare 16-byte vectors
6859    andl(result, 0x0000000e);  //   tail count (in bytes)
6860    andl(limit, 0xfffffff0);   // vector count (in bytes)
6861    jccb(Assembler::zero, COMPARE_TAIL);
6862
6863    lea(ary1, Address(ary1, limit, Address::times_1));
6864    lea(ary2, Address(ary2, limit, Address::times_1));
6865    negptr(limit);
6866
6867    bind(COMPARE_WIDE_VECTORS);
6868    movdqu(vec1, Address(ary1, limit, Address::times_1));
6869    movdqu(vec2, Address(ary2, limit, Address::times_1));
6870    pxor(vec1, vec2);
6871
6872    ptest(vec1, vec1);
6873    jccb(Assembler::notZero, FALSE_LABEL);
6874    addptr(limit, 16);
6875    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6876
6877    testl(result, result);
6878    jccb(Assembler::zero, TRUE_LABEL);
6879
6880    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
6881    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
6882    pxor(vec1, vec2);
6883
6884    ptest(vec1, vec1);
6885    jccb(Assembler::notZero, FALSE_LABEL);
6886    jmpb(TRUE_LABEL);
6887
6888    bind(COMPARE_TAIL); // limit is zero
6889    movl(limit, result);
6890    // Fallthru to tail compare
6891  }
6892
6893  // Compare 4-byte vectors
6894  andl(limit, 0xfffffffc); // vector count (in bytes)
6895  jccb(Assembler::zero, COMPARE_CHAR);
6896
6897  lea(ary1, Address(ary1, limit, Address::times_1));
6898  lea(ary2, Address(ary2, limit, Address::times_1));
6899  negptr(limit);
6900
6901  bind(COMPARE_VECTORS);
6902  movl(chr, Address(ary1, limit, Address::times_1));
6903  cmpl(chr, Address(ary2, limit, Address::times_1));
6904  jccb(Assembler::notEqual, FALSE_LABEL);
6905  addptr(limit, 4);
6906  jcc(Assembler::notZero, COMPARE_VECTORS);
6907
6908  // Compare trailing char (final 2 bytes), if any
6909  bind(COMPARE_CHAR);
6910  testl(result, 0x2);   // tail  char
6911  jccb(Assembler::zero, TRUE_LABEL);
6912  load_unsigned_short(chr, Address(ary1, 0));
6913  load_unsigned_short(limit, Address(ary2, 0));
6914  cmpl(chr, limit);
6915  jccb(Assembler::notEqual, FALSE_LABEL);
6916
6917  bind(TRUE_LABEL);
6918  movl(result, 1);   // return true
6919  jmpb(DONE);
6920
6921  bind(FALSE_LABEL);
6922  xorl(result, result); // return false
6923
6924  // That's it
6925  bind(DONE);
6926  if (UseAVX >= 2) {
6927    // clean upper bits of YMM registers
6928    vpxor(vec1, vec1);
6929    vpxor(vec2, vec2);
6930  }
6931}
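// Illustrative only: result ends up 1 exactly when the scalar check below would
// succeed (after the null/length guards for the is_array_equ case), and 0 otherwise:
//   for (int i = 0; i < limit; i++) {      // limit = char count on entry
//     if (ary1[i] != ary2[i]) return 0;
//   }
//   return 1;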
6932
6933void MacroAssembler::generate_fill(BasicType t, bool aligned,
6934                                   Register to, Register value, Register count,
6935                                   Register rtmp, XMMRegister xtmp) {
6936  ShortBranchVerifier sbv(this);
6937  assert_different_registers(to, value, count, rtmp);
6938  Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
6939  Label L_fill_2_bytes, L_fill_4_bytes;
6940
6941  int shift = -1;
6942  switch (t) {
6943    case T_BYTE:
6944      shift = 2;
6945      break;
6946    case T_SHORT:
6947      shift = 1;
6948      break;
6949    case T_INT:
6950      shift = 0;
6951      break;
6952    default: ShouldNotReachHere();
6953  }
6954
6955  if (t == T_BYTE) {
6956    andl(value, 0xff);
6957    movl(rtmp, value);
6958    shll(rtmp, 8);
6959    orl(value, rtmp);
6960  }
6961  if (t == T_SHORT) {
6962    andl(value, 0xffff);
6963  }
6964  if (t == T_BYTE || t == T_SHORT) {
6965    movl(rtmp, value);
6966    shll(rtmp, 16);
6967    orl(value, rtmp);
6968  }
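  // Illustrative only: the shifts/ors above replicate the fill value across 32 bits,
  // e.g. T_BYTE 0x41 -> 0x4141 -> 0x41414141 and T_SHORT 0x1234 -> 0x12341234, so a
  // single 32-bit (or wider, after the xmm broadcast below) store fills several elements.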
6969
6970  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
6971  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
6972  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
6973    // align the destination address to a 4-byte boundary
6974    if (t == T_BYTE) {
6975      // One-byte misalignment happens only for byte arrays
6976      testptr(to, 1);
6977      jccb(Assembler::zero, L_skip_align1);
6978      movb(Address(to, 0), value);
6979      increment(to);
6980      decrement(count);
6981      BIND(L_skip_align1);
6982    }
6983    // Two-byte misalignment happens only for byte and short (char) arrays
6984    testptr(to, 2);
6985    jccb(Assembler::zero, L_skip_align2);
6986    movw(Address(to, 0), value);
6987    addptr(to, 2);
6988    subl(count, 1<<(shift-1));
6989    BIND(L_skip_align2);
6990  }
6991  if (UseSSE < 2) {
6992    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
6993    // Fill 32-byte chunks
6994    subl(count, 8 << shift);
6995    jcc(Assembler::less, L_check_fill_8_bytes);
6996    align(16);
6997
6998    BIND(L_fill_32_bytes_loop);
6999
7000    for (int i = 0; i < 32; i += 4) {
7001      movl(Address(to, i), value);
7002    }
7003
7004    addptr(to, 32);
7005    subl(count, 8 << shift);
7006    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7007    BIND(L_check_fill_8_bytes);
7008    addl(count, 8 << shift);
7009    jccb(Assembler::zero, L_exit);
7010    jmpb(L_fill_8_bytes);
7011
7012    //
7013    // length is too short, just fill qwords
7014    //
7015    BIND(L_fill_8_bytes_loop);
7016    movl(Address(to, 0), value);
7017    movl(Address(to, 4), value);
7018    addptr(to, 8);
7019    BIND(L_fill_8_bytes);
7020    subl(count, 1 << (shift + 1));
7021    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7022    // fall through to fill 4 bytes
7023  } else {
7024    Label L_fill_32_bytes;
7025    if (!UseUnalignedLoadStores) {
7026      // align to 8 bytes, we know we are 4 byte aligned to start
7027      testptr(to, 4);
7028      jccb(Assembler::zero, L_fill_32_bytes);
7029      movl(Address(to, 0), value);
7030      addptr(to, 4);
7031      subl(count, 1<<shift);
7032    }
7033    BIND(L_fill_32_bytes);
7034    {
7035      assert( UseSSE >= 2, "supported cpu only" );
7036      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7037      if (UseAVX > 2) {
7038        movl(rtmp, 0xffff);
7039#ifdef _LP64
7040        kmovql(k1, rtmp);
7041#else
7042        kmovdl(k1, rtmp);
7043#endif
7044      }
7045      movdl(xtmp, value);
7046      if (UseAVX > 2 && UseUnalignedLoadStores) {
7047        // Fill 64-byte chunks
7048        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7049        evpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
7050
7051        subl(count, 16 << shift);
7052        jcc(Assembler::less, L_check_fill_32_bytes);
7053        align(16);
7054
7055        BIND(L_fill_64_bytes_loop);
7056        evmovdqu(Address(to, 0), xtmp, Assembler::AVX_512bit);
7057        addptr(to, 64);
7058        subl(count, 16 << shift);
7059        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7060
7061        BIND(L_check_fill_32_bytes);
7062        addl(count, 8 << shift);
7063        jccb(Assembler::less, L_check_fill_8_bytes);
7064        evmovdqu(Address(to, 0), xtmp, Assembler::AVX_256bit);
7065        addptr(to, 32);
7066        subl(count, 8 << shift);
7067
7068        BIND(L_check_fill_8_bytes);
7069      } else if (UseAVX == 2 && UseUnalignedLoadStores) {
7070        // Fill 64-byte chunks
7071        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7072        vpbroadcastd(xtmp, xtmp);
7073
7074        subl(count, 16 << shift);
7075        jcc(Assembler::less, L_check_fill_32_bytes);
7076        align(16);
7077
7078        BIND(L_fill_64_bytes_loop);
7079        vmovdqu(Address(to, 0), xtmp);
7080        vmovdqu(Address(to, 32), xtmp);
7081        addptr(to, 64);
7082        subl(count, 16 << shift);
7083        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7084
7085        BIND(L_check_fill_32_bytes);
7086        addl(count, 8 << shift);
7087        jccb(Assembler::less, L_check_fill_8_bytes);
7088        vmovdqu(Address(to, 0), xtmp);
7089        addptr(to, 32);
7090        subl(count, 8 << shift);
7091
7092        BIND(L_check_fill_8_bytes);
7093        // clean upper bits of YMM registers
7094        movdl(xtmp, value);
7095        pshufd(xtmp, xtmp, 0);
7096      } else {
7097        // Fill 32-byte chunks
7098        pshufd(xtmp, xtmp, 0);
7099
7100        subl(count, 8 << shift);
7101        jcc(Assembler::less, L_check_fill_8_bytes);
7102        align(16);
7103
7104        BIND(L_fill_32_bytes_loop);
7105
7106        if (UseUnalignedLoadStores) {
7107          movdqu(Address(to, 0), xtmp);
7108          movdqu(Address(to, 16), xtmp);
7109        } else {
7110          movq(Address(to, 0), xtmp);
7111          movq(Address(to, 8), xtmp);
7112          movq(Address(to, 16), xtmp);
7113          movq(Address(to, 24), xtmp);
7114        }
7115
7116        addptr(to, 32);
7117        subl(count, 8 << shift);
7118        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7119
7120        BIND(L_check_fill_8_bytes);
7121      }
7122      addl(count, 8 << shift);
7123      jccb(Assembler::zero, L_exit);
7124      jmpb(L_fill_8_bytes);
7125
7126      //
7127      // length is too short, just fill qwords
7128      //
7129      BIND(L_fill_8_bytes_loop);
7130      movq(Address(to, 0), xtmp);
7131      addptr(to, 8);
7132      BIND(L_fill_8_bytes);
7133      subl(count, 1 << (shift + 1));
7134      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7135    }
7136  }
7137  // fill trailing 4 bytes
7138  BIND(L_fill_4_bytes);
7139  testl(count, 1<<shift);
7140  jccb(Assembler::zero, L_fill_2_bytes);
7141  movl(Address(to, 0), value);
7142  if (t == T_BYTE || t == T_SHORT) {
7143    addptr(to, 4);
7144    BIND(L_fill_2_bytes);
7145    // fill trailing 2 bytes
7146    testl(count, 1<<(shift-1));
7147    jccb(Assembler::zero, L_fill_byte);
7148    movw(Address(to, 0), value);
7149    if (t == T_BYTE) {
7150      addptr(to, 2);
7151      BIND(L_fill_byte);
7152      // fill trailing byte
7153      testl(count, 1);
7154      jccb(Assembler::zero, L_exit);
7155      movb(Address(to, 0), value);
7156    } else {
7157      BIND(L_fill_byte);
7158    }
7159  } else {
7160    BIND(L_fill_2_bytes);
7161  }
7162  BIND(L_exit);
7163}
7164
7165// encode char[] to byte[] in ISO_8859_1
7166void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7167                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7168                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7169                                      Register tmp5, Register result) {
7170  // rsi: src
7171  // rdi: dst
7172  // rdx: len
7173  // rcx: tmp5
7174  // rax: result
7175  ShortBranchVerifier sbv(this);
7176  assert_different_registers(src, dst, len, tmp5, result);
7177  Label L_done, L_copy_1_char, L_copy_1_char_exit;
7178
7179  // set result
7180  xorl(result, result);
7181  // check for zero length
7182  testl(len, len);
7183  jcc(Assembler::zero, L_done);
7184  movl(result, len);
7185
7186  // Setup pointers
7187  lea(src, Address(src, len, Address::times_2)); // char[]
7188  lea(dst, Address(dst, len, Address::times_1)); // byte[]
7189  negptr(len);
7190
7191  if (UseSSE42Intrinsics || UseAVX >= 2) {
7192    Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
7193    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7194
7195    if (UseAVX >= 2) {
7196      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7197      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
7198      movdl(tmp1Reg, tmp5);
7199      vpbroadcastd(tmp1Reg, tmp1Reg);
7200      jmpb(L_chars_32_check);
7201
7202      bind(L_copy_32_chars);
7203      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7204      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7205      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7206      vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in  vector
7207      jccb(Assembler::notZero, L_copy_32_chars_exit);
7208      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7209      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
7210      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7211
7212      bind(L_chars_32_check);
7213      addptr(len, 32);
7214      jccb(Assembler::lessEqual, L_copy_32_chars);
7215
7216      bind(L_copy_32_chars_exit);
7217      subptr(len, 16);
7218      jccb(Assembler::greater, L_copy_16_chars_exit);
7219
7220    } else if (UseSSE42Intrinsics) {
7221      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
7222      movdl(tmp1Reg, tmp5);
7223      pshufd(tmp1Reg, tmp1Reg, 0);
7224      jmpb(L_chars_16_check);
7225    }
7226
7227    bind(L_copy_16_chars);
7228    if (UseAVX >= 2) {
7229      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7230      vptest(tmp2Reg, tmp1Reg);
7231      jccb(Assembler::notZero, L_copy_16_chars_exit);
7232      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
7233      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
7234    } else {
7235      if (UseAVX > 0) {
7236        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7237        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7238        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
7239      } else {
7240        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7241        por(tmp2Reg, tmp3Reg);
7242        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7243        por(tmp2Reg, tmp4Reg);
7244      }
7245      ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in  vector
7246      jccb(Assembler::notZero, L_copy_16_chars_exit);
7247      packuswb(tmp3Reg, tmp4Reg);
7248    }
7249    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7250
7251    bind(L_chars_16_check);
7252    addptr(len, 16);
7253    jccb(Assembler::lessEqual, L_copy_16_chars);
7254
7255    bind(L_copy_16_chars_exit);
7256    if (UseAVX >= 2) {
7257      // clean upper bits of YMM registers
7258      vpxor(tmp2Reg, tmp2Reg);
7259      vpxor(tmp3Reg, tmp3Reg);
7260      vpxor(tmp4Reg, tmp4Reg);
7261      movdl(tmp1Reg, tmp5);
7262      pshufd(tmp1Reg, tmp1Reg, 0);
7263    }
7264    subptr(len, 8);
7265    jccb(Assembler::greater, L_copy_8_chars_exit);
7266
7267    bind(L_copy_8_chars);
7268    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7269    ptest(tmp3Reg, tmp1Reg);
7270    jccb(Assembler::notZero, L_copy_8_chars_exit);
7271    packuswb(tmp3Reg, tmp1Reg);
7272    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7273    addptr(len, 8);
7274    jccb(Assembler::lessEqual, L_copy_8_chars);
7275
7276    bind(L_copy_8_chars_exit);
7277    subptr(len, 8);
7278    jccb(Assembler::zero, L_done);
7279  }
7280
7281  bind(L_copy_1_char);
7282  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7283  testl(tmp5, 0xff00);      // check if Unicode char
7284  jccb(Assembler::notZero, L_copy_1_char_exit);
7285  movb(Address(dst, len, Address::times_1, 0), tmp5);
7286  addptr(len, 1);
7287  jccb(Assembler::less, L_copy_1_char);
7288
7289  bind(L_copy_1_char_exit);
7290  addptr(result, len); // len is the negative count of unprocessed elements
7291  bind(L_done);
7292}
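// Illustrative only: the intrinsic above returns the number of chars written, the
// same value as the scalar loop
//   int i = 0;
//   for (; i < len; i++) {
//     jchar c = src[i];
//     if ((c & 0xff00) != 0) break;        // stop at the first non-latin-1 char
//     dst[i] = (jbyte)c;
//   }
//   return i;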
7293
7294#ifdef _LP64
7295/**
7296 * Helper for multiply_to_len().
7297 */
7298void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7299  addq(dest_lo, src1);
7300  adcq(dest_hi, 0);
7301  addq(dest_lo, src2);
7302  adcq(dest_hi, 0);
7303}
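// Illustrative only: add2_with_carry() computes the 128-bit accumulation
//   dest_hi:dest_lo += (unsigned)src1 + (unsigned)src2
// where each adcq(dest_hi, 0) folds the carry of the preceding 64-bit add into
// the high word.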
7304
7305/**
7306 * Multiply 64 bit by 64 bit first loop.
7307 */
7308void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7309                                           Register y, Register y_idx, Register z,
7310                                           Register carry, Register product,
7311                                           Register idx, Register kdx) {
7312  //
7313  //  jlong carry, x[], y[], z[];
7314  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7315  //    huge_128 product = y[idx] * x[xstart] + carry;
7316  //    z[kdx] = (jlong)product;
7317  //    carry  = (jlong)(product >>> 64);
7318  //  }
7319  //  z[xstart] = carry;
7320  //
7321
7322  Label L_first_loop, L_first_loop_exit;
7323  Label L_one_x, L_one_y, L_multiply;
7324
7325  decrementl(xstart);
7326  jcc(Assembler::negative, L_one_x);
7327
7328  movq(x_xstart, Address(x, xstart, Address::times_4,  0));
7329  rorq(x_xstart, 32); // convert big-endian to little-endian
7330
7331  bind(L_first_loop);
7332  decrementl(idx);
7333  jcc(Assembler::negative, L_first_loop_exit);
7334  decrementl(idx);
7335  jcc(Assembler::negative, L_one_y);
7336  movq(y_idx, Address(y, idx, Address::times_4,  0));
7337  rorq(y_idx, 32); // convert big-endian to little-endian
7338  bind(L_multiply);
7339  movq(product, x_xstart);
7340  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7341  addq(product, carry);
7342  adcq(rdx, 0);
7343  subl(kdx, 2);
7344  movl(Address(z, kdx, Address::times_4,  4), product);
7345  shrq(product, 32);
7346  movl(Address(z, kdx, Address::times_4,  0), product);
7347  movq(carry, rdx);
7348  jmp(L_first_loop);
7349
7350  bind(L_one_y);
7351  movl(y_idx, Address(y,  0));
7352  jmp(L_multiply);
7353
7354  bind(L_one_x);
7355  movl(x_xstart, Address(x,  0));
7356  jmp(L_first_loop);
7357
7358  bind(L_first_loop_exit);
7359}
7360
7361/**
7362 * Multiply 64 bit by 64 bit and add 128 bit.
7363 */
7364void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7365                                            Register yz_idx, Register idx,
7366                                            Register carry, Register product, int offset) {
7367  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7368  //     z[kdx] = (jlong)product;
7369
7370  movq(yz_idx, Address(y, idx, Address::times_4,  offset));
7371  rorq(yz_idx, 32); // convert big-endian to little-endian
7372  movq(product, x_xstart);
7373  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
7374  movq(yz_idx, Address(z, idx, Address::times_4,  offset));
7375  rorq(yz_idx, 32); // convert big-endian to little-endian
7376
7377  add2_with_carry(rdx, product, carry, yz_idx);
7378
7379  movl(Address(z, idx, Address::times_4,  offset+4), product);
7380  shrq(product, 32);
7381  movl(Address(z, idx, Address::times_4,  offset), product);
7382
7383}
7384
7385/**
7386 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7387 */
7388void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7389                                             Register yz_idx, Register idx, Register jdx,
7390                                             Register carry, Register product,
7391                                             Register carry2) {
7392  //   jlong carry, x[], y[], z[];
7393  //   int kdx = ystart+1;
7394  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7395  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7396  //     z[kdx+idx+1] = (jlong)product;
7397  //     jlong carry2  = (jlong)(product >>> 64);
7398  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7399  //     z[kdx+idx] = (jlong)product;
7400  //     carry  = (jlong)(product >>> 64);
7401  //   }
7402  //   idx += 2;
7403  //   if (idx > 0) {
7404  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7405  //     z[kdx+idx] = (jlong)product;
7406  //     carry  = (jlong)(product >>> 64);
7407  //   }
7408  //
7409
7410  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7411
7412  movl(jdx, idx);
7413  andl(jdx, 0xFFFFFFFC);
7414  shrl(jdx, 2);
7415
7416  bind(L_third_loop);
7417  subl(jdx, 1);
7418  jcc(Assembler::negative, L_third_loop_exit);
7419  subl(idx, 4);
7420
7421  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7422  movq(carry2, rdx);
7423
7424  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7425  movq(carry, rdx);
7426  jmp(L_third_loop);
7427
7428  bind (L_third_loop_exit);
7429
7430  andl (idx, 0x3);
7431  jcc(Assembler::zero, L_post_third_loop_done);
7432
7433  Label L_check_1;
7434  subl(idx, 2);
7435  jcc(Assembler::negative, L_check_1);
7436
7437  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7438  movq(carry, rdx);
7439
7440  bind (L_check_1);
7441  addl (idx, 0x2);
7442  andl (idx, 0x1);
7443  subl(idx, 1);
7444  jcc(Assembler::negative, L_post_third_loop_done);
7445
7446  movl(yz_idx, Address(y, idx, Address::times_4,  0));
7447  movq(product, x_xstart);
7448  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7449  movl(yz_idx, Address(z, idx, Address::times_4,  0));
7450
7451  add2_with_carry(rdx, product, yz_idx, carry);
7452
7453  movl(Address(z, idx, Address::times_4,  0), product);
7454  shrq(product, 32);
7455
7456  shlq(rdx, 32);
7457  orq(product, rdx);
7458  movq(carry, product);
7459
7460  bind(L_post_third_loop_done);
7461}
7462
7463/**
7464 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7465 *
7466 */
7467void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7468                                                  Register carry, Register carry2,
7469                                                  Register idx, Register jdx,
7470                                                  Register yz_idx1, Register yz_idx2,
7471                                                  Register tmp, Register tmp3, Register tmp4) {
7472  assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7473
7474  //   jlong carry, x[], y[], z[];
7475  //   int kdx = ystart+1;
7476  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7477  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7478  //     jlong carry2  = (jlong)(tmp3 >>> 64);
7479  //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
7480  //     carry  = (jlong)(tmp4 >>> 64);
7481  //     z[kdx+idx+1] = (jlong)tmp3;
7482  //     z[kdx+idx] = (jlong)tmp4;
7483  //   }
7484  //   idx += 2;
7485  //   if (idx > 0) {
7486  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7487  //     z[kdx+idx] = (jlong)yz_idx1;
7488  //     carry  = (jlong)(yz_idx1 >>> 64);
7489  //   }
7490  //
7491
7492  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7493
7494  movl(jdx, idx);
7495  andl(jdx, 0xFFFFFFFC);
7496  shrl(jdx, 2);
7497
7498  bind(L_third_loop);
7499  subl(jdx, 1);
7500  jcc(Assembler::negative, L_third_loop_exit);
7501  subl(idx, 4);
7502
7503  movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
7504  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7505  movq(yz_idx2, Address(y, idx, Address::times_4,  0));
7506  rorxq(yz_idx2, yz_idx2, 32);
7507
7508  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
7509  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
7510
7511  movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
7512  rorxq(yz_idx1, yz_idx1, 32);
7513  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
7514  rorxq(yz_idx2, yz_idx2, 32);
7515
7516  if (VM_Version::supports_adx()) {
7517    adcxq(tmp3, carry);
7518    adoxq(tmp3, yz_idx1);
7519
7520    adcxq(tmp4, tmp);
7521    adoxq(tmp4, yz_idx2);
7522
7523    movl(carry, 0); // does not affect flags
7524    adcxq(carry2, carry);
7525    adoxq(carry2, carry);
7526  } else {
7527    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7528    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7529  }
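  // Note: adcx consumes/produces only CF and adox only OF, so the two accumulations
  // above form independent carry chains that do not serialize on a single flag; the
  // non-ADX branch reaches the same result with plain add/adc via add2_with_carry().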
7530  movq(carry, carry2);
7531
7532  movl(Address(z, idx, Address::times_4, 12), tmp3);
7533  shrq(tmp3, 32);
7534  movl(Address(z, idx, Address::times_4,  8), tmp3);
7535
7536  movl(Address(z, idx, Address::times_4,  4), tmp4);
7537  shrq(tmp4, 32);
7538  movl(Address(z, idx, Address::times_4,  0), tmp4);
7539
7540  jmp(L_third_loop);
7541
7542  bind (L_third_loop_exit);
7543
7544  andl (idx, 0x3);
7545  jcc(Assembler::zero, L_post_third_loop_done);
7546
7547  Label L_check_1;
7548  subl(idx, 2);
7549  jcc(Assembler::negative, L_check_1);
7550
7551  movq(yz_idx1, Address(y, idx, Address::times_4,  0));
7552  rorxq(yz_idx1, yz_idx1, 32);
7553  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
7554  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
7555  rorxq(yz_idx2, yz_idx2, 32);
7556
7557  add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7558
7559  movl(Address(z, idx, Address::times_4,  4), tmp3);
7560  shrq(tmp3, 32);
7561  movl(Address(z, idx, Address::times_4,  0), tmp3);
7562  movq(carry, tmp4);
7563
7564  bind (L_check_1);
7565  addl (idx, 0x2);
7566  andl (idx, 0x1);
7567  subl(idx, 1);
7568  jcc(Assembler::negative, L_post_third_loop_done);
7569  movl(tmp4, Address(y, idx, Address::times_4,  0));
7570  mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
7571  movl(tmp4, Address(z, idx, Address::times_4,  0));
7572
7573  add2_with_carry(carry2, tmp3, tmp4, carry);
7574
7575  movl(Address(z, idx, Address::times_4,  0), tmp3);
7576  shrq(tmp3, 32);
7577
7578  shlq(carry2, 32);
7579  orq(tmp3, carry2);
7580  movq(carry, tmp3);
7581
7582  bind(L_post_third_loop_done);
7583}
7584
7585/**
7586 * Code for BigInteger::multiplyToLen() intrinsic.
7587 *
7588 * rdi: x
7589 * rax: xlen
7590 * rsi: y
7591 * rcx: ylen
7592 * r8:  z
7593 * r11: zlen
7594 * r12: tmp1
7595 * r13: tmp2
7596 * r14: tmp3
7597 * r15: tmp4
7598 * rbx: tmp5
7599 *
7600 */
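// Outline (an illustrative sketch, not the exact library source): x and y are
// arrays of 32-bit limbs stored most-significant limb first, and z has
// xlen + ylen limbs.  The first loop multiplies y by the least-significant
// word of x into z; the second loop then walks the remaining words of x,
// multiply-accumulating y * x[i] into z (the third loop), so that on exit
// z == x * y.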
7601void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
7602                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7603  ShortBranchVerifier sbv(this);
7604  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7605
7606  push(tmp1);
7607  push(tmp2);
7608  push(tmp3);
7609  push(tmp4);
7610  push(tmp5);
7611
7612  push(xlen);
7613  push(zlen);
7614
7615  const Register idx = tmp1;
7616  const Register kdx = tmp2;
7617  const Register xstart = tmp3;
7618
7619  const Register y_idx = tmp4;
7620  const Register carry = tmp5;
7621  const Register product  = xlen;
7622  const Register x_xstart = zlen;  // reuse register
7623
7624  // First Loop.
7625  //
7626  //  final static long LONG_MASK = 0xffffffffL;
7627  //  int xstart = xlen - 1;
7628  //  int ystart = ylen - 1;
7629  //  long carry = 0;
7630  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7631  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7632  //    z[kdx] = (int)product;
7633  //    carry = product >>> 32;
7634  //  }
7635  //  z[xstart] = (int)carry;
7636  //
7637
7638  movl(idx, ylen);      // idx = ylen;
7639  movl(kdx, zlen);      // kdx = xlen+ylen;
7640  xorq(carry, carry);   // carry = 0;
7641
7642  Label L_done;
7643
7644  movl(xstart, xlen);
7645  decrementl(xstart);
7646  jcc(Assembler::negative, L_done);
7647
7648  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7649
7650  Label L_second_loop;
7651  testl(kdx, kdx);
7652  jcc(Assembler::zero, L_second_loop);
7653
7654  Label L_carry;
7655  subl(kdx, 1);
7656  jcc(Assembler::zero, L_carry);
7657
7658  movl(Address(z, kdx, Address::times_4,  0), carry);
7659  shrq(carry, 32);
7660  subl(kdx, 1);
7661
7662  bind(L_carry);
7663  movl(Address(z, kdx, Address::times_4,  0), carry);
7664
7665  // Second and third (nested) loops.
7666  //
7667  // for (int i = xstart-1; i >= 0; i--) { // Second loop
7668  //   carry = 0;
7669  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7670  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7671  //                    (z[k] & LONG_MASK) + carry;
7672  //     z[k] = (int)product;
7673  //     carry = product >>> 32;
7674  //   }
7675  //   z[i] = (int)carry;
7676  // }
7677  //
7678  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7679
7680  const Register jdx = tmp1;
7681
7682  bind(L_second_loop);
7683  xorl(carry, carry);    // carry = 0;
7684  movl(jdx, ylen);       // j = ystart+1
7685
7686  subl(xstart, 1);       // i = xstart-1;
7687  jcc(Assembler::negative, L_done);
7688
7689  push (z);
7690
7691  Label L_last_x;
7692  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7693  subl(xstart, 1);       // i = xstart-1;
7694  jcc(Assembler::negative, L_last_x);
7695
7696  if (UseBMI2Instructions) {
7697    movq(rdx,  Address(x, xstart, Address::times_4,  0));
7698    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7699  } else {
7700    movq(x_xstart, Address(x, xstart, Address::times_4,  0));
7701    rorq(x_xstart, 32);  // convert big-endian to little-endian
7702  }
7703
7704  Label L_third_loop_prologue;
7705  bind(L_third_loop_prologue);
7706
7707  push (x);
7708  push (xstart);
7709  push (ylen);
7710
7711
7712  if (UseBMI2Instructions) {
7713    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7714  } else { // !UseBMI2Instructions
7715    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7716  }
7717
7718  pop(ylen);
7719  pop(xlen);
7720  pop(x);
7721  pop(z);
7722
7723  movl(tmp3, xlen);
7724  addl(tmp3, 1);
7725  movl(Address(z, tmp3, Address::times_4,  0), carry);
7726  subl(tmp3, 1);
7727  jccb(Assembler::negative, L_done);
7728
7729  shrq(carry, 32);
7730  movl(Address(z, tmp3, Address::times_4,  0), carry);
7731  jmp(L_second_loop);
7732
7733  // The following infrequently executed code is placed outside the loops.
7734  bind(L_last_x);
7735  if (UseBMI2Instructions) {
7736    movl(rdx, Address(x,  0));
7737  } else {
7738    movl(x_xstart, Address(x,  0));
7739  }
7740  jmp(L_third_loop_prologue);
7741
7742  bind(L_done);
7743
7744  pop(zlen);
7745  pop(xlen);
7746
7747  pop(tmp5);
7748  pop(tmp4);
7749  pop(tmp3);
7750  pop(tmp2);
7751  pop(tmp1);
7752}
7753#endif
7754
7755/**
7756 * Emits code to update CRC-32 with a byte value using the given table of CRC constants.
7757 *
7758 * @param [in,out] crc   Register containing the crc.
7759 * @param [in] val       Register containing the byte to fold into the CRC.
7760 * @param [in] table     Register containing the table of crc constants.
7761 *
7762 * uint32_t crc;
7763 * val = crc_table[(val ^ crc) & 0xFF];
7764 * crc = val ^ (crc >> 8);
7765 *
7766 */
7767void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
7768  xorl(val, crc);
7769  andl(val, 0xFF);
7770  shrl(crc, 8); // unsigned shift
7771  xorl(crc, Address(table, val, Address::times_4, 0));
7772}
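
// For reference, a minimal C sketch of the byte-at-a-time update emitted above
// (names are illustrative; the actual table comes from StubRoutines::crc_table_addr()):
//
//   uint32_t crc32_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
//                               const uint32_t* crc_table) {
//     while (len--) {
//       crc = crc_table[(*buf++ ^ crc) & 0xFF] ^ (crc >> 8);
//     }
//     return crc;
//   }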
7773
7774/**
7775 * Fold 128-bit data chunk
7776 */
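// In outline (a sketch; the exact folding constants live at
// StubRoutines::x86::crc_by128_masks_addr() and are not restated here):
// writing the 128-bit state as xcrc = (hi:lo) and the constant as xK = (K_hi:K_lo),
// one folding step computes
//
//   xcrc' = (hi clmul K_hi) ^ (lo clmul K_lo) ^ next_128_bits_of_input
//
// where each K is, roughly, a power of x reduced modulo the CRC polynomial
// (with CRC-32's bit-reflection conventions), so xcrc' stays congruent to the
// data consumed so far.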
7777void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
7778  if (UseAVX > 0) {
7779    vpclmulhdq(xtmp, xK, xcrc); // [127:64]
7780    vpclmulldq(xcrc, xK, xcrc); // [63:0]
7781    vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
7782    pxor(xcrc, xtmp);
7783  } else {
7784    movdqa(xtmp, xcrc);
7785    pclmulhdq(xtmp, xK);   // [127:64]
7786    pclmulldq(xcrc, xK);   // [63:0]
7787    pxor(xcrc, xtmp);
7788    movdqu(xtmp, Address(buf, offset));
7789    pxor(xcrc, xtmp);
7790  }
7791}
7792
7793void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
7794  if (UseAVX > 0) {
7795    vpclmulhdq(xtmp, xK, xcrc);
7796    vpclmulldq(xcrc, xK, xcrc);
7797    pxor(xcrc, xbuf);
7798    pxor(xcrc, xtmp);
7799  } else {
7800    movdqa(xtmp, xcrc);
7801    pclmulhdq(xtmp, xK);
7802    pclmulldq(xcrc, xK);
7803    pxor(xcrc, xbuf);
7804    pxor(xcrc, xtmp);
7805  }
7806}
7807
7808/**
7809 * 8-bit folds to compute 32-bit CRC
7810 *
7811 * uint64_t xcrc;
7812 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
7813 */
7814void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
7815  movdl(tmp, xcrc);
7816  andl(tmp, 0xFF);
7817  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
7818  psrldq(xcrc, 1); // unsigned shift one byte
7819  pxor(xcrc, xtmp);
7820}
7821
7822/**
7823 * uint32_t crc;
7824 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
7825 */
7826void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
7827  movl(tmp, crc);
7828  andl(tmp, 0xFF);
7829  shrl(crc, 8);
7830  xorl(crc, Address(table, tmp, Address::times_4, 0));
7831}
7832
7833/**
7834 * @param crc   register containing existing CRC (32-bit)
7835 * @param buf   register pointing to input byte buffer (byte*)
7836 * @param len   register containing number of bytes
7837 * @param table register that will contain address of CRC table
7838 * @param tmp   scratch register
7839 */
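// The overall flow below, in brief: complement the incoming crc (standard
// CRC-32 pre/post conditioning), process single bytes until buf is 16-byte
// aligned, fold 512 bits per iteration across four 128-bit lanes using
// carry-less multiplication, collapse the four lanes into one, fold any
// remaining 128-bit chunks, reduce the 128-bit remainder to 32 bits with two
// more carry-less multiplies plus eight table-driven 8-bit folds, process the
// byte tail, and complement the result again.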
7840void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
7841  assert_different_registers(crc, buf, len, table, tmp, rax);
7842
7843  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
7844  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
7845
7846  lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
7847  notl(crc); // ~crc
7848  cmpl(len, 16);
7849  jcc(Assembler::less, L_tail);
7850
7851  // Align buffer to 16 bytes
7852  movl(tmp, buf);
7853  andl(tmp, 0xF);
7854  jccb(Assembler::zero, L_aligned);
7855  subl(tmp,  16);
7856  addl(len, tmp);
7857
7858  align(4);
7859  BIND(L_align_loop);
7860  movsbl(rax, Address(buf, 0)); // load byte with sign extension
7861  update_byte_crc32(crc, rax, table);
7862  increment(buf);
7863  incrementl(tmp);
7864  jccb(Assembler::less, L_align_loop);
7865
7866  BIND(L_aligned);
7867  movl(tmp, len); // save
7868  shrl(len, 4);
7869  jcc(Assembler::zero, L_tail_restore);
7870
7871  // Fold crc into the first 4 bytes of the vector
7872  movdqa(xmm1, Address(buf, 0));
7873  movdl(rax, xmm1);
7874  xorl(crc, rax);
7875  pinsrd(xmm1, crc, 0);
7876  addptr(buf, 16);
7877  subl(len, 4); // len > 0
7878  jcc(Assembler::less, L_fold_tail);
7879
7880  movdqa(xmm2, Address(buf,  0));
7881  movdqa(xmm3, Address(buf, 16));
7882  movdqa(xmm4, Address(buf, 32));
7883  addptr(buf, 48);
7884  subl(len, 3);
7885  jcc(Assembler::lessEqual, L_fold_512b);
7886
7887  // Fold a total of 512 bits of polynomial per iteration,
7888  // 128 bits in each of 4 parallel streams.
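  // Keeping four independent fold chains in flight hides the multi-cycle
  // latency of the carry-less multiply.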
7889  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
7890
7891  align(32);
7892  BIND(L_fold_512b_loop);
7893  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
7894  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
7895  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
7896  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
7897  addptr(buf, 64);
7898  subl(len, 4);
7899  jcc(Assembler::greater, L_fold_512b_loop);
7900
7901  // Fold 512 bits to 128 bits.
7902  BIND(L_fold_512b);
7903  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
7904  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
7905  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
7906  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
7907
7908  // Fold the remaining 128-bit data chunks
7909  BIND(L_fold_tail);
7910  addl(len, 3);
7911  jccb(Assembler::lessEqual, L_fold_128b);
7912  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
7913
7914  BIND(L_fold_tail_loop);
7915  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
7916  addptr(buf, 16);
7917  decrementl(len);
7918  jccb(Assembler::greater, L_fold_tail_loop);
7919
7920  // Fold 128 bits in xmm1 down into 32 bits in crc register.
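  // Two more carry-less multiplies against constants from crc_by128_masks
  // shrink the remainder; the eight table-driven 8-bit folds below then
  // finish the reduction to the final 32-bit value.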
7921  BIND(L_fold_128b);
7922  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
7923  if (UseAVX > 0) {
7924    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
7925    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
7926    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
7927  } else {
7928    movdqa(xmm2, xmm0);
7929    pclmulqdq(xmm2, xmm1, 0x1);
7930    movdqa(xmm3, xmm0);
7931    pand(xmm3, xmm2);
7932    pclmulqdq(xmm0, xmm3, 0x1);
7933  }
7934  psrldq(xmm1, 8);
7935  psrldq(xmm2, 4);
7936  pxor(xmm0, xmm1);
7937  pxor(xmm0, xmm2);
7938
7939  // 8 8-bit folds to compute 32-bit CRC.
7940  for (int j = 0; j < 4; j++) {
7941    fold_8bit_crc32(xmm0, table, xmm1, rax);
7942  }
7943  movdl(crc, xmm0); // mov 32 bits to general register
7944  for (int j = 0; j < 4; j++) {
7945    fold_8bit_crc32(crc, table, rax);
7946  }
7947
7948  BIND(L_tail_restore);
7949  movl(len, tmp); // restore
7950  BIND(L_tail);
7951  andl(len, 0xf);
7952  jccb(Assembler::zero, L_exit);
7953
7954  // Fold the remaining bytes one at a time
7955  align(4);
7956  BIND(L_tail_loop);
7957  movsbl(rax, Address(buf, 0)); // load byte with sign extension
7958  update_byte_crc32(crc, rax, table);
7959  increment(buf);
7960  decrementl(len);
7961  jccb(Assembler::greater, L_tail_loop);
7962
7963  BIND(L_exit);
7964  notl(crc); // ~crc
7965}
7966
7967#undef BIND
7968#undef BLOCK_COMMENT
7969
7970
7971Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
7972  switch (cond) {
7973    // Note some conditions are synonyms for others
7974    case Assembler::zero:         return Assembler::notZero;
7975    case Assembler::notZero:      return Assembler::zero;
7976    case Assembler::less:         return Assembler::greaterEqual;
7977    case Assembler::lessEqual:    return Assembler::greater;
7978    case Assembler::greater:      return Assembler::lessEqual;
7979    case Assembler::greaterEqual: return Assembler::less;
7980    case Assembler::below:        return Assembler::aboveEqual;
7981    case Assembler::belowEqual:   return Assembler::above;
7982    case Assembler::above:        return Assembler::belowEqual;
7983    case Assembler::aboveEqual:   return Assembler::below;
7984    case Assembler::overflow:     return Assembler::noOverflow;
7985    case Assembler::noOverflow:   return Assembler::overflow;
7986    case Assembler::negative:     return Assembler::positive;
7987    case Assembler::positive:     return Assembler::negative;
7988    case Assembler::parity:       return Assembler::noParity;
7989    case Assembler::noParity:     return Assembler::parity;
7990  }
7991  ShouldNotReachHere(); return Assembler::overflow;
7992}
7993
7994SkipIfEqual::SkipIfEqual(
7995    MacroAssembler* masm, const bool* flag_addr, bool value) {
7996  _masm = masm;
7997  _masm->cmp8(ExternalAddress((address)flag_addr), value);
7998  _masm->jcc(Assembler::equal, _label);
7999}
8000
8001SkipIfEqual::~SkipIfEqual() {
8002  _masm->bind(_label);
8003}
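
// Typical use of SkipIfEqual (an illustrative sketch; the flag shown is just
// an example of a bool VM flag, not one referenced in this file):
//
//   {
//     SkipIfEqual skip(masm, &DTraceAllocProbes, false);
//     // ... code emitted here is jumped over when DTraceAllocProbes == false
//   }   // the destructor binds the skip-target label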
8004