macroAssembler_x86.cpp revision 9149:a8a8604f890f
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/assembler.hpp"
27#include "asm/assembler.inline.hpp"
28#include "compiler/disassembler.hpp"
29#include "gc/shared/cardTableModRefBS.hpp"
30#include "gc/shared/collectedHeap.inline.hpp"
31#include "interpreter/interpreter.hpp"
32#include "memory/resourceArea.hpp"
33#include "memory/universe.hpp"
34#include "oops/klass.inline.hpp"
35#include "prims/methodHandles.hpp"
36#include "runtime/biasedLocking.hpp"
37#include "runtime/interfaceSupport.hpp"
38#include "runtime/objectMonitor.hpp"
39#include "runtime/os.hpp"
40#include "runtime/sharedRuntime.hpp"
41#include "runtime/stubRoutines.hpp"
42#include "utilities/macros.hpp"
43#if INCLUDE_ALL_GCS
44#include "gc/g1/g1CollectedHeap.inline.hpp"
45#include "gc/g1/g1SATBCardTableModRefBS.hpp"
46#include "gc/g1/heapRegion.hpp"
47#endif // INCLUDE_ALL_GCS
48#include "crc32c.h"
49
50#ifdef PRODUCT
51#define BLOCK_COMMENT(str) /* nothing */
52#define STOP(error) stop(error)
53#else
54#define BLOCK_COMMENT(str) block_comment(str)
55#define STOP(error) block_comment(error); stop(error)
56#endif
57
58#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
59
60#ifdef ASSERT
61bool AbstractAssembler::pd_check_instruction_mark() { return true; }
62#endif
63
64static Assembler::Condition reverse[] = {
65    Assembler::noOverflow     /* overflow      = 0x0 */ ,
66    Assembler::overflow       /* noOverflow    = 0x1 */ ,
67    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
68    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
69    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
70    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
71    Assembler::above          /* belowEqual    = 0x6 */ ,
72    Assembler::belowEqual     /* above         = 0x7 */ ,
73    Assembler::positive       /* negative      = 0x8 */ ,
74    Assembler::negative       /* positive      = 0x9 */ ,
75    Assembler::noParity       /* parity        = 0xa */ ,
76    Assembler::parity         /* noParity      = 0xb */ ,
77    Assembler::greaterEqual   /* less          = 0xc */ ,
78    Assembler::less           /* greaterEqual  = 0xd */ ,
79    Assembler::greater        /* lessEqual     = 0xe */ ,
80    Assembler::lessEqual      /* greater       = 0xf, */
81
82};
83
84
85// Implementation of MacroAssembler
86
87// First, all the versions that differ between 32-bit and 64-bit,
88// unless the difference is trivial (a line or so).
89
90#ifndef _LP64
91
92// 32bit versions
93
94Address MacroAssembler::as_Address(AddressLiteral adr) {
95  return Address(adr.target(), adr.rspec());
96}
97
98Address MacroAssembler::as_Address(ArrayAddress adr) {
99  return Address::make_array(adr);
100}
101
102void MacroAssembler::call_VM_leaf_base(address entry_point,
103                                       int number_of_arguments) {
104  call(RuntimeAddress(entry_point));
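  // The arguments were pushed by the caller (see pass_arg0..pass_arg3 below);
  // with the 32-bit C calling convention the caller pops them, so adjust rsp here.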
105  increment(rsp, number_of_arguments * wordSize);
106}
107
108void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
109  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
110}
111
112void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
113  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
114}
115
116void MacroAssembler::cmpoop(Address src1, jobject obj) {
117  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
118}
119
120void MacroAssembler::cmpoop(Register src1, jobject obj) {
121  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
122}
123
124void MacroAssembler::extend_sign(Register hi, Register lo) {
125  // According to Intel Doc. AP-526, "Integer Divide", p.18.
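  // cdql sign-extends eax into edx:eax in a single instruction; the fallback below
  // does the same with an explicit copy and arithmetic shift.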
126  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
127    cdql();
128  } else {
129    movl(hi, lo);
130    sarl(hi, 31);
131  }
132}
133
134void MacroAssembler::jC2(Register tmp, Label& L) {
135  // set parity bit if FPU flag C2 is set (via rax)
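  // fnstsw_ax copies the FPU status word into ax; sahf then loads ah into EFLAGS,
  // so FPU condition bit C2 ends up in the parity flag tested below.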
136  save_rax(tmp);
137  fwait(); fnstsw_ax();
138  sahf();
139  restore_rax(tmp);
140  // branch
141  jcc(Assembler::parity, L);
142}
143
144void MacroAssembler::jnC2(Register tmp, Label& L) {
145  // set parity bit if FPU flag C2 is set (via rax)
146  save_rax(tmp);
147  fwait(); fnstsw_ax();
148  sahf();
149  restore_rax(tmp);
150  // branch
151  jcc(Assembler::noParity, L);
152}
153
154// 32bit can do a case table jump in one instruction but we no longer allow the base
155// to be installed in the Address class
156void MacroAssembler::jump(ArrayAddress entry) {
157  jmp(as_Address(entry));
158}
159
160// Note: y_lo will be destroyed
161void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
162  // Long compare for Java (semantics as described in JVM spec.)
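  // Result is left in x_hi: -1 if x < y, 0 if x == y, +1 if x > y.
  // The high words are compared signed, the low words unsigned.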
163  Label high, low, done;
164
165  cmpl(x_hi, y_hi);
166  jcc(Assembler::less, low);
167  jcc(Assembler::greater, high);
168  // x_hi is the return register
169  xorl(x_hi, x_hi);
170  cmpl(x_lo, y_lo);
171  jcc(Assembler::below, low);
172  jcc(Assembler::equal, done);
173
174  bind(high);
175  xorl(x_hi, x_hi);
176  increment(x_hi);
177  jmp(done);
178
179  bind(low);
180  xorl(x_hi, x_hi);
181  decrementl(x_hi);
182
183  bind(done);
184}
185
186void MacroAssembler::lea(Register dst, AddressLiteral src) {
187    mov_literal32(dst, (int32_t)src.target(), src.rspec());
188}
189
190void MacroAssembler::lea(Address dst, AddressLiteral adr) {
191  // leal(dst, as_Address(adr));
192  // see note in movl as to why we must use a move
193  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
194}
195
196void MacroAssembler::leave() {
197  mov(rsp, rbp);
198  pop(rbp);
199}
200
201void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
202  // Multiplication of two Java long values stored on the stack
203  // as illustrated below. Result is in rdx:rax.
204  //
205  // rsp ---> [  ??  ] \               \
206  //            ....    | y_rsp_offset  |
207  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
208  //          [ y_hi ]                  | (in bytes)
209  //            ....                    |
210  //          [ x_lo ]                 /
211  //          [ x_hi ]
212  //            ....
213  //
214  // Basic idea: lo(result) = lo(x_lo * y_lo)
215  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
216  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
217  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
218  Label quick;
219  // load x_hi, y_hi and check if quick
220  // multiplication is possible
221  movl(rbx, x_hi);
222  movl(rcx, y_hi);
223  movl(rax, rbx);
224  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
225  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
226  // do full multiplication
227  // 1st step
228  mull(y_lo);                                    // x_hi * y_lo
229  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
230  // 2nd step
231  movl(rax, x_lo);
232  mull(rcx);                                     // x_lo * y_hi
233  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
234  // 3rd step
235  bind(quick);                                   // note: rbx, = 0 if quick multiply!
236  movl(rax, x_lo);
237  mull(y_lo);                                    // x_lo * y_lo
238  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
239}
240
241void MacroAssembler::lneg(Register hi, Register lo) {
242  negl(lo);
243  adcl(hi, 0);
244  negl(hi);
245}
246
247void MacroAssembler::lshl(Register hi, Register lo) {
248  // Java shift left long support (semantics as described in JVM spec., p.305)
249  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
250  // shift value is in rcx !
251  assert(hi != rcx, "must not use rcx");
252  assert(lo != rcx, "must not use rcx");
253  const Register s = rcx;                        // shift count
254  const int      n = BitsPerWord;
255  Label L;
256  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
257  cmpl(s, n);                                    // if (s < n)
258  jcc(Assembler::less, L);                       // else (s >= n)
259  movl(hi, lo);                                  // x := x << n
260  xorl(lo, lo);
261  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
262  bind(L);                                       // s (mod n) < n
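  // shldl(hi, lo) shifts hi left by cl bits, filling the vacated low bits from the
  // top bits of lo; shll(lo) then shifts the low word by the same count.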
263  shldl(hi, lo);                                 // x := x << s
264  shll(lo);
265}
266
267
268void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
269  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
270  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
271  assert(hi != rcx, "must not use rcx");
272  assert(lo != rcx, "must not use rcx");
273  const Register s = rcx;                        // shift count
274  const int      n = BitsPerWord;
275  Label L;
276  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
277  cmpl(s, n);                                    // if (s < n)
278  jcc(Assembler::less, L);                       // else (s >= n)
279  movl(lo, hi);                                  // x := x >> n
280  if (sign_extension) sarl(hi, 31);
281  else                xorl(hi, hi);
282  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
283  bind(L);                                       // s (mod n) < n
284  shrdl(lo, hi);                                 // x := x >> s
285  if (sign_extension) sarl(hi);
286  else                shrl(hi);
287}
288
289void MacroAssembler::movoop(Register dst, jobject obj) {
290  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
291}
292
293void MacroAssembler::movoop(Address dst, jobject obj) {
294  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
295}
296
297void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
298  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
299}
300
301void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
302  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
303}
304
305void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
306  // scratch register is not used,
307  // it is defined to match parameters of 64-bit version of this method.
308  if (src.is_lval()) {
309    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
310  } else {
311    movl(dst, as_Address(src));
312  }
313}
314
315void MacroAssembler::movptr(ArrayAddress dst, Register src) {
316  movl(as_Address(dst), src);
317}
318
319void MacroAssembler::movptr(Register dst, ArrayAddress src) {
320  movl(dst, as_Address(src));
321}
322
323// src should NEVER be a real pointer. Use AddressLiteral for true pointers
324void MacroAssembler::movptr(Address dst, intptr_t src) {
325  movl(dst, src);
326}
327
328
329void MacroAssembler::pop_callee_saved_registers() {
330  pop(rcx);
331  pop(rdx);
332  pop(rdi);
333  pop(rsi);
334}
335
336void MacroAssembler::pop_fTOS() {
337  fld_d(Address(rsp, 0));
338  addl(rsp, 2 * wordSize);
339}
340
341void MacroAssembler::push_callee_saved_registers() {
342  push(rsi);
343  push(rdi);
344  push(rdx);
345  push(rcx);
346}
347
348void MacroAssembler::push_fTOS() {
349  subl(rsp, 2 * wordSize);
350  fstp_d(Address(rsp, 0));
351}
352
353
354void MacroAssembler::pushoop(jobject obj) {
355  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
356}
357
358void MacroAssembler::pushklass(Metadata* obj) {
359  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
360}
361
362void MacroAssembler::pushptr(AddressLiteral src) {
363  if (src.is_lval()) {
364    push_literal32((int32_t)src.target(), src.rspec());
365  } else {
366    pushl(as_Address(src));
367  }
368}
369
370void MacroAssembler::set_word_if_not_zero(Register dst) {
371  xorl(dst, dst);
372  set_byte_if_not_zero(dst);
373}
374
375static void pass_arg0(MacroAssembler* masm, Register arg) {
376  masm->push(arg);
377}
378
379static void pass_arg1(MacroAssembler* masm, Register arg) {
380  masm->push(arg);
381}
382
383static void pass_arg2(MacroAssembler* masm, Register arg) {
384  masm->push(arg);
385}
386
387static void pass_arg3(MacroAssembler* masm, Register arg) {
388  masm->push(arg);
389}
390
391#ifndef PRODUCT
392extern "C" void findpc(intptr_t x);
393#endif
394
395void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
396  // In order to get locks to work, we need to fake an in_VM state
397  JavaThread* thread = JavaThread::current();
398  JavaThreadState saved_state = thread->thread_state();
399  thread->set_thread_state(_thread_in_vm);
400  if (ShowMessageBoxOnError) {
401    JavaThread* thread = JavaThread::current();
402    JavaThreadState saved_state = thread->thread_state();
403    thread->set_thread_state(_thread_in_vm);
404    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
405      ttyLocker ttyl;
406      BytecodeCounter::print();
407    }
408    // To see where a verify_oop failed, get $ebx+40/X for this frame.
409    // This is the value of eip which points to where verify_oop will return.
410    if (os::message_box(msg, "Execution stopped, print registers?")) {
411      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
412      BREAKPOINT;
413    }
414  } else {
415    ttyLocker ttyl;
416    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
417  }
418  // Don't assert holding the ttyLock
419  assert(false, "DEBUG MESSAGE: %s", msg);
420  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
421}
422
423void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
424  ttyLocker ttyl;
425  FlagSetting fs(Debugging, true);
426  tty->print_cr("eip = 0x%08x", eip);
427#ifndef PRODUCT
428  if ((WizardMode || Verbose) && PrintMiscellaneous) {
429    tty->cr();
430    findpc(eip);
431    tty->cr();
432  }
433#endif
434#define PRINT_REG(rax) \
435  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
436  PRINT_REG(rax);
437  PRINT_REG(rbx);
438  PRINT_REG(rcx);
439  PRINT_REG(rdx);
440  PRINT_REG(rdi);
441  PRINT_REG(rsi);
442  PRINT_REG(rbp);
443  PRINT_REG(rsp);
444#undef PRINT_REG
445  // Print some words near top of stack.
446  int* dump_sp = (int*) rsp;
447  for (int col1 = 0; col1 < 8; col1++) {
448    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
449    os::print_location(tty, *dump_sp++);
450  }
451  for (int row = 0; row < 16; row++) {
452    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
453    for (int col = 0; col < 8; col++) {
454      tty->print(" 0x%08x", *dump_sp++);
455    }
456    tty->cr();
457  }
458  // Print some instructions around pc:
459  Disassembler::decode((address)eip-64, (address)eip);
460  tty->print_cr("--------");
461  Disassembler::decode((address)eip, (address)eip+32);
462}
463
464void MacroAssembler::stop(const char* msg) {
465  ExternalAddress message((address)msg);
466  // push address of message
467  pushptr(message.addr());
468  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
469  pusha();                                            // push registers
470  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
471  hlt();
472}
473
474void MacroAssembler::warn(const char* msg) {
475  push_CPU_state();
476
477  ExternalAddress message((address) msg);
478  // push address of message
479  pushptr(message.addr());
480
481  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
482  addl(rsp, wordSize);       // discard argument
483  pop_CPU_state();
484}
485
486void MacroAssembler::print_state() {
487  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
488  pusha();                                            // push registers
489
490  push_CPU_state();
491  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
492  pop_CPU_state();
493
494  popa();
495  addl(rsp, wordSize);
496}
497
498#else // _LP64
499
500// 64 bit versions
501
502Address MacroAssembler::as_Address(AddressLiteral adr) {
503  // amd64 always does this as a pc-rel
504  // we can be absolute or disp based on the instruction type
505  // jmp/call are displacements; others are absolute
506  assert(!adr.is_lval(), "must be rval");
507  assert(reachable(adr), "must be");
508  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
509
510}
511
512Address MacroAssembler::as_Address(ArrayAddress adr) {
513  AddressLiteral base = adr.base();
514  lea(rscratch1, base);
515  Address index = adr.index();
516  assert(index._disp == 0, "must not have disp"); // maybe it can?
517  Address array(rscratch1, index._index, index._scale, index._disp);
518  return array;
519}
520
521void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
522  Label L, E;
523
524#ifdef _WIN64
525  // Windows always allocates space for its register args
526  assert(num_args <= 4, "only register arguments supported");
527  subq(rsp,  frame::arg_reg_save_area_bytes);
528#endif
529
530  // Align stack if necessary
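  // (the x86-64 ABI requires rsp to be 16-byte aligned at the call; if the low
  //  bits are set, wrap the call in an 8-byte adjustment)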
531  testl(rsp, 15);
532  jcc(Assembler::zero, L);
533
534  subq(rsp, 8);
535  {
536    call(RuntimeAddress(entry_point));
537  }
538  addq(rsp, 8);
539  jmp(E);
540
541  bind(L);
542  {
543    call(RuntimeAddress(entry_point));
544  }
545
546  bind(E);
547
548#ifdef _WIN64
549  // restore stack pointer
550  addq(rsp, frame::arg_reg_save_area_bytes);
551#endif
552
553}
554
555void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
556  assert(!src2.is_lval(), "should use cmpptr");
557
558  if (reachable(src2)) {
559    cmpq(src1, as_Address(src2));
560  } else {
561    lea(rscratch1, src2);
562    Assembler::cmpq(src1, Address(rscratch1, 0));
563  }
564}
565
566int MacroAssembler::corrected_idivq(Register reg) {
567  // Full implementation of Java ldiv and lrem; checks for special
568  // case as described in JVM spec., p.243 & p.271.  The function
569  // returns the (pc) offset of the idivq instruction - may be needed
570  // for implicit exceptions.
571  //
572  //         normal case                           special case
573  //
574  // input : rax: dividend                         min_long
575  //         reg: divisor   (may not be eax/edx)   -1
576  //
577  // output: rax: quotient  (= rax idiv reg)       min_long
578  //         rdx: remainder (= rax irem reg)       0
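  //
  // min_long / -1 is special-cased because the quotient would not fit and the
  // idivq instruction would raise a hardware divide error (#DE).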
579  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
580  static const int64_t min_long = 0x8000000000000000;
581  Label normal_case, special_case;
582
583  // check for special case
584  cmp64(rax, ExternalAddress((address) &min_long));
585  jcc(Assembler::notEqual, normal_case);
586  xorl(rdx, rdx); // prepare rdx for possible special case (where
587                  // remainder = 0)
588  cmpq(reg, -1);
589  jcc(Assembler::equal, special_case);
590
591  // handle normal case
592  bind(normal_case);
593  cdqq();
594  int idivq_offset = offset();
595  idivq(reg);
596
597  // normal and special case exit
598  bind(special_case);
599
600  return idivq_offset;
601}
602
603void MacroAssembler::decrementq(Register reg, int value) {
604  if (value == min_jint) { subq(reg, value); return; }
605  if (value <  0) { incrementq(reg, -value); return; }
606  if (value == 0) {                        ; return; }
607  if (value == 1 && UseIncDec) { decq(reg) ; return; }
608  /* else */      { subq(reg, value)       ; return; }
609}
610
611void MacroAssembler::decrementq(Address dst, int value) {
612  if (value == min_jint) { subq(dst, value); return; }
613  if (value <  0) { incrementq(dst, -value); return; }
614  if (value == 0) {                        ; return; }
615  if (value == 1 && UseIncDec) { decq(dst) ; return; }
616  /* else */      { subq(dst, value)       ; return; }
617}
618
619void MacroAssembler::incrementq(AddressLiteral dst) {
620  if (reachable(dst)) {
621    incrementq(as_Address(dst));
622  } else {
623    lea(rscratch1, dst);
624    incrementq(Address(rscratch1, 0));
625  }
626}
627
628void MacroAssembler::incrementq(Register reg, int value) {
629  if (value == min_jint) { addq(reg, value); return; }
630  if (value <  0) { decrementq(reg, -value); return; }
631  if (value == 0) {                        ; return; }
632  if (value == 1 && UseIncDec) { incq(reg) ; return; }
633  /* else */      { addq(reg, value)       ; return; }
634}
635
636void MacroAssembler::incrementq(Address dst, int value) {
637  if (value == min_jint) { addq(dst, value); return; }
638  if (value <  0) { decrementq(dst, -value); return; }
639  if (value == 0) {                        ; return; }
640  if (value == 1 && UseIncDec) { incq(dst) ; return; }
641  /* else */      { addq(dst, value)       ; return; }
642}
643
644// 32bit can do a case table jump in one instruction but we no longer allow the base
645// to be installed in the Address class
646void MacroAssembler::jump(ArrayAddress entry) {
647  lea(rscratch1, entry.base());
648  Address dispatch = entry.index();
649  assert(dispatch._base == noreg, "must be");
650  dispatch._base = rscratch1;
651  jmp(dispatch);
652}
653
654void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
655  ShouldNotReachHere(); // 64bit doesn't use two regs
656  cmpq(x_lo, y_lo);
657}
658
659void MacroAssembler::lea(Register dst, AddressLiteral src) {
660    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
661}
662
663void MacroAssembler::lea(Address dst, AddressLiteral adr) {
664  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
665  movptr(dst, rscratch1);
666}
667
668void MacroAssembler::leave() {
669  // %%% is this really better? Why not on 32bit too?
670  emit_int8((unsigned char)0xC9); // LEAVE
671}
672
673void MacroAssembler::lneg(Register hi, Register lo) {
674  ShouldNotReachHere(); // 64bit doesn't use two regs
675  negq(lo);
676}
677
678void MacroAssembler::movoop(Register dst, jobject obj) {
679  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
680}
681
682void MacroAssembler::movoop(Address dst, jobject obj) {
683  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
684  movq(dst, rscratch1);
685}
686
687void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
688  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
689}
690
691void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
692  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
693  movq(dst, rscratch1);
694}
695
696void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
697  if (src.is_lval()) {
698    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
699  } else {
700    if (reachable(src)) {
701      movq(dst, as_Address(src));
702    } else {
703      lea(scratch, src);
704      movq(dst, Address(scratch, 0));
705    }
706  }
707}
708
709void MacroAssembler::movptr(ArrayAddress dst, Register src) {
710  movq(as_Address(dst), src);
711}
712
713void MacroAssembler::movptr(Register dst, ArrayAddress src) {
714  movq(dst, as_Address(src));
715}
716
717// src should NEVER be a real pointer. Use AddressLiteral for true pointers
718void MacroAssembler::movptr(Address dst, intptr_t src) {
719  mov64(rscratch1, src);
720  movq(dst, rscratch1);
721}
722
723// These are mostly for initializing NULL
724void MacroAssembler::movptr(Address dst, int32_t src) {
725  movslq(dst, src);
726}
727
728void MacroAssembler::movptr(Register dst, int32_t src) {
729  mov64(dst, (intptr_t)src);
730}
731
732void MacroAssembler::pushoop(jobject obj) {
733  movoop(rscratch1, obj);
734  push(rscratch1);
735}
736
737void MacroAssembler::pushklass(Metadata* obj) {
738  mov_metadata(rscratch1, obj);
739  push(rscratch1);
740}
741
742void MacroAssembler::pushptr(AddressLiteral src) {
743  lea(rscratch1, src);
744  if (src.is_lval()) {
745    push(rscratch1);
746  } else {
747    pushq(Address(rscratch1, 0));
748  }
749}
750
751void MacroAssembler::reset_last_Java_frame(bool clear_fp,
752                                           bool clear_pc) {
753  // we must set sp to zero to clear frame
754  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
755  // must clear fp, so that compiled frames are not confused; it is
756  // possible that we need it only for debugging
757  if (clear_fp) {
758    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
759  }
760
761  if (clear_pc) {
762    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
763  }
764}
765
766void MacroAssembler::set_last_Java_frame(Register last_java_sp,
767                                         Register last_java_fp,
768                                         address  last_java_pc) {
769  // determine last_java_sp register
770  if (!last_java_sp->is_valid()) {
771    last_java_sp = rsp;
772  }
773
774  // last_java_fp is optional
775  if (last_java_fp->is_valid()) {
776    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
777           last_java_fp);
778  }
779
780  // last_java_pc is optional
781  if (last_java_pc != NULL) {
782    Address java_pc(r15_thread,
783                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
784    lea(rscratch1, InternalAddress(last_java_pc));
785    movptr(java_pc, rscratch1);
786  }
787
788  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
789}
790
791static void pass_arg0(MacroAssembler* masm, Register arg) {
792  if (c_rarg0 != arg ) {
793    masm->mov(c_rarg0, arg);
794  }
795}
796
797static void pass_arg1(MacroAssembler* masm, Register arg) {
798  if (c_rarg1 != arg ) {
799    masm->mov(c_rarg1, arg);
800  }
801}
802
803static void pass_arg2(MacroAssembler* masm, Register arg) {
804  if (c_rarg2 != arg ) {
805    masm->mov(c_rarg2, arg);
806  }
807}
808
809static void pass_arg3(MacroAssembler* masm, Register arg) {
810  if (c_rarg3 != arg ) {
811    masm->mov(c_rarg3, arg);
812  }
813}
814
815void MacroAssembler::stop(const char* msg) {
816  address rip = pc();
817  pusha(); // get regs on stack
818  lea(c_rarg0, ExternalAddress((address) msg));
819  lea(c_rarg1, InternalAddress(rip));
820  movq(c_rarg2, rsp); // pass pointer to regs array
821  andq(rsp, -16); // align stack as required by ABI
822  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
823  hlt();
824}
825
826void MacroAssembler::warn(const char* msg) {
827  push(rbp);
828  movq(rbp, rsp);
829  andq(rsp, -16);     // align stack as required by push_CPU_state and call
830  push_CPU_state();   // keeps alignment at 16 bytes
831  lea(c_rarg0, ExternalAddress((address) msg));
832  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
833  pop_CPU_state();
834  mov(rsp, rbp);
835  pop(rbp);
836}
837
838void MacroAssembler::print_state() {
839  address rip = pc();
840  pusha();            // get regs on stack
841  push(rbp);
842  movq(rbp, rsp);
843  andq(rsp, -16);     // align stack as required by push_CPU_state and call
844  push_CPU_state();   // keeps alignment at 16 bytes
845
846  lea(c_rarg0, InternalAddress(rip));
847  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
848  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
849
850  pop_CPU_state();
851  mov(rsp, rbp);
852  pop(rbp);
853  popa();
854}
855
856#ifndef PRODUCT
857extern "C" void findpc(intptr_t x);
858#endif
859
860void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
861  // In order to get locks to work, we need to fake an in_VM state
862  if (ShowMessageBoxOnError) {
863    JavaThread* thread = JavaThread::current();
864    JavaThreadState saved_state = thread->thread_state();
865    thread->set_thread_state(_thread_in_vm);
866#ifndef PRODUCT
867    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
868      ttyLocker ttyl;
869      BytecodeCounter::print();
870    }
871#endif
872    // To see where a verify_oop failed, get $ebx+40/X for this frame.
873    // XXX correct this offset for amd64
874    // This is the value of eip which points to where verify_oop will return.
875    if (os::message_box(msg, "Execution stopped, print registers?")) {
876      print_state64(pc, regs);
877      BREAKPOINT;
878      assert(false, "start up GDB");
879    }
880    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
881  } else {
882    ttyLocker ttyl;
883    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
884                    msg);
885    assert(false, "DEBUG MESSAGE: %s", msg);
886  }
887}
888
889void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
890  ttyLocker ttyl;
891  FlagSetting fs(Debugging, true);
892  tty->print_cr("rip = 0x%016lx", pc);
893#ifndef PRODUCT
894  tty->cr();
895  findpc(pc);
896  tty->cr();
897#endif
898#define PRINT_REG(rax, value) \
899  { tty->print("%s = ", #rax); os::print_location(tty, value); }
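  // regs[] is the register block saved by pusha() in stop(); the indices below
  // reflect that layout (rax at the highest index, r15 at index 0).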
900  PRINT_REG(rax, regs[15]);
901  PRINT_REG(rbx, regs[12]);
902  PRINT_REG(rcx, regs[14]);
903  PRINT_REG(rdx, regs[13]);
904  PRINT_REG(rdi, regs[8]);
905  PRINT_REG(rsi, regs[9]);
906  PRINT_REG(rbp, regs[10]);
907  PRINT_REG(rsp, regs[11]);
908  PRINT_REG(r8 , regs[7]);
909  PRINT_REG(r9 , regs[6]);
910  PRINT_REG(r10, regs[5]);
911  PRINT_REG(r11, regs[4]);
912  PRINT_REG(r12, regs[3]);
913  PRINT_REG(r13, regs[2]);
914  PRINT_REG(r14, regs[1]);
915  PRINT_REG(r15, regs[0]);
916#undef PRINT_REG
917  // Print some words near top of stack.
918  int64_t* rsp = (int64_t*) regs[11];
919  int64_t* dump_sp = rsp;
920  for (int col1 = 0; col1 < 8; col1++) {
921    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
922    os::print_location(tty, *dump_sp++);
923  }
924  for (int row = 0; row < 25; row++) {
925    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
926    for (int col = 0; col < 4; col++) {
927      tty->print(" 0x%016lx", *dump_sp++);
928    }
929    tty->cr();
930  }
931  // Print some instructions around pc:
932  Disassembler::decode((address)pc-64, (address)pc);
933  tty->print_cr("--------");
934  Disassembler::decode((address)pc, (address)pc+32);
935}
936
937#endif // _LP64
938
939// Now versions that are common to 32/64 bit
940
941void MacroAssembler::addptr(Register dst, int32_t imm32) {
942  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
943}
944
945void MacroAssembler::addptr(Register dst, Register src) {
946  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
947}
948
949void MacroAssembler::addptr(Address dst, Register src) {
950  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
951}
952
953void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
954  if (reachable(src)) {
955    Assembler::addsd(dst, as_Address(src));
956  } else {
957    lea(rscratch1, src);
958    Assembler::addsd(dst, Address(rscratch1, 0));
959  }
960}
961
962void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
963  if (reachable(src)) {
964    addss(dst, as_Address(src));
965  } else {
966    lea(rscratch1, src);
967    addss(dst, Address(rscratch1, 0));
968  }
969}
970
971void MacroAssembler::align(int modulus) {
972  align(modulus, offset());
973}
974
975void MacroAssembler::align(int modulus, int target) {
976  if (target % modulus != 0) {
977    nop(modulus - (target % modulus));
978  }
979}
980
981void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
982  // Used in sign-masking with aligned address.
983  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
984  if (reachable(src)) {
985    Assembler::andpd(dst, as_Address(src));
986  } else {
987    lea(rscratch1, src);
988    Assembler::andpd(dst, Address(rscratch1, 0));
989  }
990}
991
992void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
993  // Used in sign-masking with aligned address.
994  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
995  if (reachable(src)) {
996    Assembler::andps(dst, as_Address(src));
997  } else {
998    lea(rscratch1, src);
999    Assembler::andps(dst, Address(rscratch1, 0));
1000  }
1001}
1002
1003void MacroAssembler::andptr(Register dst, int32_t imm32) {
1004  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
1005}
1006
1007void MacroAssembler::atomic_incl(Address counter_addr) {
1008  if (os::is_MP())
1009    lock();
1010  incrementl(counter_addr);
1011}
1012
1013void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
1014  if (reachable(counter_addr)) {
1015    atomic_incl(as_Address(counter_addr));
1016  } else {
1017    lea(scr, counter_addr);
1018    atomic_incl(Address(scr, 0));
1019  }
1020}
1021
1022#ifdef _LP64
1023void MacroAssembler::atomic_incq(Address counter_addr) {
1024  if (os::is_MP())
1025    lock();
1026  incrementq(counter_addr);
1027}
1028
1029void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
1030  if (reachable(counter_addr)) {
1031    atomic_incq(as_Address(counter_addr));
1032  } else {
1033    lea(scr, counter_addr);
1034    atomic_incq(Address(scr, 0));
1035  }
1036}
1037#endif
1038
1039// Writes successive stack pages until the given offset is reached, to check for
1040// stack overflow + shadow pages.  This clobbers tmp.
1041void MacroAssembler::bang_stack_size(Register size, Register tmp) {
1042  movptr(tmp, rsp);
1043  // Bang stack for total size given plus shadow page size.
1044  // Bang one page at a time because large size can bang beyond yellow and
1045  // red zones.
1046  Label loop;
1047  bind(loop);
1048  movl(Address(tmp, (-os::vm_page_size())), size );
1049  subptr(tmp, os::vm_page_size());
1050  subl(size, os::vm_page_size());
1051  jcc(Assembler::greater, loop);
1052
1053  // Bang down shadow pages too.
1054  // At this point, (tmp-0) is the last address touched, so don't
1055  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
1056  // was post-decremented.)  Skip this address by starting at i=1, and
1057  // touch a few more pages below.  N.B.  It is important to touch all
1058  // the way down to and including i=StackShadowPages.
1059  for (int i = 1; i < StackShadowPages; i++) {
1060    // this could be any sized move, but since it can serve as a debugging crumb
1061    // so the bigger the better.
1062    movptr(Address(tmp, (-i*os::vm_page_size())), size );
1063  }
1064}
1065
1066int MacroAssembler::biased_locking_enter(Register lock_reg,
1067                                         Register obj_reg,
1068                                         Register swap_reg,
1069                                         Register tmp_reg,
1070                                         bool swap_reg_contains_mark,
1071                                         Label& done,
1072                                         Label* slow_case,
1073                                         BiasedLockingCounters* counters) {
1074  assert(UseBiasedLocking, "why call this otherwise?");
1075  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
1076  assert(tmp_reg != noreg, "tmp_reg must be supplied");
1077  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
1078  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
1079  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
1080  Address saved_mark_addr(lock_reg, 0);
1081
1082  if (PrintBiasedLockingStatistics && counters == NULL) {
1083    counters = BiasedLocking::counters();
1084  }
1085  // Biased locking
1086  // See whether the lock is currently biased toward our thread and
1087  // whether the epoch is still valid
1088  // Note that the runtime guarantees sufficient alignment of JavaThread
1089  // pointers to allow age to be placed into low bits
1090  // First check to see whether biasing is even enabled for this object
1091  Label cas_label;
1092  int null_check_offset = -1;
1093  if (!swap_reg_contains_mark) {
1094    null_check_offset = offset();
1095    movptr(swap_reg, mark_addr);
1096  }
1097  movptr(tmp_reg, swap_reg);
1098  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
1099  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
1100  jcc(Assembler::notEqual, cas_label);
1101  // The bias pattern is present in the object's header. Need to check
1102  // whether the bias owner and the epoch are both still current.
1103#ifndef _LP64
1104  // Note that because there is no current thread register on x86_32 we
1105  // need to store off the mark word we read out of the object to
1106  // avoid reloading it and needing to recheck invariants below. This
1107  // store is unfortunate but it makes the overall code shorter and
1108  // simpler.
1109  movptr(saved_mark_addr, swap_reg);
1110#endif
1111  if (swap_reg_contains_mark) {
1112    null_check_offset = offset();
1113  }
1114  load_prototype_header(tmp_reg, obj_reg);
1115#ifdef _LP64
1116  orptr(tmp_reg, r15_thread);
1117  xorptr(tmp_reg, swap_reg);
1118  Register header_reg = tmp_reg;
1119#else
1120  xorptr(tmp_reg, swap_reg);
1121  get_thread(swap_reg);
1122  xorptr(swap_reg, tmp_reg);
1123  Register header_reg = swap_reg;
1124#endif
1125  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
1126  if (counters != NULL) {
1127    cond_inc32(Assembler::zero,
1128               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
1129  }
1130  jcc(Assembler::equal, done);
1131
1132  Label try_revoke_bias;
1133  Label try_rebias;
1134
1135  // At this point we know that the header has the bias pattern and
1136  // that we are not the bias owner in the current epoch. We need to
1137  // figure out more details about the state of the header in order to
1138  // know what operations can be legally performed on the object's
1139  // header.
1140
1141  // If the low three bits in the xor result aren't clear, that means
1142  // the prototype header is no longer biased and we have to revoke
1143  // the bias on this object.
1144  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
1145  jccb(Assembler::notZero, try_revoke_bias);
1146
1147  // Biasing is still enabled for this data type. See whether the
1148  // epoch of the current bias is still valid, meaning that the epoch
1149  // bits of the mark word are equal to the epoch bits of the
1150  // prototype header. (Note that the prototype header's epoch bits
1151  // only change at a safepoint.) If not, attempt to rebias the object
1152  // toward the current thread. Note that we must be absolutely sure
1153  // that the current epoch is invalid in order to do this because
1154  // otherwise the manipulations it performs on the mark word are
1155  // illegal.
1156  testptr(header_reg, markOopDesc::epoch_mask_in_place);
1157  jccb(Assembler::notZero, try_rebias);
1158
1159  // The epoch of the current bias is still valid but we know nothing
1160  // about the owner; it might be set or it might be clear. Try to
1161  // acquire the bias of the object using an atomic operation. If this
1162  // fails we will go into the runtime to revoke the object's bias.
1163  // Note that we first construct the presumed unbiased header so we
1164  // don't accidentally blow away another thread's valid bias.
1165  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1166  andptr(swap_reg,
1167         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
1168#ifdef _LP64
1169  movptr(tmp_reg, swap_reg);
1170  orptr(tmp_reg, r15_thread);
1171#else
1172  get_thread(tmp_reg);
1173  orptr(tmp_reg, swap_reg);
1174#endif
1175  if (os::is_MP()) {
1176    lock();
1177  }
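  // cmpxchg compares rax (swap_reg, the presumed unbiased header) with the mark
  // word and installs tmp_reg (the header biased to this thread) only if they match.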
1178  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1179  // If the biasing toward our thread failed, this means that
1180  // another thread succeeded in biasing it toward itself and we
1181  // need to revoke that bias. The revocation will occur in the
1182  // interpreter runtime in the slow case.
1183  if (counters != NULL) {
1184    cond_inc32(Assembler::zero,
1185               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
1186  }
1187  if (slow_case != NULL) {
1188    jcc(Assembler::notZero, *slow_case);
1189  }
1190  jmp(done);
1191
1192  bind(try_rebias);
1193  // At this point we know the epoch has expired, meaning that the
1194  // current "bias owner", if any, is actually invalid. Under these
1195  // circumstances _only_, we are allowed to use the current header's
1196  // value as the comparison value when doing the cas to acquire the
1197  // bias in the current epoch. In other words, we allow transfer of
1198  // the bias from one thread to another directly in this situation.
1199  //
1200  // FIXME: due to a lack of registers we currently blow away the age
1201  // bits in this situation. Should attempt to preserve them.
1202  load_prototype_header(tmp_reg, obj_reg);
1203#ifdef _LP64
1204  orptr(tmp_reg, r15_thread);
1205#else
1206  get_thread(swap_reg);
1207  orptr(tmp_reg, swap_reg);
1208  movptr(swap_reg, saved_mark_addr);
1209#endif
1210  if (os::is_MP()) {
1211    lock();
1212  }
1213  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1214  // If the biasing toward our thread failed, then another thread
1215  // succeeded in biasing it toward itself and we need to revoke that
1216  // bias. The revocation will occur in the runtime in the slow case.
1217  if (counters != NULL) {
1218    cond_inc32(Assembler::zero,
1219               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
1220  }
1221  if (slow_case != NULL) {
1222    jcc(Assembler::notZero, *slow_case);
1223  }
1224  jmp(done);
1225
1226  bind(try_revoke_bias);
1227  // The prototype mark in the klass doesn't have the bias bit set any
1228  // more, indicating that objects of this data type are not supposed
1229  // to be biased any more. We are going to try to reset the mark of
1230  // this object to the prototype value and fall through to the
1231  // CAS-based locking scheme. Note that if our CAS fails, it means
1232  // that another thread raced us for the privilege of revoking the
1233  // bias of this particular object, so it's okay to continue in the
1234  // normal locking code.
1235  //
1236  // FIXME: due to a lack of registers we currently blow away the age
1237  // bits in this situation. Should attempt to preserve them.
1238  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1239  load_prototype_header(tmp_reg, obj_reg);
1240  if (os::is_MP()) {
1241    lock();
1242  }
1243  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1244  // Fall through to the normal CAS-based lock, because no matter what
1245  // the result of the above CAS, some thread must have succeeded in
1246  // removing the bias bit from the object's header.
1247  if (counters != NULL) {
1248    cond_inc32(Assembler::zero,
1249               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
1250  }
1251
1252  bind(cas_label);
1253
1254  return null_check_offset;
1255}
1256
1257void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
1258  assert(UseBiasedLocking, "why call this otherwise?");
1259
1260  // Check for biased locking unlock case, which is a no-op
1261  // Note: we do not have to check the thread ID for two reasons.
1262  // First, the interpreter checks for IllegalMonitorStateException at
1263  // a higher level. Second, if the bias was revoked while we held the
1264  // lock, the object could not be rebiased toward another thread, so
1265  // the bias bit would be clear.
1266  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1267  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
1268  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
1269  jcc(Assembler::equal, done);
1270}
1271
1272#ifdef COMPILER2
1273
1274#if INCLUDE_RTM_OPT
1275
1276// Update rtm_counters based on abort status
1277// input: abort_status
1278//        rtm_counters (RTMLockingCounters*)
1279// flags are killed
1280void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
1281
1282  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
1283  if (PrintPreciseRTMLockingStatistics) {
1284    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
1285      Label check_abort;
1286      testl(abort_status, (1<<i));
1287      jccb(Assembler::equal, check_abort);
1288      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
1289      bind(check_abort);
1290    }
1291  }
1292}
1293
1294// Branch if (random & (count-1) != 0), count is 2^n
1295// tmp, scr and flags are killed
1296void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
1297  assert(tmp == rax, "");
1298  assert(scr == rdx, "");
1299  rdtsc(); // modifies EDX:EAX
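  // The low bits of the time-stamp counter (now in tmp == rax) act as a cheap
  // pseudo-random value; count is a power of two, so the mask keeps its low bits.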
1300  andptr(tmp, count-1);
1301  jccb(Assembler::notZero, brLabel);
1302}
1303
1304// Perform abort ratio calculation, set no_rtm bit if high ratio
1305// input:  rtm_counters_Reg (RTMLockingCounters* address)
1306// tmpReg, rtm_counters_Reg and flags are killed
1307void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
1308                                                 Register rtm_counters_Reg,
1309                                                 RTMLockingCounters* rtm_counters,
1310                                                 Metadata* method_data) {
1311  Label L_done, L_check_always_rtm1, L_check_always_rtm2;
1312
1313  if (RTMLockingCalculationDelay > 0) {
1314    // Delay calculation
1315    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
1316    testptr(tmpReg, tmpReg);
1317    jccb(Assembler::equal, L_done);
1318  }
1319  // Abort ratio calculation only if abort_count > RTMAbortThreshold
1320  //   Aborted transactions = abort_count * 100
1321  //   All transactions = total_count *  RTMTotalCountIncrRate
1322  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
1323
1324  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
1325  cmpptr(tmpReg, RTMAbortThreshold);
1326  jccb(Assembler::below, L_check_always_rtm2);
1327  imulptr(tmpReg, tmpReg, 100);
1328
1329  Register scrReg = rtm_counters_Reg;
1330  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1331  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
1332  imulptr(scrReg, scrReg, RTMAbortRatio);
1333  cmpptr(tmpReg, scrReg);
1334  jccb(Assembler::below, L_check_always_rtm1);
1335  if (method_data != NULL) {
1336    // set rtm_state to "no rtm" in MDO
1337    mov_metadata(tmpReg, method_data);
1338    if (os::is_MP()) {
1339      lock();
1340    }
1341    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
1342  }
1343  jmpb(L_done);
1344  bind(L_check_always_rtm1);
1345  // Reload RTMLockingCounters* address
1346  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1347  bind(L_check_always_rtm2);
1348  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1349  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
1350  jccb(Assembler::below, L_done);
1351  if (method_data != NULL) {
1352    // set rtm_state to "always rtm" in MDO
1353    mov_metadata(tmpReg, method_data);
1354    if (os::is_MP()) {
1355      lock();
1356    }
1357    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
1358  }
1359  bind(L_done);
1360}
1361
1362// Update counters and perform abort ratio calculation
1363// input:  abort_status_Reg
1364// rtm_counters_Reg, flags are killed
1365void MacroAssembler::rtm_profiling(Register abort_status_Reg,
1366                                   Register rtm_counters_Reg,
1367                                   RTMLockingCounters* rtm_counters,
1368                                   Metadata* method_data,
1369                                   bool profile_rtm) {
1370
1371  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1372  // update rtm counters based on rax value at abort
1373  // reads abort_status_Reg, updates flags
1374  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1375  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
1376  if (profile_rtm) {
1377    // Save abort status because abort_status_Reg is used by following code.
1378    if (RTMRetryCount > 0) {
1379      push(abort_status_Reg);
1380    }
1381    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1382    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
1383    // restore abort status
1384    if (RTMRetryCount > 0) {
1385      pop(abort_status_Reg);
1386    }
1387  }
1388}
1389
1390// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
1391// inputs: retry_count_Reg
1392//       : abort_status_Reg
1393// output: retry_count_Reg decremented by 1
1394// flags are killed
1395void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
1396  Label doneRetry;
1397  assert(abort_status_Reg == rax, "");
1398  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
1399  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
1400  // if reason is in 0x6 and retry count != 0 then retry
1401  andptr(abort_status_Reg, 0x6);
1402  jccb(Assembler::zero, doneRetry);
1403  testl(retry_count_Reg, retry_count_Reg);
1404  jccb(Assembler::zero, doneRetry);
1405  pause();
1406  decrementl(retry_count_Reg);
1407  jmp(retryLabel);
1408  bind(doneRetry);
1409}
1410
1411// Spin and retry if lock is busy,
1412// inputs: box_Reg (monitor address)
1413//       : retry_count_Reg
1414// output: retry_count_Reg decremented by 1
1415//       : clear z flag if retry count exceeded
1416// tmp_Reg, scr_Reg, flags are killed
1417void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
1418                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
1419  Label SpinLoop, SpinExit, doneRetry;
1420  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1421
1422  testl(retry_count_Reg, retry_count_Reg);
1423  jccb(Assembler::zero, doneRetry);
1424  decrementl(retry_count_Reg);
1425  movptr(scr_Reg, RTMSpinLoopCount);
1426
1427  bind(SpinLoop);
1428  pause();
1429  decrementl(scr_Reg);
1430  jccb(Assembler::lessEqual, SpinExit);
1431  movptr(tmp_Reg, Address(box_Reg, owner_offset));
1432  testptr(tmp_Reg, tmp_Reg);
1433  jccb(Assembler::notZero, SpinLoop);
1434
1435  bind(SpinExit);
1436  jmp(retryLabel);
1437  bind(doneRetry);
1438  incrementl(retry_count_Reg); // clear z flag
1439}
1440
1441// Use RTM for normal stack locks
1442// Input: objReg (object to lock)
1443void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
1444                                       Register retry_on_abort_count_Reg,
1445                                       RTMLockingCounters* stack_rtm_counters,
1446                                       Metadata* method_data, bool profile_rtm,
1447                                       Label& DONE_LABEL, Label& IsInflated) {
1448  assert(UseRTMForStackLocks, "why call this otherwise?");
1449  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1450  assert(tmpReg == rax, "");
1451  assert(scrReg == rdx, "");
1452  Label L_rtm_retry, L_decrement_retry, L_on_abort;
1453
1454  if (RTMRetryCount > 0) {
1455    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1456    bind(L_rtm_retry);
1457  }
1458  movptr(tmpReg, Address(objReg, 0));
1459  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
1460  jcc(Assembler::notZero, IsInflated);
1461
1462  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1463    Label L_noincrement;
1464    if (RTMTotalCountIncrRate > 1) {
1465      // tmpReg, scrReg and flags are killed
1466      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1467    }
1468    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1469    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1470    bind(L_noincrement);
1471  }
1472  xbegin(L_on_abort);
1473  movptr(tmpReg, Address(objReg, 0));       // fetch markword
1474  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1475  cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1476  jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
1477
1478  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1479  if (UseRTMXendForLockBusy) {
1480    xend();
1481    movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
1482    jmp(L_decrement_retry);
1483  }
1484  else {
1485    xabort(0);
1486  }
1487  bind(L_on_abort);
1488  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1489    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
1490  }
1491  bind(L_decrement_retry);
1492  if (RTMRetryCount > 0) {
1493    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1494    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1495  }
1496}
1497
1498// Use RTM for inflating locks
1499// inputs: objReg (object to lock)
1500//         boxReg (on-stack box address (displaced header location) - KILLED)
1501//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
1502void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
1503                                          Register scrReg, Register retry_on_busy_count_Reg,
1504                                          Register retry_on_abort_count_Reg,
1505                                          RTMLockingCounters* rtm_counters,
1506                                          Metadata* method_data, bool profile_rtm,
1507                                          Label& DONE_LABEL) {
1508  assert(UseRTMLocking, "why call this otherwise?");
1509  assert(tmpReg == rax, "");
1510  assert(scrReg == rdx, "");
1511  Label L_rtm_retry, L_decrement_retry, L_on_abort;
1512  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1513
1514  // Without cast to int32_t a movptr will destroy r10 which is typically obj
1515  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1516  movptr(boxReg, tmpReg); // Save ObjectMonitor address
1517
1518  if (RTMRetryCount > 0) {
1519    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
1520    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1521    bind(L_rtm_retry);
1522  }
1523  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1524    Label L_noincrement;
1525    if (RTMTotalCountIncrRate > 1) {
1526      // tmpReg, scrReg and flags are killed
1527      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1528    }
1529    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1530    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1531    bind(L_noincrement);
1532  }
1533  xbegin(L_on_abort);
1534  movptr(tmpReg, Address(objReg, 0));
1535  movptr(tmpReg, Address(tmpReg, owner_offset));
1536  testptr(tmpReg, tmpReg);
1537  jcc(Assembler::zero, DONE_LABEL);
1538  if (UseRTMXendForLockBusy) {
1539    xend();
1540    jmp(L_decrement_retry);
1541  }
1542  else {
1543    xabort(0);
1544  }
1545  bind(L_on_abort);
1546  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1547  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1548    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
1549  }
1550  if (RTMRetryCount > 0) {
1551    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1552    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1553  }
1554
1555  movptr(tmpReg, Address(boxReg, owner_offset)) ;
1556  testptr(tmpReg, tmpReg) ;
1557  jccb(Assembler::notZero, L_decrement_retry) ;
1558
1559  // Appears unlocked - try to swing _owner from null to non-null.
1560  // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1561#ifdef _LP64
1562  Register threadReg = r15_thread;
1563#else
1564  get_thread(scrReg);
1565  Register threadReg = scrReg;
1566#endif
1567  if (os::is_MP()) {
1568    lock();
1569  }
1570  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
1571
1572  if (RTMRetryCount > 0) {
1573    // if successful we are done, otherwise retry
1574    jccb(Assembler::equal, DONE_LABEL) ;
1575    bind(L_decrement_retry);
1576    // Spin and retry if lock is busy.
1577    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1578  }
1579  else {
1580    bind(L_decrement_retry);
1581  }
1582}
1583
1584#endif //  INCLUDE_RTM_OPT
1585
1586// Fast_Lock and Fast_Unlock used by C2
1587
1588// Because the transitions from emitted code to the runtime
1589// monitorenter/exit helper stubs are so slow it's critical that
1590// we inline both the stack-locking fast-path and the inflated fast path.
1591//
1592// See also: cmpFastLock and cmpFastUnlock.
1593//
1594// What follows is a specialized inline transliteration of the code
1595// in slow_enter() and slow_exit().  If we're concerned about I$ bloat
1596// another option would be to emit TrySlowEnter and TrySlowExit methods
1597// at startup-time.  These methods would accept arguments as
1598// (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1599// indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
1600// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1601// In practice, however, the # of lock sites is bounded and is usually small.
1602// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1603// if the processor uses simple bimodal branch predictors keyed by EIP,
1604// since the helper routines would be called from multiple synchronization
1605// sites.
1606//
1607// An even better approach would be write "MonitorEnter()" and "MonitorExit()"
1608// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1609// to those specialized methods.  That'd give us a mostly platform-independent
1610// implementation that the JITs could optimize and inline at their pleasure.
1611// Done correctly, the only time we'd need to cross to native code would be
1612// to park() or unpark() threads.  We'd also need a few more unsafe operators
1613// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1614// (b) emit explicit barriers or fence operations.
1615//
1616// TODO:
1617//
1618// *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
1619//    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
1620//    Given TLAB allocation, Self is usually manifested in a register, so passing it into
1621//    the lock operators would typically be faster than reifying Self.
1622//
1623// *  Ideally I'd define the primitives as:
1624//       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1625//       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1626//    Unfortunately ADLC bugs prevent us from expressing the ideal form.
1627//    Instead, we're stuck with the rather awkward and brittle register assignments below.
1628//    Furthermore the register assignments are overconstrained, possibly resulting in
1629//    sub-optimal code near the synchronization site.
1630//
1631// *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
1632//    Alternatively, use a better sp-proximity test.
1633//
1634// *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1635//    Either one is sufficient to uniquely identify a thread.
1636//    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1637//
1638// *  Intrinsify notify() and notifyAll() for the common cases where the
1639//    object is locked by the calling thread but the waitlist is empty.
1640//    This would avoid the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
1641//
1642// *  use jccb and jmpb instead of jcc and jmp to improve code density.
1643//    But beware of excessive branch density on AMD Opterons.
1644//
1645// *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
1646//    or failure of the fast-path.  If the fast-path fails then we pass
1647//    control to the slow-path, typically in C.  In Fast_Lock and
1648//    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
1649//    will emit a conditional branch immediately after the node.
1650//    So we have branches to branches and lots of ICC.ZF games.
1651//    Instead, it might be better to have C2 pass a "FailureLabel"
1652//    into Fast_Lock and Fast_Unlock.  In the case of success, control
1653//    will drop through the node.  ICC.ZF is undefined at exit.
1654//    In the case of failure, the node will branch directly to the
1655//    FailureLabel
1656
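// Rough sketch of the default path emitted by fast_lock() below (comments
// only, not emitted code; assumes EmitSync == 0 and no RTM, with biased
// locking and the counter updates omitted):
//
//   tmp = obj->mark
//   if (tmp & monitor_value) goto IsInflated
//   box->dhw = tmp | unlocked_value                             // anticipate success
//   if (CAS(&obj->mark, tmp | unlocked_value, box)) goto DONE   // ZF=1 on success
//   // CAS failed: tmp now holds the observed mark; if it is an sp within
//   // one page of our rsp this is a recursive stack-lock:
//   box->dhw = (tmp - rsp) & page_mask                          // 0 => recursive, ZF=1
//   goto DONE
// IsInflated:
//   CAS(&monitor->_owner, NULL, Self)                           // ZF from the CAS
//   box->dhw = unused_mark
//   // fall through to DONE; ZF == 1 signals success to the caller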
1657
1658// obj: object to lock
1659// box: on-stack box address (displaced header location) - KILLED
1660// rax,: tmp -- KILLED
1661// scr: tmp -- KILLED
1662void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
1663                               Register scrReg, Register cx1Reg, Register cx2Reg,
1664                               BiasedLockingCounters* counters,
1665                               RTMLockingCounters* rtm_counters,
1666                               RTMLockingCounters* stack_rtm_counters,
1667                               Metadata* method_data,
1668                               bool use_rtm, bool profile_rtm) {
1669  // Ensure the register assignments are disjoint
1670  assert(tmpReg == rax, "");
1671
1672  if (use_rtm) {
1673    assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
1674  } else {
1675    assert(cx1Reg == noreg, "");
1676    assert(cx2Reg == noreg, "");
1677    assert_different_registers(objReg, boxReg, tmpReg, scrReg);
1678  }
1679
1680  if (counters != NULL) {
1681    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
1682  }
1683  if (EmitSync & 1) {
1684      // set box->dhw = markOopDesc::unused_mark()
1685      // Force all sync thru slow-path: slow_enter() and slow_exit()
1686      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1687      cmpptr (rsp, (int32_t)NULL_WORD);
1688  } else {
1689    // Possible cases that we'll encounter in fast_lock
1690    // ------------------------------------------------
1691    // * Inflated
1692    //    -- unlocked
1693    //    -- Locked
1694    //       = by self
1695    //       = by other
1696    // * biased
1697    //    -- by Self
1698    //    -- by other
1699    // * neutral
1700    // * stack-locked
1701    //    -- by self
1702    //       = sp-proximity test hits
1703    //       = sp-proximity test generates false-negative
1704    //    -- by other
1705    //
1706
1707    Label IsInflated, DONE_LABEL;
1708
1709    // it's stack-locked, biased or neutral
1710    // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
1711    // order to reduce the number of conditional branches in the most common cases.
1712    // Beware -- there's a subtle invariant that fetch of the markword
1713    // at [FETCH], below, will never observe a biased encoding (*101b).
1714    // If this invariant is not held we risk exclusion (safety) failure.
1715    if (UseBiasedLocking && !UseOptoBiasInlining) {
1716      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1717    }
1718
1719#if INCLUDE_RTM_OPT
1720    if (UseRTMForStackLocks && use_rtm) {
1721      rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
1722                        stack_rtm_counters, method_data, profile_rtm,
1723                        DONE_LABEL, IsInflated);
1724    }
1725#endif // INCLUDE_RTM_OPT
1726
1727    movptr(tmpReg, Address(objReg, 0));          // [FETCH]
1728    testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1729    jccb(Assembler::notZero, IsInflated);
1730
1731    // Attempt stack-locking ...
1732    orptr (tmpReg, markOopDesc::unlocked_value);
1733    movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
1734    if (os::is_MP()) {
1735      lock();
1736    }
1737    cmpxchgptr(boxReg, Address(objReg, 0));      // Updates tmpReg
1738    if (counters != NULL) {
1739      cond_inc32(Assembler::equal,
1740                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1741    }
1742    jcc(Assembler::equal, DONE_LABEL);           // Success
1743
1744    // Recursive locking.
1745    // The object is stack-locked: markword contains stack pointer to BasicLock.
1746    // Locked by current thread if difference with current SP is less than one page.
1747    subptr(tmpReg, rsp);
1748    // Next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
1749    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1750    movptr(Address(boxReg, 0), tmpReg);
1751    if (counters != NULL) {
1752      cond_inc32(Assembler::equal,
1753                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1754    }
1755    jmp(DONE_LABEL);
1756
1757    bind(IsInflated);
1758    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
1759
1760#if INCLUDE_RTM_OPT
1761    // Use the same RTM locking code in 32- and 64-bit VM.
1762    if (use_rtm) {
1763      rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1764                           rtm_counters, method_data, profile_rtm, DONE_LABEL);
1765    } else {
1766#endif // INCLUDE_RTM_OPT
1767
1768#ifndef _LP64
1769    // The object is inflated.
1770
1771    // boxReg refers to the on-stack BasicLock in the current frame.
1772    // We'd like to write:
1773    //   set box->_displaced_header = markOopDesc::unused_mark().  Any non-0 value suffices.
1774    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
1775    // additional latency as we have another ST in the store buffer that must drain.
1776
1777    if (EmitSync & 8192) {
1778       movptr(Address(boxReg, 0), 3);            // results in ST-before-CAS penalty
1779       get_thread (scrReg);
1780       movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2]
1781       movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
1782       if (os::is_MP()) {
1783         lock();
1784       }
1785       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1786    } else
1787    if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
1788       // register juggle because we need tmpReg for cmpxchgptr below
1789       movptr(scrReg, boxReg);
1790       movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
1791
1792       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1793       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1794          // prefetchw [eax + Offset(_owner)-2]
1795          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1796       }
1797
1798       if ((EmitSync & 64) == 0) {
1799         // Optimistic form: consider XORL tmpReg,tmpReg
1800         movptr(tmpReg, NULL_WORD);
1801       } else {
1802         // Can suffer RTS->RTO upgrades on shared or cold $ lines
1803         // Test-And-CAS instead of CAS
1804         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1805         testptr(tmpReg, tmpReg);                   // Locked ?
1806         jccb  (Assembler::notZero, DONE_LABEL);
1807       }
1808
1809       // Appears unlocked - try to swing _owner from null to non-null.
1810       // Ideally, I'd manifest "Self" with get_thread and then attempt
1811       // to CAS the register containing Self into m->Owner.
1812       // But we don't have enough registers, so instead we can either try to CAS
1813       // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
1814       // we later store "Self" into m->Owner.  Transiently storing a stack address
1815       // (rsp or the address of the box) into  m->owner is harmless.
1816       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1817       if (os::is_MP()) {
1818         lock();
1819       }
1820       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1821       movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
1822       // If we weren't able to swing _owner from NULL to the BasicLock
1823       // then take the slow path.
1824       jccb  (Assembler::notZero, DONE_LABEL);
1825       // update _owner from BasicLock to thread
1826       get_thread (scrReg);                    // beware: clobbers ICCs
1827       movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
1828       xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
1829
1830       // If the CAS fails we can either retry or pass control to the slow-path.
1831       // We use the latter tactic.
1832       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1833       // If the CAS was successful ...
1834       //   Self has acquired the lock
1835       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1836       // Intentional fall-through into DONE_LABEL ...
1837    } else {
1838       movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
1839       movptr(boxReg, tmpReg);
1840
1841       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1842       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1843          // prefetchw [eax + Offset(_owner)-2]
1844          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1845       }
1846
1847       if ((EmitSync & 64) == 0) {
1848         // Optimistic form
1849         xorptr  (tmpReg, tmpReg);
1850       } else {
1851         // Can suffer RTS->RTO upgrades on shared or cold $ lines
1852         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1853         testptr(tmpReg, tmpReg);                   // Locked ?
1854         jccb  (Assembler::notZero, DONE_LABEL);
1855       }
1856
1857       // Appears unlocked - try to swing _owner from null to non-null.
1858       // Use either "Self" (in scr) or rsp as thread identity in _owner.
1859       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1860       get_thread (scrReg);
1861       if (os::is_MP()) {
1862         lock();
1863       }
1864       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1865
1866       // If the CAS fails we can either retry or pass control to the slow-path.
1867       // We use the latter tactic.
1868       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1869       // If the CAS was successful ...
1870       //   Self has acquired the lock
1871       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1872       // Intentional fall-through into DONE_LABEL ...
1873    }
1874#else // _LP64
1875    // It's inflated
1876    movq(scrReg, tmpReg);
1877    xorq(tmpReg, tmpReg);
1878
1879    if (os::is_MP()) {
1880      lock();
1881    }
1882    cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1883    // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
1884    // Without cast to int32_t movptr will destroy r10 which is typically obj.
1885    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1886    // Intentional fall-through into DONE_LABEL ...
1887    // Propagate ICC.ZF from CAS above into DONE_LABEL.
1888#endif // _LP64
1889#if INCLUDE_RTM_OPT
1890    } // use_rtm()
1891#endif
1892    // DONE_LABEL is a hot target - we'd really like to place it at the
1893    // start of cache line by padding with NOPs.
1894    // See the AMD and Intel software optimization manuals for the
1895    // most efficient "long" NOP encodings.
1896    // Unfortunately none of our alignment mechanisms suffice.
1897    bind(DONE_LABEL);
1898
1899    // At DONE_LABEL the icc ZFlag is set as follows ...
1900    // Fast_Unlock uses the same protocol.
1901    // ZFlag == 1 -> Success
1902    // ZFlag == 0 -> Failure - force control through the slow-path
1903  }
1904}
1905
1906// obj: object to unlock
1907// box: box address (displaced header location), killed.  Must be EAX.
1908// tmp: killed, cannot be obj nor box.
1909//
1910// Some commentary on balanced locking:
1911//
1912// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
1913// Methods that don't have provably balanced locking are forced to run in the
1914// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1915// The interpreter provides two properties:
1916// I1:  At return-time the interpreter automatically and quietly unlocks any
1917//      objects acquired by the current activation (frame).  Recall that the
1918//      interpreter maintains an on-stack list of locks currently held by
1919//      a frame.
1920// I2:  If a method attempts to unlock an object that is not held by the
1921//      frame, the interpreter throws IMSX.
1922//
1923// Let's say A(), which has provably balanced locking, acquires O and then calls B().
1924// B() doesn't have provably balanced locking so it runs in the interpreter.
1925// Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
1926// is still locked by A().
1927//
1928// The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
1929// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
1930// should not be unlocked by "normal" java-level locking and vice-versa.  The specification
1931// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
1932// Arguably given that the spec legislates the JNI case as undefined our implementation
1933// could reasonably *avoid* checking owner in Fast_Unlock().
1934// In the interest of performance we elide the m->Owner==Self check in unlock.
1935// A perfectly viable alternative is to elide the owner check except when
1936// Xcheck:jni is enabled.
1937
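// Rough sketch of the corresponding fast_unlock() path below (comments only;
// assumes EmitSync == 0 and no RTM, with biased locking omitted):
//
//   if (box->dhw == 0) goto DONE                  // recursive stack-lock, ZF=1
//   tmp = obj->mark
//   if (!(tmp & monitor_value)) goto Stacked
//   // inflated: 1-0 exit -- if _recursions == 0 and cxq|EntryList is empty,
//   // simply store _owner = NULL; otherwise fall into the CheckSucc logic
//   // which ratifies _succ or re-CASes the lock to guarantee succession
//   ...
// Stacked:
//   CAS(&obj->mark, box, box->dhw)                // ZF indicates success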
1938void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
1939  assert(boxReg == rax, "");
1940  assert_different_registers(objReg, boxReg, tmpReg);
1941
1942  if (EmitSync & 4) {
1943    // Disable - inhibit all inlining.  Force control through the slow-path
1944    cmpptr (rsp, 0);
1945  } else {
1946    Label DONE_LABEL, Stacked, CheckSucc;
1947
1948    // Critically, the biased locking test must have precedence over
1949    // and appear before the (box->dhw == 0) recursive stack-lock test.
1950    if (UseBiasedLocking && !UseOptoBiasInlining) {
1951       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
1952    }
1953
1954#if INCLUDE_RTM_OPT
1955    if (UseRTMForStackLocks && use_rtm) {
1956      assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1957      Label L_regular_unlock;
1958      movptr(tmpReg, Address(objReg, 0));           // fetch markword
1959      andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1960      cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1961      jccb(Assembler::notEqual, L_regular_unlock);  // if !HLE RegularLock
1962      xend();                                       // otherwise end...
1963      jmp(DONE_LABEL);                              // ... and we're done
1964      bind(L_regular_unlock);
1965    }
1966#endif
1967
1968    cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
1969    jcc   (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
1970    movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
1971    testptr(tmpReg, markOopDesc::monitor_value);    // Inflated?
1972    jccb  (Assembler::zero, Stacked);
1973
1974    // It's inflated.
1975#if INCLUDE_RTM_OPT
1976    if (use_rtm) {
1977      Label L_regular_inflated_unlock;
1978      int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1979      movptr(boxReg, Address(tmpReg, owner_offset));
1980      testptr(boxReg, boxReg);
1981      jccb(Assembler::notZero, L_regular_inflated_unlock);
1982      xend();
1983      jmpb(DONE_LABEL);
1984      bind(L_regular_inflated_unlock);
1985    }
1986#endif
1987
1988    // Despite our balanced locking property we still check that m->_owner == Self
1989    // as java routines or native JNI code called by this thread might
1990    // have released the lock.
1991    // Refer to the comments in synchronizer.cpp for how we might encode extra
1992    // state in _succ so we can avoid fetching EntryList|cxq.
1993    //
1994    // I'd like to add more cases in fast_lock() and fast_unlock() --
1995    // such as recursive enter and exit -- but we have to be wary of
1996    // I$ bloat, T$ effects and BP$ effects.
1997    //
1998    // If there's no contention try a 1-0 exit.  That is, exit without
1999    // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
2000    // we detect and recover from the race that the 1-0 exit admits.
2001    //
2002    // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
2003    // before it STs null into _owner, releasing the lock.  Updates
2004    // to data protected by the critical section must be visible before
2005    // we drop the lock (and thus before any other thread could acquire
2006    // the lock and observe the fields protected by the lock).
2007    // IA32's memory-model is SPO, so STs are ordered with respect to
2008    // each other and there's no need for an explicit barrier (fence).
2009    // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
2010#ifndef _LP64
2011    get_thread (boxReg);
2012    if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
2013      // prefetchw [ebx + Offset(_owner)-2]
2014      prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2015    }
2016
2017    // Note that we could employ various encoding schemes to reduce
2018    // the number of loads below (currently 4) to just 2 or 3.
2019    // Refer to the comments in synchronizer.cpp.
2020    // In practice the chain of fetches doesn't seem to impact performance, however.
2021    xorptr(boxReg, boxReg);
2022    if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
2023       // Attempt to reduce branch density - AMD's branch predictor.
2024       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2025       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2026       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2027       jccb  (Assembler::notZero, DONE_LABEL);
2028       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2029       jmpb  (DONE_LABEL);
2030    } else {
2031       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2032       jccb  (Assembler::notZero, DONE_LABEL);
2033       movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2034       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2035       jccb  (Assembler::notZero, CheckSucc);
2036       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2037       jmpb  (DONE_LABEL);
2038    }
2039
2040    // The following code fragment (EmitSync & 65536) improves the performance of
2041    // contended applications and contended synchronization microbenchmarks.
2042    // Unfortunately the emission of the code - even though not executed - causes regressions
2043    // in scimark and jetstream, evidently because of $ effects.  Replacing the code
2044    // with an equal number of never-executed NOPs results in the same regression.
2045    // We leave it off by default.
2046
2047    if ((EmitSync & 65536) != 0) {
2048       Label LSuccess, LGoSlowPath ;
2049
2050       bind  (CheckSucc);
2051
2052       // Optional pre-test ... it's safe to elide this
2053       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2054       jccb(Assembler::zero, LGoSlowPath);
2055
2056       // We have a classic Dekker-style idiom:
2057       //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
2058       // There are a number of ways to implement the barrier:
2059       // (1) lock:andl &m->_owner, 0
2060       //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
2061       //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
2062       //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
2063       // (2) If supported, an explicit MFENCE is appealing.
2064       //     In older IA32 processors MFENCE is slower than lock:add or xchg
2065       //     particularly if the write-buffer is full, as might be the case
2066       //     if stores closely precede the fence or fence-equivalent instruction.
2067       //     See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2068       //     as the situation has changed with Nehalem and Shanghai.
2069       // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
2070       //     The $lines underlying the top-of-stack should be in M-state.
2071       //     The locked add instruction is serializing, of course.
2072       // (4) Use xchg, which is serializing
2073       //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
2074       // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
2075       //     The integer condition codes will tell us if succ was 0.
2076       //     Since _succ and _owner should reside in the same $line and
2077       //     we just stored into _owner, it's likely that the $line
2078       //     remains in M-state for the lock:orl.
2079       //
2080       // We currently use (3), although it's likely that switching to (2)
2081       // is correct for the future.
2082
2083       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2084       if (os::is_MP()) {
2085         lock(); addptr(Address(rsp, 0), 0);
2086       }
2087       // Ratify _succ remains non-null
2088       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
2089       jccb  (Assembler::notZero, LSuccess);
2090
2091       xorptr(boxReg, boxReg);                  // box is really EAX
2092       if (os::is_MP()) { lock(); }
2093       cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2094       // There's no successor so we tried to regrab the lock with the
2095       // placeholder value. If that didn't work, then another thread
2096       // grabbed the lock so we're done (and exit was a success).
2097       jccb  (Assembler::notEqual, LSuccess);
2098       // Since we're low on registers we installed rsp as a placeholder in _owner.
2099       // Now install Self over rsp.  This is safe as we're transitioning from
2100       // non-null to non-null.
2101       get_thread (boxReg);
2102       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
2103       // Intentional fall-through into LGoSlowPath ...
2104
2105       bind  (LGoSlowPath);
2106       orptr(boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2107       jmpb  (DONE_LABEL);
2108
2109       bind  (LSuccess);
2110       xorptr(boxReg, boxReg);                 // set ICC.ZF=1 to indicate success
2111       jmpb  (DONE_LABEL);
2112    }
2113
2114    bind (Stacked);
2115    // It's not inflated and it's not recursively stack-locked and it's not biased.
2116    // It must be stack-locked.
2117    // Try to reset the header to displaced header.
2118    // The "box" value on the stack is stable, so we can reload
2119    // and be assured we observe the same value as above.
2120    movptr(tmpReg, Address(boxReg, 0));
2121    if (os::is_MP()) {
2122      lock();
2123    }
2124    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2125    // Intentional fall-through into DONE_LABEL
2126
2127    // DONE_LABEL is a hot target - we'd really like to place it at the
2128    // start of cache line by padding with NOPs.
2129    // See the AMD and Intel software optimization manuals for the
2130    // most efficient "long" NOP encodings.
2131    // Unfortunately none of our alignment mechanisms suffice.
2132    if ((EmitSync & 65536) == 0) {
2133       bind (CheckSucc);
2134    }
2135#else // _LP64
2136    // It's inflated
2137    if (EmitSync & 1024) {
2138      // Emit code to check that _owner == Self
2139      // We could fold the _owner test into subsequent code more efficiently
2140      // than using a stand-alone check, but since _owner checking is off by
2141      // default we don't bother. We also might consider predicating the
2142      // _owner==Self check on Xcheck:jni or running on a debug build.
2143      movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2144      xorptr(boxReg, r15_thread);
2145    } else {
2146      xorptr(boxReg, boxReg);
2147    }
2148    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2149    jccb  (Assembler::notZero, DONE_LABEL);
2150    movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2151    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2152    jccb  (Assembler::notZero, CheckSucc);
2153    movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2154    jmpb  (DONE_LABEL);
2155
2156    if ((EmitSync & 65536) == 0) {
2157      // Try to avoid passing control into the slow_path ...
2158      Label LSuccess, LGoSlowPath ;
2159      bind  (CheckSucc);
2160
2161      // The following optional optimization can be elided if necessary
2162      // Effectively: if (succ == null) goto SlowPath
2163      // The code reduces the window for a race, however,
2164      // and thus benefits performance.
2165      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2166      jccb  (Assembler::zero, LGoSlowPath);
2167
2168      if ((EmitSync & 16) && os::is_MP()) {
2169        orptr(boxReg, boxReg);
2170        xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2171      } else {
2172        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2173        if (os::is_MP()) {
2174          // Memory barrier/fence
2175          // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
2176          // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
2177          // This is faster on Nehalem and AMD Shanghai/Barcelona.
2178          // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2179          // We might also restructure (ST Owner=0;barrier;LD _Succ) to
2180          // (mov box,0; xchgq box, &m->Owner; LD _succ) .
2181          lock(); addl(Address(rsp, 0), 0);
2182        }
2183      }
2184      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2185      jccb  (Assembler::notZero, LSuccess);
2186
2187      // Rare inopportune interleaving - race.
2188      // The successor vanished in the small window above.
2189      // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
2190      // We need to ensure progress and succession.
2191      // Try to reacquire the lock.
2192      // If that fails then the new owner is responsible for succession and this
2193      // thread needs to take no further action and can exit via the fast path (success).
2194      // If the re-acquire succeeds then pass control into the slow path.
2195      // As implemented, this latter mode is horrible because we generated more
2196      // coherence traffic on the lock *and* artificially extended the critical section
2197      // length by virtue of passing control into the slow path.
2198
2199      // box is really RAX -- the following CMPXCHG depends on that binding
2200      // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
2201      movptr(boxReg, (int32_t)NULL_WORD);
2202      if (os::is_MP()) { lock(); }
2203      cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2204      // There's no successor so we tried to regrab the lock.
2205      // If that didn't work, then another thread grabbed the
2206      // lock so we're done (and exit was a success).
2207      jccb  (Assembler::notEqual, LSuccess);
2208      // Intentional fall-through into slow-path
2209
2210      bind  (LGoSlowPath);
2211      orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2212      jmpb  (DONE_LABEL);
2213
2214      bind  (LSuccess);
2215      testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
2216      jmpb  (DONE_LABEL);
2217    }
2218
2219    bind  (Stacked);
2220    movptr(tmpReg, Address (boxReg, 0));      // re-fetch
2221    if (os::is_MP()) { lock(); }
2222    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2223
2224    if (EmitSync & 65536) {
2225       bind (CheckSucc);
2226    }
2227#endif
2228    bind(DONE_LABEL);
2229  }
2230}
2231#endif // COMPILER2
2232
2233void MacroAssembler::c2bool(Register x) {
2234  // implements x == 0 ? 0 : 1
2235  // note: must only look at least-significant byte of x
2236  //       since C-style booleans are stored in one byte
2237  //       only! (was bug)
2238  andl(x, 0xFF);
2239  setb(Assembler::notZero, x);
2240}
2241
2242// Wouldn't need if AddressLiteral version had new name
2243void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2244  Assembler::call(L, rtype);
2245}
2246
2247void MacroAssembler::call(Register entry) {
2248  Assembler::call(entry);
2249}
2250
2251void MacroAssembler::call(AddressLiteral entry) {
2252  if (reachable(entry)) {
2253    Assembler::call_literal(entry.target(), entry.rspec());
2254  } else {
2255    lea(rscratch1, entry);
2256    Assembler::call(rscratch1);
2257  }
2258}
2259
2260void MacroAssembler::ic_call(address entry) {
2261  RelocationHolder rh = virtual_call_Relocation::spec(pc());
2262  movptr(rax, (intptr_t)Universe::non_oop_word());
2263  call(AddressLiteral(entry, rh));
2264}
2265
2266// Implementation of call_VM versions
2267
2268void MacroAssembler::call_VM(Register oop_result,
2269                             address entry_point,
2270                             bool check_exceptions) {
2271  Label C, E;
2272  call(C, relocInfo::none);
2273  jmp(E);
2274
2275  bind(C);
2276  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
2277  ret(0);
2278
2279  bind(E);
2280}
2281
2282void MacroAssembler::call_VM(Register oop_result,
2283                             address entry_point,
2284                             Register arg_1,
2285                             bool check_exceptions) {
2286  Label C, E;
2287  call(C, relocInfo::none);
2288  jmp(E);
2289
2290  bind(C);
2291  pass_arg1(this, arg_1);
2292  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
2293  ret(0);
2294
2295  bind(E);
2296}
2297
2298void MacroAssembler::call_VM(Register oop_result,
2299                             address entry_point,
2300                             Register arg_1,
2301                             Register arg_2,
2302                             bool check_exceptions) {
2303  Label C, E;
2304  call(C, relocInfo::none);
2305  jmp(E);
2306
2307  bind(C);
2308
2309  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2310
2311  pass_arg2(this, arg_2);
2312  pass_arg1(this, arg_1);
2313  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
2314  ret(0);
2315
2316  bind(E);
2317}
2318
2319void MacroAssembler::call_VM(Register oop_result,
2320                             address entry_point,
2321                             Register arg_1,
2322                             Register arg_2,
2323                             Register arg_3,
2324                             bool check_exceptions) {
2325  Label C, E;
2326  call(C, relocInfo::none);
2327  jmp(E);
2328
2329  bind(C);
2330
2331  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2332  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2333  pass_arg3(this, arg_3);
2334
2335  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2336  pass_arg2(this, arg_2);
2337
2338  pass_arg1(this, arg_1);
2339  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
2340  ret(0);
2341
2342  bind(E);
2343}
2344
2345void MacroAssembler::call_VM(Register oop_result,
2346                             Register last_java_sp,
2347                             address entry_point,
2348                             int number_of_arguments,
2349                             bool check_exceptions) {
2350  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2351  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2352}
2353
2354void MacroAssembler::call_VM(Register oop_result,
2355                             Register last_java_sp,
2356                             address entry_point,
2357                             Register arg_1,
2358                             bool check_exceptions) {
2359  pass_arg1(this, arg_1);
2360  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2361}
2362
2363void MacroAssembler::call_VM(Register oop_result,
2364                             Register last_java_sp,
2365                             address entry_point,
2366                             Register arg_1,
2367                             Register arg_2,
2368                             bool check_exceptions) {
2369
2370  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2371  pass_arg2(this, arg_2);
2372  pass_arg1(this, arg_1);
2373  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2374}
2375
2376void MacroAssembler::call_VM(Register oop_result,
2377                             Register last_java_sp,
2378                             address entry_point,
2379                             Register arg_1,
2380                             Register arg_2,
2381                             Register arg_3,
2382                             bool check_exceptions) {
2383  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2384  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2385  pass_arg3(this, arg_3);
2386  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2387  pass_arg2(this, arg_2);
2388  pass_arg1(this, arg_1);
2389  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2390}
2391
2392void MacroAssembler::super_call_VM(Register oop_result,
2393                                   Register last_java_sp,
2394                                   address entry_point,
2395                                   int number_of_arguments,
2396                                   bool check_exceptions) {
2397  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2398  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2399}
2400
2401void MacroAssembler::super_call_VM(Register oop_result,
2402                                   Register last_java_sp,
2403                                   address entry_point,
2404                                   Register arg_1,
2405                                   bool check_exceptions) {
2406  pass_arg1(this, arg_1);
2407  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2408}
2409
2410void MacroAssembler::super_call_VM(Register oop_result,
2411                                   Register last_java_sp,
2412                                   address entry_point,
2413                                   Register arg_1,
2414                                   Register arg_2,
2415                                   bool check_exceptions) {
2416
2417  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2418  pass_arg2(this, arg_2);
2419  pass_arg1(this, arg_1);
2420  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2421}
2422
2423void MacroAssembler::super_call_VM(Register oop_result,
2424                                   Register last_java_sp,
2425                                   address entry_point,
2426                                   Register arg_1,
2427                                   Register arg_2,
2428                                   Register arg_3,
2429                                   bool check_exceptions) {
2430  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2431  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2432  pass_arg3(this, arg_3);
2433  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2434  pass_arg2(this, arg_2);
2435  pass_arg1(this, arg_1);
2436  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2437}
2438
2439void MacroAssembler::call_VM_base(Register oop_result,
2440                                  Register java_thread,
2441                                  Register last_java_sp,
2442                                  address  entry_point,
2443                                  int      number_of_arguments,
2444                                  bool     check_exceptions) {
2445  // determine java_thread register
2446  if (!java_thread->is_valid()) {
2447#ifdef _LP64
2448    java_thread = r15_thread;
2449#else
2450    java_thread = rdi;
2451    get_thread(java_thread);
2452#endif // LP64
2453  }
2454  // determine last_java_sp register
2455  if (!last_java_sp->is_valid()) {
2456    last_java_sp = rsp;
2457  }
2458  // debugging support
2459  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
2460  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
2461#ifdef ASSERT
2462  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
2463  // r12 is the heapbase.
2464  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2465#endif // ASSERT
2466
2467  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
2468  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2469
2470  // push java thread (becomes first argument of C function)
2471
2472  NOT_LP64(push(java_thread); number_of_arguments++);
2473  LP64_ONLY(mov(c_rarg0, r15_thread));
2474
2475  // set last Java frame before call
2476  assert(last_java_sp != rbp, "can't use ebp/rbp");
2477
2478  // Only interpreter should have to set fp
2479  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2480
2481  // do the call, remove parameters
2482  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2483
2484  // restore the thread (cannot use the pushed argument since arguments
2485  // may be overwritten by C code generated by an optimizing compiler);
2486  // however we can use the register value directly if it is callee saved.
2487  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2488    // rdi & rsi (also r15) are callee saved -> nothing to do
2489#ifdef ASSERT
2490    guarantee(java_thread != rax, "change this code");
2491    push(rax);
2492    { Label L;
2493      get_thread(rax);
2494      cmpptr(java_thread, rax);
2495      jcc(Assembler::equal, L);
2496      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2497      bind(L);
2498    }
2499    pop(rax);
2500#endif
2501  } else {
2502    get_thread(java_thread);
2503  }
2504  // reset last Java frame
2505  // Only interpreter should have to clear fp
2506  reset_last_Java_frame(java_thread, true, false);
2507
2508#ifndef CC_INTERP
2509   // C++ interp handles this in the interpreter
2510  check_and_handle_popframe(java_thread);
2511  check_and_handle_earlyret(java_thread);
2512#endif /* CC_INTERP */
2513
2514  if (check_exceptions) {
2515    // check for pending exceptions (java_thread is set upon return)
2516    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2517#ifndef _LP64
2518    jump_cc(Assembler::notEqual,
2519            RuntimeAddress(StubRoutines::forward_exception_entry()));
2520#else
2521    // This used to conditionally jump to forward_exception, however it is
2522    // possible, if we relocate, that the branch will not reach. So we must jump
2523    // around so that we can always reach it.
2524
2525    Label ok;
2526    jcc(Assembler::equal, ok);
2527    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2528    bind(ok);
2529#endif // LP64
2530  }
2531
2532  // get oop result if there is one and reset the value in the thread
2533  if (oop_result->is_valid()) {
2534    get_vm_result(oop_result, java_thread);
2535  }
2536}
2537
2538void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2539
2540  // Calculate the value for last_Java_sp
2541  // somewhat subtle. call_VM does an intermediate call
2542  // which places a return address on the stack just under the
2543  // stack pointer as the user finished with it. This allows
2544  // us to retrieve last_Java_pc from last_Java_sp[-1].
2545  // On 32bit we then have to push additional args on the stack to accomplish
2546  // the actual requested call. On 64bit call_VM can only use register args
2547  // so the only extra space is the return address that call_VM created.
2548  // This hopefully explains the calculations here.
2549
2550#ifdef _LP64
2551  // We've pushed one address, correct last_Java_sp
2552  lea(rax, Address(rsp, wordSize));
2553#else
2554  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
2555#endif // LP64
2556
2557  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
2558
2559}
2560
2561void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2562  call_VM_leaf_base(entry_point, number_of_arguments);
2563}
2564
2565void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2566  pass_arg0(this, arg_0);
2567  call_VM_leaf(entry_point, 1);
2568}
2569
2570void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2571
2572  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2573  pass_arg1(this, arg_1);
2574  pass_arg0(this, arg_0);
2575  call_VM_leaf(entry_point, 2);
2576}
2577
2578void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2579  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2580  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2581  pass_arg2(this, arg_2);
2582  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2583  pass_arg1(this, arg_1);
2584  pass_arg0(this, arg_0);
2585  call_VM_leaf(entry_point, 3);
2586}
2587
2588void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2589  pass_arg0(this, arg_0);
2590  MacroAssembler::call_VM_leaf_base(entry_point, 1);
2591}
2592
2593void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2594
2595  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2596  pass_arg1(this, arg_1);
2597  pass_arg0(this, arg_0);
2598  MacroAssembler::call_VM_leaf_base(entry_point, 2);
2599}
2600
2601void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2602  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2603  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2604  pass_arg2(this, arg_2);
2605  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2606  pass_arg1(this, arg_1);
2607  pass_arg0(this, arg_0);
2608  MacroAssembler::call_VM_leaf_base(entry_point, 3);
2609}
2610
2611void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2612  LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
2613  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2614  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2615  pass_arg3(this, arg_3);
2616  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2617  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2618  pass_arg2(this, arg_2);
2619  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2620  pass_arg1(this, arg_1);
2621  pass_arg0(this, arg_0);
2622  MacroAssembler::call_VM_leaf_base(entry_point, 4);
2623}
2624
2625void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
2626  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
2627  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
2628  verify_oop(oop_result, "broken oop in call_VM_base");
2629}
2630
2631void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
2632  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
2633  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
2634}
2635
2636void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2637}
2638
2639void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2640}
2641
2642void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2643  if (reachable(src1)) {
2644    cmpl(as_Address(src1), imm);
2645  } else {
2646    lea(rscratch1, src1);
2647    cmpl(Address(rscratch1, 0), imm);
2648  }
2649}
2650
2651void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2652  assert(!src2.is_lval(), "use cmpptr");
2653  if (reachable(src2)) {
2654    cmpl(src1, as_Address(src2));
2655  } else {
2656    lea(rscratch1, src2);
2657    cmpl(src1, Address(rscratch1, 0));
2658  }
2659}
2660
2661void MacroAssembler::cmp32(Register src1, int32_t imm) {
2662  Assembler::cmpl(src1, imm);
2663}
2664
2665void MacroAssembler::cmp32(Register src1, Address src2) {
2666  Assembler::cmpl(src1, src2);
2667}
2668
2669void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2670  ucomisd(opr1, opr2);
2671
2672  Label L;
2673  if (unordered_is_less) {
2674    movl(dst, -1);
2675    jcc(Assembler::parity, L);
2676    jcc(Assembler::below , L);
2677    movl(dst, 0);
2678    jcc(Assembler::equal , L);
2679    increment(dst);
2680  } else { // unordered is greater
2681    movl(dst, 1);
2682    jcc(Assembler::parity, L);
2683    jcc(Assembler::above , L);
2684    movl(dst, 0);
2685    jcc(Assembler::equal , L);
2686    decrementl(dst);
2687  }
2688  bind(L);
2689}
2690
2691void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2692  ucomiss(opr1, opr2);
2693
2694  Label L;
2695  if (unordered_is_less) {
2696    movl(dst, -1);
2697    jcc(Assembler::parity, L);
2698    jcc(Assembler::below , L);
2699    movl(dst, 0);
2700    jcc(Assembler::equal , L);
2701    increment(dst);
2702  } else { // unordered is greater
2703    movl(dst, 1);
2704    jcc(Assembler::parity, L);
2705    jcc(Assembler::above , L);
2706    movl(dst, 0);
2707    jcc(Assembler::equal , L);
2708    decrementl(dst);
2709  }
2710  bind(L);
2711}
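// The two compare helpers above produce the -1/0/+1 result expected by Java's
// three-way floating point compares; the parity branch handles the unordered
// (NaN) case, mapping it to -1 when unordered_is_less is true and to +1
// otherwise (i.e. the fcmpl/dcmpl vs. fcmpg/dcmpg flavors, as we read the code).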
2712
2713
2714void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
2715  if (reachable(src1)) {
2716    cmpb(as_Address(src1), imm);
2717  } else {
2718    lea(rscratch1, src1);
2719    cmpb(Address(rscratch1, 0), imm);
2720  }
2721}
2722
2723void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2724#ifdef _LP64
2725  if (src2.is_lval()) {
2726    movptr(rscratch1, src2);
2727    Assembler::cmpq(src1, rscratch1);
2728  } else if (reachable(src2)) {
2729    cmpq(src1, as_Address(src2));
2730  } else {
2731    lea(rscratch1, src2);
2732    Assembler::cmpq(src1, Address(rscratch1, 0));
2733  }
2734#else
2735  if (src2.is_lval()) {
2736    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2737  } else {
2738    cmpl(src1, as_Address(src2));
2739  }
2740#endif // _LP64
2741}
2742
2743void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2744  assert(src2.is_lval(), "not a mem-mem compare");
2745#ifdef _LP64
2746  // moves src2's literal address
2747  movptr(rscratch1, src2);
2748  Assembler::cmpq(src1, rscratch1);
2749#else
2750  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2751#endif // _LP64
2752}
2753
2754void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
2755  if (reachable(adr)) {
2756    if (os::is_MP())
2757      lock();
2758    cmpxchgptr(reg, as_Address(adr));
2759  } else {
2760    lea(rscratch1, adr);
2761    if (os::is_MP())
2762      lock();
2763    cmpxchgptr(reg, Address(rscratch1, 0));
2764  }
2765}
2766
2767void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
2768  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
2769}
2770
2771void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2772  if (reachable(src)) {
2773    Assembler::comisd(dst, as_Address(src));
2774  } else {
2775    lea(rscratch1, src);
2776    Assembler::comisd(dst, Address(rscratch1, 0));
2777  }
2778}
2779
2780void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2781  if (reachable(src)) {
2782    Assembler::comiss(dst, as_Address(src));
2783  } else {
2784    lea(rscratch1, src);
2785    Assembler::comiss(dst, Address(rscratch1, 0));
2786  }
2787}
2788
2789
2790void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
2791  Condition negated_cond = negate_condition(cond);
2792  Label L;
2793  jcc(negated_cond, L);
2794  pushf(); // Preserve flags
2795  atomic_incl(counter_addr);
2796  popf();
2797  bind(L);
2798}
2799
2800int MacroAssembler::corrected_idivl(Register reg) {
2801  // Full implementation of Java idiv and irem; checks for
2802  // special case as described in JVM spec., p.243 & p.271.
2803  // The function returns the (pc) offset of the idivl
2804  // instruction - may be needed for implicit exceptions.
2805  //
2806  //         normal case                           special case
2807  //
2808  // input : rax,: dividend                         min_int
2809  //         reg: divisor   (may not be rax,/rdx)   -1
2810  //
2811  // output: rax,: quotient  (= rax, idiv reg)       min_int
2812  //         rdx: remainder (= rax, irem reg)       0
2813  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
2814  const int min_int = 0x80000000;
2815  Label normal_case, special_case;
2816
2817  // check for special case
2818  cmpl(rax, min_int);
2819  jcc(Assembler::notEqual, normal_case);
2820  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
2821  cmpl(reg, -1);
2822  jcc(Assembler::equal, special_case);
2823
2824  // handle normal case
2825  bind(normal_case);
2826  cdql();
2827  int idivl_offset = offset();
2828  idivl(reg);
2829
2830  // normal and special case exit
2831  bind(special_case);
2832
2833  return idivl_offset;
2834}
2835
2836
2837
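// Note for the increment/decrement helpers below: value == min_jint is special-cased
// first because negating min_jint would overflow; in that case the immediate is
// applied directly with subl/addl.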
2838void MacroAssembler::decrementl(Register reg, int value) {
2839  if (value == min_jint) {subl(reg, value) ; return; }
2840  if (value <  0) { incrementl(reg, -value); return; }
2841  if (value == 0) {                        ; return; }
2842  if (value == 1 && UseIncDec) { decl(reg) ; return; }
2843  /* else */      { subl(reg, value)       ; return; }
2844}
2845
2846void MacroAssembler::decrementl(Address dst, int value) {
2847  if (value == min_jint) {subl(dst, value) ; return; }
2848  if (value <  0) { incrementl(dst, -value); return; }
2849  if (value == 0) {                        ; return; }
2850  if (value == 1 && UseIncDec) { decl(dst) ; return; }
2851  /* else */      { subl(dst, value)       ; return; }
2852}
2853
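// Signed division by a power of two via arithmetic shift needs a bias of
// (2^shift - 1) for negative dividends so the result truncates toward zero,
// e.g. -7 / 4: a plain sar would give -2, while (-7 + 3) >> 2 = -1 as required.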
2854void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2855  assert (shift_value > 0, "illegal shift value");
2856  Label _is_positive;
2857  testl (reg, reg);
2858  jcc (Assembler::positive, _is_positive);
2859  int offset = (1 << shift_value) - 1 ;
2860
2861  if (offset == 1) {
2862    incrementl(reg);
2863  } else {
2864    addl(reg, offset);
2865  }
2866
2867  bind (_is_positive);
2868  sarl(reg, shift_value);
2869}
2870
2871void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2872  if (reachable(src)) {
2873    Assembler::divsd(dst, as_Address(src));
2874  } else {
2875    lea(rscratch1, src);
2876    Assembler::divsd(dst, Address(rscratch1, 0));
2877  }
2878}
2879
2880void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2881  if (reachable(src)) {
2882    Assembler::divss(dst, as_Address(src));
2883  } else {
2884    lea(rscratch1, src);
2885    Assembler::divss(dst, Address(rscratch1, 0));
2886  }
2887}
2888
2889// !defined(COMPILER2) is because of stupid core builds
2890#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI
2891void MacroAssembler::empty_FPU_stack() {
2892  if (VM_Version::supports_mmx()) {
2893    emms();
2894  } else {
2895    for (int i = 8; i-- > 0; ) ffree(i);
2896  }
2897}
2898#endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI
2899
2900
2901// Defines obj, preserves var_size_in_bytes
2902void MacroAssembler::eden_allocate(Register obj,
2903                                   Register var_size_in_bytes,
2904                                   int con_size_in_bytes,
2905                                   Register t1,
2906                                   Label& slow_case) {
2907  assert(obj == rax, "obj must be in rax, for cmpxchg");
2908  assert_different_registers(obj, var_size_in_bytes, t1);
2909  if (!Universe::heap()->supports_inline_contig_alloc()) {
2910    jmp(slow_case);
2911  } else {
2912    Register end = t1;
2913    Label retry;
2914    bind(retry);
2915    ExternalAddress heap_top((address) Universe::heap()->top_addr());
2916    movptr(obj, heap_top);
2917    if (var_size_in_bytes == noreg) {
2918      lea(end, Address(obj, con_size_in_bytes));
2919    } else {
2920      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2921    }
2922    // if end < obj then we wrapped around => object too long => slow case
2923    cmpptr(end, obj);
2924    jcc(Assembler::below, slow_case);
2925    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2926    jcc(Assembler::above, slow_case);
2927    // Compare obj with the top addr; if they are still equal, store the new top addr
2928    // (end) at the address of the top addr pointer. Sets ZF if they were equal, and clears
2929    // it otherwise. Use lock prefix for atomicity on MPs.
2930    locked_cmpxchgptr(end, heap_top);
2931    jcc(Assembler::notEqual, retry);
2932  }
2933}
2934
2935void MacroAssembler::enter() {
2936  push(rbp);
2937  mov(rbp, rsp);
2938}
2939
2940// A 5 byte nop that is safe for patching (see patch_verified_entry)
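// Both variants are a single 5-byte instruction (the segment-override prefixes are
// redundant for a nop), so the entry can later be replaced by one 5-byte jump.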
2941void MacroAssembler::fat_nop() {
2942  if (UseAddressNop) {
2943    addr_nop_5();
2944  } else {
2945    emit_int8(0x26); // es:
2946    emit_int8(0x2e); // cs:
2947    emit_int8(0x64); // fs:
2948    emit_int8(0x65); // gs:
2949    emit_int8((unsigned char)0x90);
2950  }
2951}
2952
2953void MacroAssembler::fcmp(Register tmp) {
2954  fcmp(tmp, 1, true, true);
2955}
2956
2957void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2958  assert(!pop_right || pop_left, "usage error");
2959  if (VM_Version::supports_cmov()) {
2960    assert(tmp == noreg, "unneeded temp");
2961    if (pop_left) {
2962      fucomip(index);
2963    } else {
2964      fucomi(index);
2965    }
2966    if (pop_right) {
2967      fpop();
2968    }
2969  } else {
2970    assert(tmp != noreg, "need temp");
2971    if (pop_left) {
2972      if (pop_right) {
2973        fcompp();
2974      } else {
2975        fcomp(index);
2976      }
2977    } else {
2978      fcom(index);
2979    }
2980    // convert FPU condition into eflags condition via rax,
2981    save_rax(tmp);
2982    fwait(); fnstsw_ax();
2983    sahf();
2984    restore_rax(tmp);
2985  }
2986  // condition codes set as follows:
2987  //
2988  // CF (corresponds to C0) if x < y
2989  // PF (corresponds to C2) if unordered
2990  // ZF (corresponds to C3) if x = y
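  //
  // e.g. a following jcc(Assembler::below, ...) is taken when x < y, and
  // jcc(Assembler::parity, ...) is taken when the operands were unordered (NaN).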
2991}
2992
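// fcmp2int materializes the FPU compare as an integer in dst: -1, 0 or +1 for
// less, equal or greater, with an unordered result mapping to -1 or +1 depending
// on unordered_is_less.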
2993void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
2994  fcmp2int(dst, unordered_is_less, 1, true, true);
2995}
2996
2997void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
2998  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
2999  Label L;
3000  if (unordered_is_less) {
3001    movl(dst, -1);
3002    jcc(Assembler::parity, L);
3003    jcc(Assembler::below , L);
3004    movl(dst, 0);
3005    jcc(Assembler::equal , L);
3006    increment(dst);
3007  } else { // unordered is greater
3008    movl(dst, 1);
3009    jcc(Assembler::parity, L);
3010    jcc(Assembler::above , L);
3011    movl(dst, 0);
3012    jcc(Assembler::equal , L);
3013    decrementl(dst);
3014  }
3015  bind(L);
3016}
3017
3018void MacroAssembler::fld_d(AddressLiteral src) {
3019  fld_d(as_Address(src));
3020}
3021
3022void MacroAssembler::fld_s(AddressLiteral src) {
3023  fld_s(as_Address(src));
3024}
3025
3026void MacroAssembler::fld_x(AddressLiteral src) {
3027  Assembler::fld_x(as_Address(src));
3028}
3029
3030void MacroAssembler::fldcw(AddressLiteral src) {
3031  Assembler::fldcw(as_Address(src));
3032}
3033
3034void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src) {
3035  if (reachable(src)) {
3036    Assembler::mulpd(dst, as_Address(src));
3037  } else {
3038    lea(rscratch1, src);
3039    Assembler::mulpd(dst, Address(rscratch1, 0));
3040  }
3041}
3042
3043void MacroAssembler::pow_exp_core_encoding() {
3044  // kills rax, rcx, rdx
3045  subptr(rsp,sizeof(jdouble));
3046  // computes 2^X. Stack: X ...
3047  // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
3048  // keep it on the thread's stack to compute 2^int(X) later
3049  // then compute 2^(X-int(X)) as (2^(X-int(X)-1+1)
3050  // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
3051  fld_s(0);                 // Stack: X X ...
3052  frndint();                // Stack: int(X) X ...
3053  fsuba(1);                 // Stack: int(X) X-int(X) ...
3054  fistp_s(Address(rsp,0));  // move int(X) as integer to thread's stack. Stack: X-int(X) ...
3055  f2xm1();                  // Stack: 2^(X-int(X))-1 ...
3056  fld1();                   // Stack: 1 2^(X-int(X))-1 ...
3057  faddp(1);                 // Stack: 2^(X-int(X))
3058  // computes 2^(int(X)): add exponent bias (1023) to int(X), then
3059  // shift int(X)+1023 to exponent position.
3060  // The exponent is limited to 11 bits: if int(X)+1023 does not fit in 11
3061  // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
3062  // values, so detect them and set the result to NaN as well.
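  // For example, int(X) = 3 gives 3 + 1023 = 1026; 1026 << 20 = 0x40200000 in the
  // high word (with a zero low word), which is the IEEE-754 encoding of 2^3 = 8.0.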
3063  movl(rax,Address(rsp,0));
3064  movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
3065  addl(rax, 1023);
3066  movl(rdx,rax);
3067  shll(rax,20);
3068  // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
3069  addl(rdx,1);
3070  // Check that 1 < int(X)+1023+1 < 2048
3071  // in 3 steps:
3072  // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
3073  // 2- int(X)+1023+1 != 0
3074  // 3- int(X)+1023+1 != 1
3075  // Do 2- first because addl just updated the flags.
3076  cmov32(Assembler::equal,rax,rcx);
3077  cmpl(rdx,1);
3078  cmov32(Assembler::equal,rax,rcx);
3079  testl(rdx,rcx);
3080  cmov32(Assembler::notEqual,rax,rcx);
3081  movl(Address(rsp,4),rax);
3082  movl(Address(rsp,0),0);
3083  fmul_d(Address(rsp,0));   // Stack: 2^X ...
3084  addptr(rsp,sizeof(jdouble));
3085}
3086
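// increase_precision/restore_precision adjust the x87 control word around the pow/exp
// computation: or-ing in 0x300 sets the precision-control field (bits 8..9) to 11b,
// i.e. 64-bit extended precision; restore_precision reloads the saved control word.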
3087void MacroAssembler::increase_precision() {
3088  subptr(rsp, BytesPerWord);
3089  fnstcw(Address(rsp, 0));
3090  movl(rax, Address(rsp, 0));
3091  orl(rax, 0x300);
3092  push(rax);
3093  fldcw(Address(rsp, 0));
3094  pop(rax);
3095}
3096
3097void MacroAssembler::restore_precision() {
3098  fldcw(Address(rsp, 0));
3099  addptr(rsp, BytesPerWord);
3100}
3101
3102void MacroAssembler::fast_pow() {
3103  // computes X^Y = 2^(Y * log2(X))
3104  // if fast computation is not possible, result is NaN. Requires
3105  // fallback from user of this macro.
3106  // increase precision for intermediate steps of the computation
3107  BLOCK_COMMENT("fast_pow {");
3108  increase_precision();
3109  fyl2x();                 // Stack: (Y*log2(X)) ...
3110  pow_exp_core_encoding(); // Stack: 2^(Y*log2(X)) = X^Y ...
3111  restore_precision();
3112  BLOCK_COMMENT("} fast_pow");
3113}
3114
3115void MacroAssembler::pow_or_exp(int num_fpu_regs_in_use) {
3116  // kills rax, rcx, rdx
3117  // pow and exp needs 2 extra registers on the fpu stack.
3118  Label slow_case, done;
3119  Register tmp = noreg;
3120  if (!VM_Version::supports_cmov()) {
3121    // fcmp needs a temporary so preserve rdx,
3122    tmp = rdx;
3123  }
3124  Register tmp2 = rax;
3125  Register tmp3 = rcx;
3126
3127  // Stack: X Y
3128  Label x_negative, y_not_2;
3129
3130  static double two = 2.0;
3131  ExternalAddress two_addr((address)&two);
3132
3133  // the constant may be too far away on 64 bit
3134  lea(tmp2, two_addr);
3135  fld_d(Address(tmp2, 0));    // Stack: 2 X Y
3136  fcmp(tmp, 2, true, false);  // Stack: X Y
3137  jcc(Assembler::parity, y_not_2);
3138  jcc(Assembler::notEqual, y_not_2);
3139
3140  fxch(); fpop();             // Stack: X
3141  fmul(0);                    // Stack: X*X
3142
3143  jmp(done);
3144
3145  bind(y_not_2);
3146
3147  fldz();                     // Stack: 0 X Y
3148  fcmp(tmp, 1, true, false);  // Stack: X Y
3149  jcc(Assembler::above, x_negative);
3150
3151  // X >= 0
3152
3153  fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
3154  fld_s(1);                   // Stack: X Y X Y
3155  fast_pow();                 // Stack: X^Y X Y
3156  fcmp(tmp, 0, false, false); // Stack: X^Y X Y
3157  // X^Y not equal to itself: X^Y is NaN, go to the slow case.
3158  jcc(Assembler::parity, slow_case);
3159  // get rid of duplicate arguments. Stack: X^Y
3160  if (num_fpu_regs_in_use > 0) {
3161    fxch(); fpop();
3162    fxch(); fpop();
3163  } else {
3164    ffree(2);
3165    ffree(1);
3166  }
3167  jmp(done);
3168
3169  // X <= 0
3170  bind(x_negative);
3171
3172  fld_s(1);                   // Stack: Y X Y
3173  frndint();                  // Stack: int(Y) X Y
3174  fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
3175  jcc(Assembler::notEqual, slow_case);
3176
3177  subptr(rsp, 8);
3178
3179  // For X^Y, when X < 0, Y has to be an integer and the final
3180  // result depends on whether it's odd or even. We just checked
3181  // that int(Y) == Y.  We move int(Y) to gp registers as a 64 bit
3182  // integer to test its parity. If int(Y) is huge and doesn't fit
3183  // in the 64 bit integer range, the integer indefinite value will
3184  // end up in the gp registers. Huge numbers are all even, and the
3185  // integer indefinite value is also even, so the parity test below still works.
3186
3187#ifdef ASSERT
3188  // Let's check we don't end up with an integer indefinite number
3189  // when not expected. First test for huge numbers: check whether
3190  // int(Y)+1 == int(Y) which is true for very large numbers and
3191  // those are all even. A 64 bit integer is guaranteed to not
3192  // overflow for numbers where y+1 != y (when precision is set to
3193  // double precision).
3194  Label y_not_huge;
3195
3196  fld1();                     // Stack: 1 int(Y) X Y
3197  fadd(1);                    // Stack: 1+int(Y) int(Y) X Y
3198
3199#ifdef _LP64
3200  // trip to memory to force the precision down from double extended
3201  // precision
3202  fstp_d(Address(rsp, 0));
3203  fld_d(Address(rsp, 0));
3204#endif
3205
3206  fcmp(tmp, 1, true, false);  // Stack: int(Y) X Y
3207#endif
3208
3209  // move int(Y) as 64 bit integer to thread's stack
3210  fistp_d(Address(rsp,0));    // Stack: X Y
3211
3212#ifdef ASSERT
3213  jcc(Assembler::notEqual, y_not_huge);
3214
3215  // Y is huge so we know it's even. It may not fit in a 64 bit
3216  // integer and we don't want the debug code below to see the
3217  // integer indefinite value so overwrite int(Y) on the thread's
3218  // stack with 0.
3219  movl(Address(rsp, 0), 0);
3220  movl(Address(rsp, 4), 0);
3221
3222  bind(y_not_huge);
3223#endif
3224
3225  fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
3226  fld_s(1);                   // Stack: X Y X Y
3227  fabs();                     // Stack: abs(X) Y X Y
3228  fast_pow();                 // Stack: abs(X)^Y X Y
3229  fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
3230  // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to the slow case.
3231
3232  pop(tmp2);
3233  NOT_LP64(pop(tmp3));
3234  jcc(Assembler::parity, slow_case);
3235
3236#ifdef ASSERT
3237  // Check that int(Y) is not integer indefinite value (int
3238  // overflow). Shouldn't happen because for values that would
3239  // overflow, 1+int(Y)==Y which was tested earlier.
3240#ifndef _LP64
3241  {
3242    Label integer;
3243    testl(tmp2, tmp2);
3244    jcc(Assembler::notZero, integer);
3245    cmpl(tmp3, 0x80000000);
3246    jcc(Assembler::notZero, integer);
3247    STOP("integer indefinite value shouldn't be seen here");
3248    bind(integer);
3249  }
3250#else
3251  {
3252    Label integer;
3253    mov(tmp3, tmp2); // preserve tmp2 for parity check below
3254    shlq(tmp3, 1);
3255    jcc(Assembler::carryClear, integer);
3256    jcc(Assembler::notZero, integer);
3257    STOP("integer indefinite value shouldn't be seen here");
3258    bind(integer);
3259  }
3260#endif
3261#endif
3262
3263  // get rid of duplicate arguments. Stack: X^Y
3264  if (num_fpu_regs_in_use > 0) {
3265    fxch(); fpop();
3266    fxch(); fpop();
3267  } else {
3268    ffree(2);
3269    ffree(1);
3270  }
3271
3272  testl(tmp2, 1);
3273  jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
3274  // X <= 0, Y odd: X^Y = -abs(X)^Y
3275
3276  fchs();                     // Stack: -abs(X)^Y Y
3277  jmp(done);
3278
3279  // slow case: runtime call
3280  bind(slow_case);
3281
3282  fpop();                       // pop incorrect result or int(Y)
3283
3284  fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), 2, num_fpu_regs_in_use);
3285
3286  // Come here with result in F-TOS
3287  bind(done);
3288}
3289
3290void MacroAssembler::fpop() {
3291  ffree();
3292  fincstp();
3293}
3294
3295void MacroAssembler::load_float(Address src) {
3296  if (UseSSE >= 1) {
3297    movflt(xmm0, src);
3298  } else {
3299    LP64_ONLY(ShouldNotReachHere());
3300    NOT_LP64(fld_s(src));
3301  }
3302}
3303
3304void MacroAssembler::store_float(Address dst) {
3305  if (UseSSE >= 1) {
3306    movflt(dst, xmm0);
3307  } else {
3308    LP64_ONLY(ShouldNotReachHere());
3309    NOT_LP64(fstp_s(dst));
3310  }
3311}
3312
3313void MacroAssembler::load_double(Address src) {
3314  if (UseSSE >= 2) {
3315    movdbl(xmm0, src);
3316  } else {
3317    LP64_ONLY(ShouldNotReachHere());
3318    NOT_LP64(fld_d(src));
3319  }
3320}
3321
3322void MacroAssembler::store_double(Address dst) {
3323  if (UseSSE >= 2) {
3324    movdbl(dst, xmm0);
3325  } else {
3326    LP64_ONLY(ShouldNotReachHere());
3327    NOT_LP64(fstp_d(dst));
3328  }
3329}
3330
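// fremr computes the x87 remainder of ST0 by ST1. fprem only performs a partial
// reduction and keeps C2 set in the FPU status word until the reduction is complete,
// hence the loop below: on 64 bit, bit 0x400 of the stored status word is C2; on
// 32 bit the sahf/parity trick tests the same bit.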
3331void MacroAssembler::fremr(Register tmp) {
3332  save_rax(tmp);
3333  { Label L;
3334    bind(L);
3335    fprem();
3336    fwait(); fnstsw_ax();
3337#ifdef _LP64
3338    testl(rax, 0x400);
3339    jcc(Assembler::notEqual, L);
3340#else
3341    sahf();
3342    jcc(Assembler::parity, L);
3343#endif // _LP64
3344  }
3345  restore_rax(tmp);
3346  // Result is in ST0.
3347  // Note: fxch & fpop to get rid of ST1
3348  // (otherwise FPU stack could overflow eventually)
3349  fxch(1);
3350  fpop();
3351}
3352
3353
3354void MacroAssembler::incrementl(AddressLiteral dst) {
3355  if (reachable(dst)) {
3356    incrementl(as_Address(dst));
3357  } else {
3358    lea(rscratch1, dst);
3359    incrementl(Address(rscratch1, 0));
3360  }
3361}
3362
3363void MacroAssembler::incrementl(ArrayAddress dst) {
3364  incrementl(as_Address(dst));
3365}
3366
3367void MacroAssembler::incrementl(Register reg, int value) {
3368  if (value == min_jint) {addl(reg, value) ; return; }
3369  if (value <  0) { decrementl(reg, -value); return; }
3370  if (value == 0) {                        ; return; }
3371  if (value == 1 && UseIncDec) { incl(reg) ; return; }
3372  /* else */      { addl(reg, value)       ; return; }
3373}
3374
3375void MacroAssembler::incrementl(Address dst, int value) {
3376  if (value == min_jint) {addl(dst, value) ; return; }
3377  if (value <  0) { decrementl(dst, -value); return; }
3378  if (value == 0) {                        ; return; }
3379  if (value == 1 && UseIncDec) { incl(dst) ; return; }
3380  /* else */      { addl(dst, value)       ; return; }
3381}
3382
3383void MacroAssembler::jump(AddressLiteral dst) {
3384  if (reachable(dst)) {
3385    jmp_literal(dst.target(), dst.rspec());
3386  } else {
3387    lea(rscratch1, dst);
3388    jmp(rscratch1);
3389  }
3390}
3391
3392void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
3393  if (reachable(dst)) {
3394    InstructionMark im(this);
3395    relocate(dst.reloc());
3396    const int short_size = 2;
3397    const int long_size = 6;
3398    int offs = (intptr_t)dst.target() - ((intptr_t)pc());
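    // offs is measured from the current pc; the encoded displacement is relative to
    // the end of the branch, hence the short_size/long_size adjustments below.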
3399    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
3400      // 0111 tttn #8-bit disp
3401      emit_int8(0x70 | cc);
3402      emit_int8((offs - short_size) & 0xFF);
3403    } else {
3404      // 0000 1111 1000 tttn #32-bit disp
3405      emit_int8(0x0F);
3406      emit_int8((unsigned char)(0x80 | cc));
3407      emit_int32(offs - long_size);
3408    }
3409  } else {
3410#ifdef ASSERT
3411    warning("reversing conditional branch");
3412#endif /* ASSERT */
3413    Label skip;
3414    jccb(reverse[cc], skip);
3415    lea(rscratch1, dst);
3416    Assembler::jmp(rscratch1);
3417    bind(skip);
3418  }
3419}
3420
3421void MacroAssembler::ldmxcsr(AddressLiteral src) {
3422  if (reachable(src)) {
3423    Assembler::ldmxcsr(as_Address(src));
3424  } else {
3425    lea(rscratch1, src);
3426    Assembler::ldmxcsr(Address(rscratch1, 0));
3427  }
3428}
3429
3430int MacroAssembler::load_signed_byte(Register dst, Address src) {
3431  int off;
3432  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3433    off = offset();
3434    movsbl(dst, src); // movsxb
3435  } else {
3436    off = load_unsigned_byte(dst, src);
3437    shll(dst, 24);
3438    sarl(dst, 24);
3439  }
3440  return off;
3441}
3442
3443// Note: load_signed_short used to be called load_signed_word.
3444// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3445// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3446// The term "word" in HotSpot means a 32- or 64-bit machine word.
3447int MacroAssembler::load_signed_short(Register dst, Address src) {
3448  int off;
3449  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3450    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
3451    // version but this is what 64bit has always done. This seems to imply
3452    // that users are only using 32bits worth.
3453    off = offset();
3454    movswl(dst, src); // movsxw
3455  } else {
3456    off = load_unsigned_short(dst, src);
3457    shll(dst, 16);
3458    sarl(dst, 16);
3459  }
3460  return off;
3461}
3462
3463int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3464  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3465  // and "3.9 Partial Register Penalties", p. 22.
3466  int off;
3467  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3468    off = offset();
3469    movzbl(dst, src); // movzxb
3470  } else {
3471    xorl(dst, dst);
3472    off = offset();
3473    movb(dst, src);
3474  }
3475  return off;
3476}
3477
3478// Note: load_unsigned_short used to be called load_unsigned_word.
3479int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3480  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3481  // and "3.9 Partial Register Penalties", p. 22.
3482  int off;
3483  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3484    off = offset();
3485    movzwl(dst, src); // movzxw
3486  } else {
3487    xorl(dst, dst);
3488    off = offset();
3489    movw(dst, src);
3490  }
3491  return off;
3492}
3493
3494void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3495  switch (size_in_bytes) {
3496#ifndef _LP64
3497  case  8:
3498    assert(dst2 != noreg, "second dest register required");
3499    movl(dst,  src);
3500    movl(dst2, src.plus_disp(BytesPerInt));
3501    break;
3502#else
3503  case  8:  movq(dst, src); break;
3504#endif
3505  case  4:  movl(dst, src); break;
3506  case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3507  case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
3508  default:  ShouldNotReachHere();
3509  }
3510}
3511
3512void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
3513  switch (size_in_bytes) {
3514#ifndef _LP64
3515  case  8:
3516    assert(src2 != noreg, "second source register required");
3517    movl(dst,                        src);
3518    movl(dst.plus_disp(BytesPerInt), src2);
3519    break;
3520#else
3521  case  8:  movq(dst, src); break;
3522#endif
3523  case  4:  movl(dst, src); break;
3524  case  2:  movw(dst, src); break;
3525  case  1:  movb(dst, src); break;
3526  default:  ShouldNotReachHere();
3527  }
3528}
3529
3530void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3531  if (reachable(dst)) {
3532    movl(as_Address(dst), src);
3533  } else {
3534    lea(rscratch1, dst);
3535    movl(Address(rscratch1, 0), src);
3536  }
3537}
3538
3539void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3540  if (reachable(src)) {
3541    movl(dst, as_Address(src));
3542  } else {
3543    lea(rscratch1, src);
3544    movl(dst, Address(rscratch1, 0));
3545  }
3546}
3547
3548// C++ bool manipulation
3549
3550void MacroAssembler::movbool(Register dst, Address src) {
3551  if(sizeof(bool) == 1)
3552    movb(dst, src);
3553  else if(sizeof(bool) == 2)
3554    movw(dst, src);
3555  else if(sizeof(bool) == 4)
3556    movl(dst, src);
3557  else
3558    // unsupported
3559    ShouldNotReachHere();
3560}
3561
3562void MacroAssembler::movbool(Address dst, bool boolconst) {
3563  if(sizeof(bool) == 1)
3564    movb(dst, (int) boolconst);
3565  else if(sizeof(bool) == 2)
3566    movw(dst, (int) boolconst);
3567  else if(sizeof(bool) == 4)
3568    movl(dst, (int) boolconst);
3569  else
3570    // unsupported
3571    ShouldNotReachHere();
3572}
3573
3574void MacroAssembler::movbool(Address dst, Register src) {
3575  if(sizeof(bool) == 1)
3576    movb(dst, src);
3577  else if(sizeof(bool) == 2)
3578    movw(dst, src);
3579  else if(sizeof(bool) == 4)
3580    movl(dst, src);
3581  else
3582    // unsupported
3583    ShouldNotReachHere();
3584}
3585
3586void MacroAssembler::movbyte(ArrayAddress dst, int src) {
3587  movb(as_Address(dst), src);
3588}
3589
3590void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
3591  if (reachable(src)) {
3592    movdl(dst, as_Address(src));
3593  } else {
3594    lea(rscratch1, src);
3595    movdl(dst, Address(rscratch1, 0));
3596  }
3597}
3598
3599void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
3600  if (reachable(src)) {
3601    movq(dst, as_Address(src));
3602  } else {
3603    lea(rscratch1, src);
3604    movq(dst, Address(rscratch1, 0));
3605  }
3606}
3607
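// For a 64-bit load, movsd zeroes the upper half of the destination register while
// movlpd leaves it unchanged; UseXmmLoadAndClearUpper picks whichever is cheaper on
// the current CPU (clearing avoids a false dependency on the old upper bits).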
3608void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3609  if (reachable(src)) {
3610    if (UseXmmLoadAndClearUpper) {
3611      movsd (dst, as_Address(src));
3612    } else {
3613      movlpd(dst, as_Address(src));
3614    }
3615  } else {
3616    lea(rscratch1, src);
3617    if (UseXmmLoadAndClearUpper) {
3618      movsd (dst, Address(rscratch1, 0));
3619    } else {
3620      movlpd(dst, Address(rscratch1, 0));
3621    }
3622  }
3623}
3624
3625void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3626  if (reachable(src)) {
3627    movss(dst, as_Address(src));
3628  } else {
3629    lea(rscratch1, src);
3630    movss(dst, Address(rscratch1, 0));
3631  }
3632}
3633
3634void MacroAssembler::movptr(Register dst, Register src) {
3635  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3636}
3637
3638void MacroAssembler::movptr(Register dst, Address src) {
3639  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3640}
3641
3642// src should NEVER be a real pointer. Use AddressLiteral for true pointers
3643void MacroAssembler::movptr(Register dst, intptr_t src) {
3644  LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
3645}
3646
3647void MacroAssembler::movptr(Address dst, Register src) {
3648  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3649}
3650
3651void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
3652  if (reachable(src)) {
3653    Assembler::movdqu(dst, as_Address(src));
3654  } else {
3655    lea(rscratch1, src);
3656    Assembler::movdqu(dst, Address(rscratch1, 0));
3657  }
3658}
3659
3660void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
3661  if (reachable(src)) {
3662    Assembler::movdqa(dst, as_Address(src));
3663  } else {
3664    lea(rscratch1, src);
3665    Assembler::movdqa(dst, Address(rscratch1, 0));
3666  }
3667}
3668
3669void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
3670  if (reachable(src)) {
3671    Assembler::movsd(dst, as_Address(src));
3672  } else {
3673    lea(rscratch1, src);
3674    Assembler::movsd(dst, Address(rscratch1, 0));
3675  }
3676}
3677
3678void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3679  if (reachable(src)) {
3680    Assembler::movss(dst, as_Address(src));
3681  } else {
3682    lea(rscratch1, src);
3683    Assembler::movss(dst, Address(rscratch1, 0));
3684  }
3685}
3686
3687void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
3688  if (reachable(src)) {
3689    Assembler::mulsd(dst, as_Address(src));
3690  } else {
3691    lea(rscratch1, src);
3692    Assembler::mulsd(dst, Address(rscratch1, 0));
3693  }
3694}
3695
3696void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
3697  if (reachable(src)) {
3698    Assembler::mulss(dst, as_Address(src));
3699  } else {
3700    lea(rscratch1, src);
3701    Assembler::mulss(dst, Address(rscratch1, 0));
3702  }
3703}
3704
3705void MacroAssembler::null_check(Register reg, int offset) {
3706  if (needs_explicit_null_check(offset)) {
3707    // provoke OS NULL exception if reg = NULL by
3708    // accessing M[reg] w/o changing any (non-CC) registers
3709    // NOTE: cmpl is plenty here to provoke a segv
3710    cmpptr(rax, Address(reg, 0));
3711    // Note: should probably use testl(rax, Address(reg, 0));
3712    //       may be shorter code (however, this version of
3713    //       testl needs to be implemented first)
3714  } else {
3715    // nothing to do, (later) access of M[reg + offset]
3716    // will provoke OS NULL exception if reg = NULL
3717  }
3718}
3719
3720void MacroAssembler::os_breakpoint() {
3721  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
3722  // (e.g., MSVC can't call ps() otherwise)
3723  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3724}
3725
3726void MacroAssembler::pop_CPU_state() {
3727  pop_FPU_state();
3728  pop_IU_state();
3729}
3730
3731void MacroAssembler::pop_FPU_state() {
3732#ifndef _LP64
3733  frstor(Address(rsp, 0));
3734#else
3735  // AVX will continue to use the fxsave area.
3736  // EVEX needs to utilize the xsave area, which is under different
3737  // management.
3738  if(VM_Version::supports_evex()) {
3739    // EDX:EAX describe the XSAVE header and
3740    // are obtained while fetching info for XCR0 via cpuid.
3741    // These two registers make up 64-bits in the header for which bits
3742    // 62:10 are currently reserved for future implementations and unused.  Bit 63
3743    // is unused for our implementation as we do not utilize
3744    // compressed XSAVE areas.  Bits 9..8 are currently ignored as we do not use
3745    // the functionality for PKRU state and MSR tracing.
3746    // Ergo we are primarily concerned with bits 7..0, which define
3747    // which ISA extensions and features are enabled for a given machine. They are
3748    // recorded in XemXcr0Eax and are used to map the XSAVE area
3749    // for restoring registers as described via XCR0.
3750    movl(rdx,VM_Version::get_xsave_header_upper_segment());
3751    movl(rax,VM_Version::get_xsave_header_lower_segment());
3752    xrstor(Address(rsp, 0));
3753  } else {
3754    fxrstor(Address(rsp, 0));
3755  }
3756#endif
3757  addptr(rsp, FPUStateSizeInWords * wordSize);
3758}
3759
3760void MacroAssembler::pop_IU_state() {
3761  popa();
3762  LP64_ONLY(addq(rsp, 8));
3763  popf();
3764}
3765
3766// Save Integer and Float state
3767// Warning: Stack must be 16 byte aligned (64bit)
3768void MacroAssembler::push_CPU_state() {
3769  push_IU_state();
3770  push_FPU_state();
3771}
3772
3773#ifdef _LP64
3774#define XSTATE_BV 0x200
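// XSTATE_BV names the offset (0x200) of the first 8-byte field of the XSAVE header,
// which starts right after the 512-byte legacy region of an XSAVE area; it records
// which state components are valid for XRSTOR.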
3775#endif
3776
3777void MacroAssembler::push_FPU_state() {
3778  subptr(rsp, FPUStateSizeInWords * wordSize);
3779#ifndef _LP64
3780  fnsave(Address(rsp, 0));
3781  fwait();
3782#else
3783  // AVX will continue to use the fxsave area.
3784  // EVEX needs to utilize the xsave area, which is under different
3785  // management.
3786  if(VM_Version::supports_evex()) {
3787    // Save a copy of EAX and EDX
3788    push(rax);
3789    push(rdx);
3790    // EDX:EAX describe the XSAVE header and
3791    // are obtained while fetching info for XCR0 via cpuid.
3792    // These two registers make up 64-bits in the header for which bits
3793    // 62:10 are currently reserved for future implementations and unused.  Bit 63
3794    // is unused for our implementation as we do not utilize
3795    // compressed XSAVE areas.  Bits 9..8 are currently ignored as we do not use
3796    // the functionality for PKRU state and MSR tracing.
3797    // Ergo we are primarily concerned with bits 7..0, which define
3798    // which ISA extensions and features are enabled for a given machine. They are
3799    // recorded in XemXcr0Eax and are used to program the XSAVE area
3800    // for saving the required registers as defined in XCR0.
3801    int xcr0_edx = VM_Version::get_xsave_header_upper_segment();
3802    int xcr0_eax = VM_Version::get_xsave_header_lower_segment();
3803    movl(rdx,xcr0_edx);
3804    movl(rax,xcr0_eax);
3805    xsave(Address(rsp, wordSize*2));
3806    // now apply the control bits and clear bytes 8..23 in the header
3807    pop(rdx);
3808    pop(rax);
3809    movl(Address(rsp, XSTATE_BV), xcr0_eax);
3810    movl(Address(rsp, XSTATE_BV+4), xcr0_edx);
3811    andq(Address(rsp, XSTATE_BV+8), 0);
3812    andq(Address(rsp, XSTATE_BV+16), 0);
3813  } else {
3814    fxsave(Address(rsp, 0));
3815  }
3816#endif // LP64
3817}
3818
3819void MacroAssembler::push_IU_state() {
3820  // Push flags first because pusha kills them
3821  pushf();
3822  // Make sure rsp stays 16-byte aligned
3823  LP64_ONLY(subq(rsp, 8));
3824  pusha();
3825}
3826
3827void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
3828  // determine java_thread register
3829  if (!java_thread->is_valid()) {
3830    java_thread = rdi;
3831    get_thread(java_thread);
3832  }
3833  // we must set sp to zero to clear frame
3834  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
3835  if (clear_fp) {
3836    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
3837  }
3838
3839  if (clear_pc)
3840    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
3841
3842}
3843
3844void MacroAssembler::restore_rax(Register tmp) {
3845  if (tmp == noreg) pop(rax);
3846  else if (tmp != rax) mov(rax, tmp);
3847}
3848
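// Rounds reg up to the next multiple of modulus (a power of two),
// e.g. with modulus == 8: 13 -> 16, and 16 stays 16.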
3849void MacroAssembler::round_to(Register reg, int modulus) {
3850  addptr(reg, modulus - 1);
3851  andptr(reg, -modulus);
3852}
3853
3854void MacroAssembler::save_rax(Register tmp) {
3855  if (tmp == noreg) push(rax);
3856  else if (tmp != rax) mov(tmp, rax);
3857}
3858
3859// Write serialization page so VM thread can do a pseudo remote membar.
3860// We use the current thread pointer to calculate a thread specific
3861// offset to write to within the page. This minimizes bus traffic
3862// due to cache line collision.
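// The VM thread later forces a cross-thread serialization point by write-protecting
// and then unprotecting this page; a thread executing the store below while the page
// is protected traps into the VM's fault handler.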
3863void MacroAssembler::serialize_memory(Register thread, Register tmp) {
3864  movl(tmp, thread);
3865  shrl(tmp, os::get_serialize_page_shift_count());
3866  andl(tmp, (os::vm_page_size() - sizeof(int)));
3867
3868  Address index(noreg, tmp, Address::times_1);
3869  ExternalAddress page(os::get_memory_serialize_page());
3870
3871  // Size of store must match masking code above
3872  movl(as_Address(ArrayAddress(page, index)), tmp);
3873}
3874
3875// Calls to C land
3876//
3877// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
3878// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3879// has to be reset to 0. This is required to allow proper stack traversal.
3880void MacroAssembler::set_last_Java_frame(Register java_thread,
3881                                         Register last_java_sp,
3882                                         Register last_java_fp,
3883                                         address  last_java_pc) {
3884  // determine java_thread register
3885  if (!java_thread->is_valid()) {
3886    java_thread = rdi;
3887    get_thread(java_thread);
3888  }
3889  // determine last_java_sp register
3890  if (!last_java_sp->is_valid()) {
3891    last_java_sp = rsp;
3892  }
3893
3894  // last_java_fp is optional
3895
3896  if (last_java_fp->is_valid()) {
3897    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3898  }
3899
3900  // last_java_pc is optional
3901
3902  if (last_java_pc != NULL) {
3903    lea(Address(java_thread,
3904                 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3905        InternalAddress(last_java_pc));
3906
3907  }
3908  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3909}
3910
3911void MacroAssembler::shlptr(Register dst, int imm8) {
3912  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
3913}
3914
3915void MacroAssembler::shrptr(Register dst, int imm8) {
3916  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
3917}
3918
3919void MacroAssembler::sign_extend_byte(Register reg) {
3920  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
3921    movsbl(reg, reg); // movsxb
3922  } else {
3923    shll(reg, 24);
3924    sarl(reg, 24);
3925  }
3926}
3927
3928void MacroAssembler::sign_extend_short(Register reg) {
3929  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3930    movswl(reg, reg); // movsxw
3931  } else {
3932    shll(reg, 16);
3933    sarl(reg, 16);
3934  }
3935}
3936
3937void MacroAssembler::testl(Register dst, AddressLiteral src) {
3938  assert(reachable(src), "Address should be reachable");
3939  testl(dst, as_Address(src));
3940}
3941
3942void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3943  if (reachable(src)) {
3944    Assembler::sqrtsd(dst, as_Address(src));
3945  } else {
3946    lea(rscratch1, src);
3947    Assembler::sqrtsd(dst, Address(rscratch1, 0));
3948  }
3949}
3950
3951void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3952  if (reachable(src)) {
3953    Assembler::sqrtss(dst, as_Address(src));
3954  } else {
3955    lea(rscratch1, src);
3956    Assembler::sqrtss(dst, Address(rscratch1, 0));
3957  }
3958}
3959
3960void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3961  if (reachable(src)) {
3962    Assembler::subsd(dst, as_Address(src));
3963  } else {
3964    lea(rscratch1, src);
3965    Assembler::subsd(dst, Address(rscratch1, 0));
3966  }
3967}
3968
3969void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3970  if (reachable(src)) {
3971    Assembler::subss(dst, as_Address(src));
3972  } else {
3973    lea(rscratch1, src);
3974    Assembler::subss(dst, Address(rscratch1, 0));
3975  }
3976}
3977
3978void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
3979  if (reachable(src)) {
3980    Assembler::ucomisd(dst, as_Address(src));
3981  } else {
3982    lea(rscratch1, src);
3983    Assembler::ucomisd(dst, Address(rscratch1, 0));
3984  }
3985}
3986
3987void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
3988  if (reachable(src)) {
3989    Assembler::ucomiss(dst, as_Address(src));
3990  } else {
3991    lea(rscratch1, src);
3992    Assembler::ucomiss(dst, Address(rscratch1, 0));
3993  }
3994}
3995
3996void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3997  // Used in sign-bit flipping with aligned address.
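  // (the literal typically points at a 16-byte mask such as 0x8000000000000000 per
  // lane, so the xor flips only the sign bits of dst)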
3998  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3999  if (reachable(src)) {
4000    Assembler::xorpd(dst, as_Address(src));
4001  } else {
4002    lea(rscratch1, src);
4003    Assembler::xorpd(dst, Address(rscratch1, 0));
4004  }
4005}
4006
4007void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
4008  // Used in sign-bit flipping with aligned address.
4009  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
4010  if (reachable(src)) {
4011    Assembler::xorps(dst, as_Address(src));
4012  } else {
4013    lea(rscratch1, src);
4014    Assembler::xorps(dst, Address(rscratch1, 0));
4015  }
4016}
4017
4018void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
4019  // Used in sign-bit flipping with aligned address.
4020  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
4021  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
4022  if (reachable(src)) {
4023    Assembler::pshufb(dst, as_Address(src));
4024  } else {
4025    lea(rscratch1, src);
4026    Assembler::pshufb(dst, Address(rscratch1, 0));
4027  }
4028}
4029
4030// AVX 3-operands instructions
4031
4032void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4033  if (reachable(src)) {
4034    vaddsd(dst, nds, as_Address(src));
4035  } else {
4036    lea(rscratch1, src);
4037    vaddsd(dst, nds, Address(rscratch1, 0));
4038  }
4039}
4040
4041void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4042  if (reachable(src)) {
4043    vaddss(dst, nds, as_Address(src));
4044  } else {
4045    lea(rscratch1, src);
4046    vaddss(dst, nds, Address(rscratch1, 0));
4047  }
4048}
4049
4050void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4051  if (reachable(src)) {
4052    vandpd(dst, nds, as_Address(src), vector_len);
4053  } else {
4054    lea(rscratch1, src);
4055    vandpd(dst, nds, Address(rscratch1, 0), vector_len);
4056  }
4057}
4058
4059void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4060  if (reachable(src)) {
4061    vandps(dst, nds, as_Address(src), vector_len);
4062  } else {
4063    lea(rscratch1, src);
4064    vandps(dst, nds, Address(rscratch1, 0), vector_len);
4065  }
4066}
4067
4068void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4069  if (reachable(src)) {
4070    vdivsd(dst, nds, as_Address(src));
4071  } else {
4072    lea(rscratch1, src);
4073    vdivsd(dst, nds, Address(rscratch1, 0));
4074  }
4075}
4076
4077void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4078  if (reachable(src)) {
4079    vdivss(dst, nds, as_Address(src));
4080  } else {
4081    lea(rscratch1, src);
4082    vdivss(dst, nds, Address(rscratch1, 0));
4083  }
4084}
4085
4086void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4087  if (reachable(src)) {
4088    vmulsd(dst, nds, as_Address(src));
4089  } else {
4090    lea(rscratch1, src);
4091    vmulsd(dst, nds, Address(rscratch1, 0));
4092  }
4093}
4094
4095void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4096  if (reachable(src)) {
4097    vmulss(dst, nds, as_Address(src));
4098  } else {
4099    lea(rscratch1, src);
4100    vmulss(dst, nds, Address(rscratch1, 0));
4101  }
4102}
4103
4104void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4105  if (reachable(src)) {
4106    vsubsd(dst, nds, as_Address(src));
4107  } else {
4108    lea(rscratch1, src);
4109    vsubsd(dst, nds, Address(rscratch1, 0));
4110  }
4111}
4112
4113void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4114  if (reachable(src)) {
4115    vsubss(dst, nds, as_Address(src));
4116  } else {
4117    lea(rscratch1, src);
4118    vsubss(dst, nds, Address(rscratch1, 0));
4119  }
4120}
4121
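// vnegatess/vnegatesd negate a scalar float/double by xoring in a sign-bit mask taken
// from src. On AVX-512 parts without the VL extension, 128-bit vxorps/vxorpd cannot
// encode registers xmm16..xmm31, so operands living in the upper bank are bounced
// through xmm0 (which is saved and restored around the operation).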
4122void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4123  int nds_enc = nds->encoding();
4124  int dst_enc = dst->encoding();
4125  bool dst_upper_bank = (dst_enc > 15);
4126  bool nds_upper_bank = (nds_enc > 15);
4127  if (VM_Version::supports_avx512novl() &&
4128      (nds_upper_bank || dst_upper_bank)) {
4129    if (dst_upper_bank) {
4130      subptr(rsp, 64);
4131      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4132      movflt(xmm0, nds);
4133      if (reachable(src)) {
4134        vxorps(xmm0, xmm0, as_Address(src), Assembler::AVX_128bit);
4135      } else {
4136        lea(rscratch1, src);
4137        vxorps(xmm0, xmm0, Address(rscratch1, 0), Assembler::AVX_128bit);
4138      }
4139      movflt(dst, xmm0);
4140      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4141      addptr(rsp, 64);
4142    } else {
4143      movflt(dst, nds);
4144      if (reachable(src)) {
4145        vxorps(dst, dst, as_Address(src), Assembler::AVX_128bit);
4146      } else {
4147        lea(rscratch1, src);
4148        vxorps(dst, dst, Address(rscratch1, 0), Assembler::AVX_128bit);
4149      }
4150    }
4151  } else {
4152    if (reachable(src)) {
4153      vxorps(dst, nds, as_Address(src), Assembler::AVX_128bit);
4154    } else {
4155      lea(rscratch1, src);
4156      vxorps(dst, nds, Address(rscratch1, 0), Assembler::AVX_128bit);
4157    }
4158  }
4159}
4160
4161void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4162  int nds_enc = nds->encoding();
4163  int dst_enc = dst->encoding();
4164  bool dst_upper_bank = (dst_enc > 15);
4165  bool nds_upper_bank = (nds_enc > 15);
4166  if (VM_Version::supports_avx512novl() &&
4167      (nds_upper_bank || dst_upper_bank)) {
4168    if (dst_upper_bank) {
4169      subptr(rsp, 64);
4170      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4171      movdbl(xmm0, nds);
4172      if (reachable(src)) {
4173        vxorps(xmm0, xmm0, as_Address(src), Assembler::AVX_128bit);
4174      } else {
4175        lea(rscratch1, src);
4176        vxorps(xmm0, xmm0, Address(rscratch1, 0), Assembler::AVX_128bit);
4177      }
4178      movdbl(dst, xmm0);
4179      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4180      addptr(rsp, 64);
4181    } else {
4182      movdbl(dst, nds);
4183      if (reachable(src)) {
4184        vxorps(dst, dst, as_Address(src), Assembler::AVX_128bit);
4185      } else {
4186        lea(rscratch1, src);
4187        vxorps(dst, dst, Address(rscratch1, 0), Assembler::AVX_128bit);
4188      }
4189    }
4190  } else {
4191    if (reachable(src)) {
4192      vxorpd(dst, nds, as_Address(src), Assembler::AVX_128bit);
4193    } else {
4194      lea(rscratch1, src);
4195      vxorpd(dst, nds, Address(rscratch1, 0), Assembler::AVX_128bit);
4196    }
4197  }
4198}
4199
4200void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4201  if (reachable(src)) {
4202    vxorpd(dst, nds, as_Address(src), vector_len);
4203  } else {
4204    lea(rscratch1, src);
4205    vxorpd(dst, nds, Address(rscratch1, 0), vector_len);
4206  }
4207}
4208
4209void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4210  if (reachable(src)) {
4211    vxorps(dst, nds, as_Address(src), vector_len);
4212  } else {
4213    lea(rscratch1, src);
4214    vxorps(dst, nds, Address(rscratch1, 0), vector_len);
4215  }
4216}
4217
4218
4219//////////////////////////////////////////////////////////////////////////////////
4220#if INCLUDE_ALL_GCS
4221
4222void MacroAssembler::g1_write_barrier_pre(Register obj,
4223                                          Register pre_val,
4224                                          Register thread,
4225                                          Register tmp,
4226                                          bool tosca_live,
4227                                          bool expand_call) {
4228
4229  // If expand_call is true then we expand the call_VM_leaf macro
4230  // directly to skip generating the check by
4231  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
4232
4233#ifdef _LP64
4234  assert(thread == r15_thread, "must be");
4235#endif // _LP64
4236
4237  Label done;
4238  Label runtime;
4239
4240  assert(pre_val != noreg, "check this code");
4241
4242  if (obj != noreg) {
4243    assert_different_registers(obj, pre_val, tmp);
4244    assert(pre_val != rax, "check this code");
4245  }
4246
4247  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4248                                       PtrQueue::byte_offset_of_active()));
4249  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4250                                       PtrQueue::byte_offset_of_index()));
4251  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4252                                       PtrQueue::byte_offset_of_buf()));
4253
4254
4255  // Is marking active?
4256  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4257    cmpl(in_progress, 0);
4258  } else {
4259    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
4260    cmpb(in_progress, 0);
4261  }
4262  jcc(Assembler::equal, done);
4263
4264  // Do we need to load the previous value?
4265  if (obj != noreg) {
4266    load_heap_oop(pre_val, Address(obj, 0));
4267  }
4268
4269  // Is the previous value null?
4270  cmpptr(pre_val, (int32_t) NULL_WORD);
4271  jcc(Assembler::equal, done);
4272
4273  // Can we store original value in the thread's buffer?
4274  // Is index == 0?
4275  // (The index field is typed as size_t.)
4276
4277  movptr(tmp, index);                   // tmp := *index_adr
4278  cmpptr(tmp, 0);                       // tmp == 0?
4279  jcc(Assembler::equal, runtime);       // If yes, goto runtime
4280
4281  subptr(tmp, wordSize);                // tmp := tmp - wordSize
4282  movptr(index, tmp);                   // *index_adr := tmp
4283  addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr
4284
4285  // Record the previous value
4286  movptr(Address(tmp, 0), pre_val);
4287  jmp(done);
4288
4289  bind(runtime);
4290  // save the live input values
4291  if(tosca_live) push(rax);
4292
4293  if (obj != noreg && obj != rax)
4294    push(obj);
4295
4296  if (pre_val != rax)
4297    push(pre_val);
4298
4299  // Calling the runtime using the regular call_VM_leaf mechanism generates
4300  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
4301  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
4302  //
4303  // If we are generating the pre-barrier without a frame (e.g. in the
4304  // intrinsified Reference.get() routine) then ebp might be pointing to
4305  // the caller frame and so this check will most likely fail at runtime.
4306  //
4307  // Expanding the call directly bypasses the generation of the check.
4308  // So when we do not have a full interpreter frame on the stack
4309  // expand_call should be passed true.
4310
4311  NOT_LP64( push(thread); )
4312
4313  if (expand_call) {
4314    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
4315    pass_arg1(this, thread);
4316    pass_arg0(this, pre_val);
4317    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
4318  } else {
4319    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
4320  }
4321
4322  NOT_LP64( pop(thread); )
4323
4324  // save the live input values
4325  if (pre_val != rax)
4326    pop(pre_val);
4327
4328  if (obj != noreg && obj != rax)
4329    pop(obj);
4330
4331  if(tosca_live) pop(rax);
4332
4333  bind(done);
4334}
4335
4336void MacroAssembler::g1_write_barrier_post(Register store_addr,
4337                                           Register new_val,
4338                                           Register thread,
4339                                           Register tmp,
4340                                           Register tmp2) {
4341#ifdef _LP64
4342  assert(thread == r15_thread, "must be");
4343#endif // _LP64
4344
4345  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4346                                       PtrQueue::byte_offset_of_index()));
4347  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4348                                       PtrQueue::byte_offset_of_buf()));
4349
4350  CardTableModRefBS* ct =
4351    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
4352  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4353
4354  Label done;
4355  Label runtime;
4356
4357  // Does store cross heap regions?
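  // (store_addr xor new_val has a bit set at or above LogOfHRGrainBytes exactly when
  // the two addresses lie in different regions, so the shifted value is zero only for
  // a same-region store)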
4358
4359  movptr(tmp, store_addr);
4360  xorptr(tmp, new_val);
4361  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
4362  jcc(Assembler::equal, done);
4363
4364  // crosses regions, storing NULL?
4365
4366  cmpptr(new_val, (int32_t) NULL_WORD);
4367  jcc(Assembler::equal, done);
4368
4369  // storing region crossing non-NULL, is card already dirty?
4370
4371  const Register card_addr = tmp;
4372  const Register cardtable = tmp2;
4373
4374  movptr(card_addr, store_addr);
4375  shrptr(card_addr, CardTableModRefBS::card_shift);
4376  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
4377  // a valid address and therefore is not properly handled by the relocation code.
4378  movptr(cardtable, (intptr_t)ct->byte_map_base);
4379  addptr(card_addr, cardtable);
4380
4381  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
4382  jcc(Assembler::equal, done);
4383
4384  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4385  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4386  jcc(Assembler::equal, done);
4387
4388
4389  // storing a region crossing, non-NULL oop, card is clean.
4390  // dirty card and log.
4391
4392  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4393
4394  cmpl(queue_index, 0);
4395  jcc(Assembler::equal, runtime);
4396  subl(queue_index, wordSize);
4397  movptr(tmp2, buffer);
4398#ifdef _LP64
4399  movslq(rscratch1, queue_index);
4400  addq(tmp2, rscratch1);
4401  movq(Address(tmp2, 0), card_addr);
4402#else
4403  addl(tmp2, queue_index);
4404  movl(Address(tmp2, 0), card_addr);
4405#endif
4406  jmp(done);
4407
4408  bind(runtime);
4409  // save the live input values
4410  push(store_addr);
4411  push(new_val);
4412#ifdef _LP64
4413  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
4414#else
4415  push(thread);
4416  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
4417  pop(thread);
4418#endif
4419  pop(new_val);
4420  pop(store_addr);
4421
4422  bind(done);
4423}
4424
4425#endif // INCLUDE_ALL_GCS
4426//////////////////////////////////////////////////////////////////////////////////
4427
4428
4429void MacroAssembler::store_check(Register obj, Address dst) {
4430  store_check(obj);
4431}
4432
4433void MacroAssembler::store_check(Register obj) {
4434  // Does a store check for the oop in register obj. The content of
4435  // register obj is destroyed afterwards.
4436  BarrierSet* bs = Universe::heap()->barrier_set();
4437  assert(bs->kind() == BarrierSet::CardTableForRS ||
4438         bs->kind() == BarrierSet::CardTableExtension,
4439         "Wrong barrier set kind");
4440
4441  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
4442  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4443
4444  shrptr(obj, CardTableModRefBS::card_shift);
4445
4446  Address card_addr;
4447
4448  // The calculation for byte_map_base is as follows:
4449  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
4450  // So this essentially converts an address to a displacement and it will
4451  // never need to be relocated. On 64bit however the value may be too
4452  // large for a 32bit displacement.
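  // Net effect: the card for a heap address p lives at byte_map_base + (p >> card_shift),
  // and obj already holds p >> card_shift at this point.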
4453  intptr_t disp = (intptr_t) ct->byte_map_base;
4454  if (is_simm32(disp)) {
4455    card_addr = Address(noreg, obj, Address::times_1, disp);
4456  } else {
4457    // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
4458    // displacement and done in a single instruction given favorable mapping and a
4459    // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
4460    // entry and that entry is not properly handled by the relocation code.
4461    AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
4462    Address index(noreg, obj, Address::times_1);
4463    card_addr = as_Address(ArrayAddress(cardtable, index));
4464  }
4465
4466  int dirty = CardTableModRefBS::dirty_card_val();
4467  if (UseCondCardMark) {
4468    Label L_already_dirty;
4469    if (UseConcMarkSweepGC) {
4470      membar(Assembler::StoreLoad);
4471    }
4472    cmpb(card_addr, dirty);
4473    jcc(Assembler::equal, L_already_dirty);
4474    movb(card_addr, dirty);
4475    bind(L_already_dirty);
4476  } else {
4477    movb(card_addr, dirty);
4478  }
4479}
4480
4481void MacroAssembler::subptr(Register dst, int32_t imm32) {
4482  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
4483}
4484
4485// Force generation of a 4 byte immediate value even if it fits into 8bit
4486void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
4487  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
4488}
4489
4490void MacroAssembler::subptr(Register dst, Register src) {
4491  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
4492}
4493
4494// C++ bool manipulation
4495void MacroAssembler::testbool(Register dst) {
4496  if(sizeof(bool) == 1)
4497    testb(dst, 0xff);
4498  else if(sizeof(bool) == 2) {
4499    // testw implementation needed for two byte bools
4500    ShouldNotReachHere();
4501  } else if(sizeof(bool) == 4)
4502    testl(dst, dst);
4503  else
4504    // unsupported
4505    ShouldNotReachHere();
4506}
4507
4508void MacroAssembler::testptr(Register dst, Register src) {
4509  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4510}
4511
4512// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4513void MacroAssembler::tlab_allocate(Register obj,
4514                                   Register var_size_in_bytes,
4515                                   int con_size_in_bytes,
4516                                   Register t1,
4517                                   Register t2,
4518                                   Label& slow_case) {
4519  assert_different_registers(obj, t1, t2);
4520  assert_different_registers(obj, var_size_in_bytes, t1);
4521  Register end = t2;
4522  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
4523
4524  verify_tlab();
4525
4526  NOT_LP64(get_thread(thread));
4527
4528  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
4529  if (var_size_in_bytes == noreg) {
4530    lea(end, Address(obj, con_size_in_bytes));
4531  } else {
4532    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
4533  }
4534  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
4535  jcc(Assembler::above, slow_case);
4536
4537  // update the tlab top pointer
4538  movptr(Address(thread, JavaThread::tlab_top_offset()), end);
4539
4540  // recover var_size_in_bytes if necessary
4541  if (var_size_in_bytes == end) {
4542    subptr(var_size_in_bytes, obj);
4543  }
4544  verify_tlab();
4545}
4546
4547// Preserves rbx and rdx.
4548Register MacroAssembler::tlab_refill(Label& retry,
4549                                     Label& try_eden,
4550                                     Label& slow_case) {
4551  Register top = rax;
4552  Register t1  = rcx;
4553  Register t2  = rsi;
4554  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
4555  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
4556  Label do_refill, discard_tlab;
4557
4558  if (!Universe::heap()->supports_inline_contig_alloc()) {
4559    // No allocation in the shared eden.
4560    jmp(slow_case);
4561  }
4562
4563  NOT_LP64(get_thread(thread_reg));
4564
4565  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4566  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4567
4568  // calculate amount of free space
4569  subptr(t1, top);
4570  shrptr(t1, LogHeapWordSize);
4571
4572  // Retain tlab and allocate object in shared space if
4573  // the amount free in the tlab is too large to discard.
4574  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
4575  jcc(Assembler::lessEqual, discard_tlab);
4576
4577  // Retain
4578  // %%% yuck as movptr...
4579  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
4580  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
4581  if (TLABStats) {
4582    // increment number of slow_allocations
4583    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
4584  }
4585  jmp(try_eden);
4586
4587  bind(discard_tlab);
4588  if (TLABStats) {
4589    // increment number of refills
4590    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
4591    // accumulate wastage -- t1 is amount free in tlab
4592    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
4593  }
4594
4595  // if tlab is currently allocated (top or end != null) then
4596  // fill [top, end + alignment_reserve) with array object
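  // (Formatting the unused tail of the old tlab as an int[] keeps the heap
  //  parseable for GC walkers, which is why a filler array is written here.)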
4597  testptr(top, top);
4598  jcc(Assembler::zero, do_refill);
4599
4600  // set up the mark word
4601  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
4602  // set the length to the remaining space
4603  subptr(t1, typeArrayOopDesc::header_size(T_INT));
4604  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
4605  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
4606  movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
4607  // set klass to intArrayKlass
4608  // dubious reloc; why not an oop reloc?
4609  movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
4610  // store klass last.  concurrent gcs assume the length is valid if the
4611  // klass field is not null.
4612  store_klass(top, t1);
4613
4614  movptr(t1, top);
4615  subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4616  incr_allocated_bytes(thread_reg, t1, 0);
4617
4618  // refill the tlab with an eden allocation
4619  bind(do_refill);
4620  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4621  shlptr(t1, LogHeapWordSize);
4622  // allocate new tlab, address returned in top
4623  eden_allocate(top, t1, 0, t2, slow_case);
4624
4625  // Check that t1 was preserved in eden_allocate.
4626#ifdef ASSERT
4627  if (UseTLAB) {
4628    Label ok;
4629    Register tsize = rsi;
4630    assert_different_registers(tsize, thread_reg, t1);
4631    push(tsize);
4632    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4633    shlptr(tsize, LogHeapWordSize);
4634    cmpptr(t1, tsize);
4635    jcc(Assembler::equal, ok);
4636    STOP("assert(t1 != tlab size)");
4637    should_not_reach_here();
4638
4639    bind(ok);
4640    pop(tsize);
4641  }
4642#endif
4643  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
4644  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
4645  addptr(top, t1);
4646  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
4647  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
4648  verify_tlab();
4649  jmp(retry);
4650
4651  return thread_reg; // for use by caller
4652}
4653
4654void MacroAssembler::incr_allocated_bytes(Register thread,
4655                                          Register var_size_in_bytes,
4656                                          int con_size_in_bytes,
4657                                          Register t1) {
4658  if (!thread->is_valid()) {
4659#ifdef _LP64
4660    thread = r15_thread;
4661#else
4662    assert(t1->is_valid(), "need temp reg");
4663    thread = t1;
4664    get_thread(thread);
4665#endif
4666  }
4667
4668#ifdef _LP64
4669  if (var_size_in_bytes->is_valid()) {
4670    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4671  } else {
4672    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4673  }
4674#else
4675  if (var_size_in_bytes->is_valid()) {
4676    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4677  } else {
4678    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4679  }
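  // The 64-bit allocated_bytes counter is updated in two 32-bit halves here:
  // the addl above updates the low word and the adcl below carries into the
  // high word.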
4680  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
4681#endif
4682}
4683
4684void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
4685  pusha();
4686
4687  // if we are coming from c1, xmm registers may be live
4688  int off = 0;
4689  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
4690  if (UseAVX > 2) {
4691    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
4692  }
4693
4694  if (UseSSE == 1)  {
4695    subptr(rsp, sizeof(jdouble)*8);
4696    for (int n = 0; n < 8; n++) {
4697      movflt(Address(rsp, off++*sizeof(jdouble)), as_XMMRegister(n));
4698    }
4699  } else if (UseSSE >= 2)  {
4700    if (UseAVX > 2) {
4701      push(rbx);
4702      movl(rbx, 0xffff);
4703      kmovwl(k1, rbx);
4704      pop(rbx);
4705    }
4706#ifdef COMPILER2
4707    if (MaxVectorSize > 16) {
4708      if (UseAVX > 2) {
4709        // Save upper half of ZMM registers
4710        subptr(rsp, 32*num_xmm_regs);
4711        for (int n = 0; n < num_xmm_regs; n++) {
4712          vextractf64x4h(Address(rsp, off++*32), as_XMMRegister(n));
4713        }
4714        off = 0;
4715      }
4716      assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
4717      // Save upper half of YMM registers
4718      subptr(rsp, 16*num_xmm_regs);
4719      for (int n = 0; n < num_xmm_regs; n++) {
4720        vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
4721      }
4722    }
4723#endif
4724    // Save whole 128bit (16 bytes) XMM registers
4725    subptr(rsp, 16*num_xmm_regs);
4726    off = 0;
4727#ifdef _LP64
4728    if (VM_Version::supports_avx512novl()) {
4729      for (int n = 0; n < num_xmm_regs; n++) {
4730        vextractf32x4h(Address(rsp, off++*16), as_XMMRegister(n), 0);
4731      }
4732    } else {
4733      for (int n = 0; n < num_xmm_regs; n++) {
4734        movdqu(Address(rsp, off++*16), as_XMMRegister(n));
4735      }
4736    }
4737#else
4738    for (int n = 0; n < num_xmm_regs; n++) {
4739      movdqu(Address(rsp, off++*16), as_XMMRegister(n));
4740    }
4741#endif
4742  }
4743
4744  // Preserve registers across runtime call
4745  int incoming_argument_and_return_value_offset = -1;
4746  if (num_fpu_regs_in_use > 1) {
4747    // Must preserve all other FPU regs (could alternatively convert
4748    // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
4749    // FPU state, but cannot trust the C compiler)
4750    NEEDS_CLEANUP;
4751    // NOTE that in this case we also push the incoming argument(s) to
4752    // the stack and restore it later; we also use this stack slot to
4753    // hold the return value from dsin, dcos etc.
4754    for (int i = 0; i < num_fpu_regs_in_use; i++) {
4755      subptr(rsp, sizeof(jdouble));
4756      fstp_d(Address(rsp, 0));
4757    }
4758    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
4759    for (int i = nb_args-1; i >= 0; i--) {
4760      fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
4761    }
4762  }
4763
4764  subptr(rsp, nb_args*sizeof(jdouble));
4765  for (int i = 0; i < nb_args; i++) {
4766    fstp_d(Address(rsp, i*sizeof(jdouble)));
4767  }
4768
4769#ifdef _LP64
4770  if (nb_args > 0) {
4771    movdbl(xmm0, Address(rsp, 0));
4772  }
4773  if (nb_args > 1) {
4774    movdbl(xmm1, Address(rsp, sizeof(jdouble)));
4775  }
4776  assert(nb_args <= 2, "unsupported number of args");
4777#endif // _LP64
4778
4779  // NOTE: we must not use call_VM_leaf here because that requires a
4780  // complete interpreter frame in debug mode -- same bug as 4387334
4781  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
4782  // do proper 64bit abi
4783  // follow the proper 64bit ABI
4784  NEEDS_CLEANUP;
4785  // Need to add stack banging before this runtime call if it needs to
4786  // be taken; however, there is no generic stack banging routine at
4787  // the MacroAssembler level
4788
4789  MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
4790
4791#ifdef _LP64
4792  movsd(Address(rsp, 0), xmm0);
4793  fld_d(Address(rsp, 0));
4794#endif // _LP64
4795  addptr(rsp, sizeof(jdouble)*nb_args);
4796  if (num_fpu_regs_in_use > 1) {
4797    // Must save return value to stack and then restore entire FPU
4798    // stack except incoming arguments
4799    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
4800    for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
4801      fld_d(Address(rsp, 0));
4802      addptr(rsp, sizeof(jdouble));
4803    }
4804    fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
4805    addptr(rsp, sizeof(jdouble)*nb_args);
4806  }
4807
4808  off = 0;
4809  if (UseSSE == 1)  {
4810    for (int n = 0; n < 8; n++) {
4811      movflt(as_XMMRegister(n), Address(rsp, off++*sizeof(jdouble)));
4812    }
4813    addptr(rsp, sizeof(jdouble)*8);
4814  } else if (UseSSE >= 2)  {
4815    // Restore whole 128bit (16 bytes) XMM registers
4816#ifdef _LP64
4817    if (VM_Version::supports_avx512novl()) {
4818      for (int n = 0; n < num_xmm_regs; n++) {
4819        vinsertf32x4h(as_XMMRegister(n), Address(rsp, off++*16), 0);
4820      }
4821    }
4822    else {
4823      for (int n = 0; n < num_xmm_regs; n++) {
4824        movdqu(as_XMMRegister(n), Address(rsp, off++*16));
4825      }
4826    }
4827#else
4828    for (int n = 0; n < num_xmm_regs; n++) {
4829      movdqu(as_XMMRegister(n), Address(rsp, off++ * 16));
4830    }
4831#endif
4832    addptr(rsp, 16*num_xmm_regs);
4833
4834#ifdef COMPILER2
4835    if (MaxVectorSize > 16) {
4836      // Restore upper half of YMM registers.
4837      off = 0;
4838      for (int n = 0; n < num_xmm_regs; n++) {
4839        vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
4840      }
4841      addptr(rsp, 16*num_xmm_regs);
4842      if (UseAVX > 2) {
4843        off = 0;
4844        for (int n = 0; n < num_xmm_regs; n++) {
4845          vinsertf64x4h(as_XMMRegister(n), Address(rsp, off++*32));
4846        }
4847        addptr(rsp, 32*num_xmm_regs);
4848      }
4849    }
4850#endif
4851  }
4852  popa();
4853}
4854
4855static const double     pi_4 =  0.7853981633974483;
4856
4857void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
4858  // A hand-coded argument reduction for values with fabs(x) in (pi/4, pi/2)
4859  // was attempted in this code; unfortunately it appears that the
4860  // switch to 80-bit precision and back causes this to be
4861  // unprofitable compared with simply performing a runtime call if
4862  // the argument is out of the (-pi/4, pi/4) range.
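  // In outline: when the pi/4 constant is addressable and fabs(x) <= pi/4,
  // the x87 fsin/fcos/ftan instruction is used directly; otherwise the value
  // is handed to SharedRuntime::dsin/dcos/dtan via fp_runtime_fallback.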
4863
4864  Register tmp = noreg;
4865  if (!VM_Version::supports_cmov()) {
4866    // fcmp needs a temporary, so preserve rbx
4867    tmp = rbx;
4868    push(tmp);
4869  }
4870
4871  Label slow_case, done;
4872
4873  ExternalAddress pi4_adr = (address)&pi_4;
4874  if (reachable(pi4_adr)) {
4875    // x ?<= pi/4
4876    fld_d(pi4_adr);
4877    fld_s(1);                // Stack:  X  PI/4  X
4878    fabs();                  // Stack: |X| PI/4  X
4879    fcmp(tmp);
4880    jcc(Assembler::above, slow_case);
4881
4882    // fastest case: -pi/4 <= x <= pi/4
4883    switch(trig) {
4884    case 's':
4885      fsin();
4886      break;
4887    case 'c':
4888      fcos();
4889      break;
4890    case 't':
4891      ftan();
4892      break;
4893    default:
4894      assert(false, "bad intrinsic");
4895      break;
4896    }
4897    jmp(done);
4898  }
4899
4900  // slow case: runtime call
4901  bind(slow_case);
4902
4903  switch(trig) {
4904  case 's':
4905    {
4906      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
4907    }
4908    break;
4909  case 'c':
4910    {
4911      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
4912    }
4913    break;
4914  case 't':
4915    {
4916      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
4917    }
4918    break;
4919  default:
4920    assert(false, "bad intrinsic");
4921    break;
4922  }
4923
4924  // Come here with result in F-TOS
4925  bind(done);
4926
4927  if (tmp != noreg) {
4928    pop(tmp);
4929  }
4930}
4931
4932
4933// Look up the method for a megamorphic invokeinterface call.
4934// The target method is determined by <intf_klass, itable_index>.
4935// The receiver klass is in recv_klass.
4936// On success, the result will be in method_result, and execution falls through.
4937// On failure, execution transfers to the given label.
4938void MacroAssembler::lookup_interface_method(Register recv_klass,
4939                                             Register intf_klass,
4940                                             RegisterOrConstant itable_index,
4941                                             Register method_result,
4942                                             Register scan_temp,
4943                                             Label& L_no_such_interface) {
4944  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
4945  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4946         "caller must use same register for non-constant itable index as for method");
4947
4948  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
4949  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
4950  int itentry_off = itableMethodEntry::method_offset_in_bytes();
4951  int scan_step   = itableOffsetEntry::size() * wordSize;
4952  int vte_size    = vtableEntry::size() * wordSize;
4953  Address::ScaleFactor times_vte_scale = Address::times_ptr;
4954  assert(vte_size == wordSize, "else adjust times_vte_scale");
4955
4956  movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
4957
4958  // %%% Could store the aligned, prescaled offset in the klassoop.
4959  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
4960  if (HeapWordsPerLong > 1) {
4961    // Round up to align_object_offset boundary
4962    // see code for InstanceKlass::start_of_itable!
4963    round_to(scan_temp, BytesPerLong);
4964  }
4965
4966  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4967  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4968  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
4969
4970  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
4971  //   if (scan->interface() == intf) {
4972  //     result = (klass + scan->offset() + itable_index);
4973  //   }
4974  // }
4975  Label search, found_method;
4976
4977  for (int peel = 1; peel >= 0; peel--) {
4978    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
4979    cmpptr(intf_klass, method_result);
4980
4981    if (peel) {
4982      jccb(Assembler::equal, found_method);
4983    } else {
4984      jccb(Assembler::notEqual, search);
4985      // (invert the test to fall through to found_method...)
4986    }
4987
4988    if (!peel)  break;
4989
4990    bind(search);
4991
4992    // Check that the previous entry is non-null.  A null entry means that
4993    // the receiver class doesn't implement the interface, and wasn't the
4994    // same as when the caller was compiled.
4995    testptr(method_result, method_result);
4996    jcc(Assembler::zero, L_no_such_interface);
4997    addptr(scan_temp, scan_step);
4998  }
4999
5000  bind(found_method);
5001
5002  // Got a hit.
5003  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
5004  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
5005}
5006
5007
5008// virtual method calling
5009void MacroAssembler::lookup_virtual_method(Register recv_klass,
5010                                           RegisterOrConstant vtable_index,
5011                                           Register method_result) {
5012  const int base = InstanceKlass::vtable_start_offset() * wordSize;
5013  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
5014  Address vtable_entry_addr(recv_klass,
5015                            vtable_index, Address::times_ptr,
5016                            base + vtableEntry::method_offset_in_bytes());
5017  movptr(method_result, vtable_entry_addr);
5018}
5019
5020
5021void MacroAssembler::check_klass_subtype(Register sub_klass,
5022                           Register super_klass,
5023                           Register temp_reg,
5024                           Label& L_success) {
5025  Label L_failure;
5026  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
5027  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
5028  bind(L_failure);
5029}
5030
5031
5032void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
5033                                                   Register super_klass,
5034                                                   Register temp_reg,
5035                                                   Label* L_success,
5036                                                   Label* L_failure,
5037                                                   Label* L_slow_path,
5038                                        RegisterOrConstant super_check_offset) {
5039  assert_different_registers(sub_klass, super_klass, temp_reg);
5040  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
5041  if (super_check_offset.is_register()) {
5042    assert_different_registers(sub_klass, super_klass,
5043                               super_check_offset.as_register());
5044  } else if (must_load_sco) {
5045    assert(temp_reg != noreg, "supply either a temp or a register offset");
5046  }
5047
5048  Label L_fallthrough;
5049  int label_nulls = 0;
5050  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
5051  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
5052  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
5053  assert(label_nulls <= 1, "at most one NULL in the batch");
5054
5055  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5056  int sco_offset = in_bytes(Klass::super_check_offset_offset());
5057  Address super_check_offset_addr(super_klass, sco_offset);
5058
5059  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
5060  // range of a jccb.  If this routine grows larger, reconsider at
5061  // least some of these.
5062#define local_jcc(assembler_cond, label)                                \
5063  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
5064  else                             jcc( assembler_cond, label) /*omit semi*/
5065
5066  // Hacked jmp, which may only be used just before L_fallthrough.
5067#define final_jmp(label)                                                \
5068  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
5069  else                            jmp(label)                /*omit semi*/
5070
5071  // If the pointers are equal, we are done (e.g., String[] elements).
5072  // This self-check enables sharing of secondary supertype arrays among
5073  // non-primary types such as array-of-interface.  Otherwise, each such
5074  // type would need its own customized SSA.
5075  // We move this check to the front of the fast path because many
5076  // type checks are in fact trivially successful in this manner,
5077  // so we get a nicely predicted branch right at the start of the check.
5078  cmpptr(sub_klass, super_klass);
5079  local_jcc(Assembler::equal, *L_success);
5080
5081  // Check the supertype display:
5082  if (must_load_sco) {
5083    // Positive movl does right thing on LP64.
5084    movl(temp_reg, super_check_offset_addr);
5085    super_check_offset = RegisterOrConstant(temp_reg);
5086  }
5087  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
5088  cmpptr(super_klass, super_check_addr); // load displayed supertype
5089
5090  // This check has worked decisively for primary supers.
5091  // Secondary supers are sought in the super_cache ('super_cache_addr').
5092  // (Secondary supers are interfaces and very deeply nested subtypes.)
5093  // This works in the same check above because of a tricky aliasing
5094  // between the super_cache and the primary super display elements.
5095  // (The 'super_check_addr' can address either, as the case requires.)
5096  // Note that the cache is updated below if it does not help us find
5097  // what we need immediately.
5098  // So if it was a primary super, we can just fail immediately.
5099  // Otherwise, it's the slow path for us (no success at this point).
5100
5101  if (super_check_offset.is_register()) {
5102    local_jcc(Assembler::equal, *L_success);
5103    cmpl(super_check_offset.as_register(), sc_offset);
5104    if (L_failure == &L_fallthrough) {
5105      local_jcc(Assembler::equal, *L_slow_path);
5106    } else {
5107      local_jcc(Assembler::notEqual, *L_failure);
5108      final_jmp(*L_slow_path);
5109    }
5110  } else if (super_check_offset.as_constant() == sc_offset) {
5111    // Need a slow path; fast failure is impossible.
5112    if (L_slow_path == &L_fallthrough) {
5113      local_jcc(Assembler::equal, *L_success);
5114    } else {
5115      local_jcc(Assembler::notEqual, *L_slow_path);
5116      final_jmp(*L_success);
5117    }
5118  } else {
5119    // No slow path; it's a fast decision.
5120    if (L_failure == &L_fallthrough) {
5121      local_jcc(Assembler::equal, *L_success);
5122    } else {
5123      local_jcc(Assembler::notEqual, *L_failure);
5124      final_jmp(*L_success);
5125    }
5126  }
5127
5128  bind(L_fallthrough);
5129
5130#undef local_jcc
5131#undef final_jmp
5132}
5133
5134
5135void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
5136                                                   Register super_klass,
5137                                                   Register temp_reg,
5138                                                   Register temp2_reg,
5139                                                   Label* L_success,
5140                                                   Label* L_failure,
5141                                                   bool set_cond_codes) {
5142  assert_different_registers(sub_klass, super_klass, temp_reg);
5143  if (temp2_reg != noreg)
5144    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
5145#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
5146
5147  Label L_fallthrough;
5148  int label_nulls = 0;
5149  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
5150  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
5151  assert(label_nulls <= 1, "at most one NULL in the batch");
5152
5153  // a couple of useful fields in sub_klass:
5154  int ss_offset = in_bytes(Klass::secondary_supers_offset());
5155  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5156  Address secondary_supers_addr(sub_klass, ss_offset);
5157  Address super_cache_addr(     sub_klass, sc_offset);
5158
5159  // Do a linear scan of the secondary super-klass chain.
5160  // This code is rarely used, so simplicity is a virtue here.
5161  // The repne_scan instruction uses fixed registers, which we must spill.
5162  // Don't worry too much about pre-existing connections with the input regs.
5163
5164  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
5165  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
5166
5167  // Get super_klass value into rax (even if it was in rdi or rcx).
5168  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
5169  if (super_klass != rax || UseCompressedOops) {
5170    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
5171    mov(rax, super_klass);
5172  }
5173  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
5174  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
5175
5176#ifndef PRODUCT
5177  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
5178  ExternalAddress pst_counter_addr((address) pst_counter);
5179  NOT_LP64(  incrementl(pst_counter_addr) );
5180  LP64_ONLY( lea(rcx, pst_counter_addr) );
5181  LP64_ONLY( incrementl(Address(rcx, 0)) );
5182#endif //PRODUCT
5183
5184  // We will consult the secondary-super array.
5185  movptr(rdi, secondary_supers_addr);
5186  // Load the array length.  (Positive movl does right thing on LP64.)
5187  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
5188  // Skip to start of data.
5189  addptr(rdi, Array<Klass*>::base_offset_in_bytes());
5190
5191  // Scan RCX words at [RDI] for an occurrence of RAX.
5192  // Set NZ/Z based on last compare.
5193  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
5194  // not change flags (only the scas instruction, which is repeated, sets flags).
5195  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
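  // In effect: for (i = 0; i < len; i++) if (secondary_supers[i] == super) break;
  // with the Z flag left set exactly when a matching entry was found.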
5196
5197  testptr(rax, rax); // Set Z = 0
5198  repne_scan();
5199
5200  // Unspill the temp. registers:
5201  if (pushed_rdi)  pop(rdi);
5202  if (pushed_rcx)  pop(rcx);
5203  if (pushed_rax)  pop(rax);
5204
5205  if (set_cond_codes) {
5206    // Special hack for the AD files:  rdi is guaranteed non-zero.
5207    assert(!pushed_rdi, "rdi must be left non-NULL");
5208    // Also, the condition codes are properly set Z/NZ on succeed/failure.
5209  }
5210
5211  if (L_failure == &L_fallthrough)
5212        jccb(Assembler::notEqual, *L_failure);
5213  else  jcc(Assembler::notEqual, *L_failure);
5214
5215  // Success.  Cache the super we found and proceed in triumph.
5216  movptr(super_cache_addr, super_klass);
5217
5218  if (L_success != &L_fallthrough) {
5219    jmp(*L_success);
5220  }
5221
5222#undef IS_A_TEMP
5223
5224  bind(L_fallthrough);
5225}
5226
5227
5228void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
5229  if (VM_Version::supports_cmov()) {
5230    cmovl(cc, dst, src);
5231  } else {
5232    Label L;
5233    jccb(negate_condition(cc), L);
5234    movl(dst, src);
5235    bind(L);
5236  }
5237}
5238
5239void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5240  if (VM_Version::supports_cmov()) {
5241    cmovl(cc, dst, src);
5242  } else {
5243    Label L;
5244    jccb(negate_condition(cc), L);
5245    movl(dst, src);
5246    bind(L);
5247  }
5248}
5249
5250void MacroAssembler::verify_oop(Register reg, const char* s) {
5251  if (!VerifyOops) return;
5252
5253  // Pass register number to verify_oop_subroutine
5254  const char* b = NULL;
5255  {
5256    ResourceMark rm;
5257    stringStream ss;
5258    ss.print("verify_oop: %s: %s", reg->name(), s);
5259    b = code_string(ss.as_string());
5260  }
5261  BLOCK_COMMENT("verify_oop {");
5262#ifdef _LP64
5263  push(rscratch1);                    // save r10, trashed by movptr()
5264#endif
5265  push(rax);                          // save rax,
5266  push(reg);                          // pass register argument
5267  ExternalAddress buffer((address) b);
5268  // avoid using pushptr, as it modifies scratch registers
5269  // and our contract is not to modify anything
5270  movptr(rax, buffer.addr());
5271  push(rax);
5272  // call indirectly to solve generation ordering problem
5273  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5274  call(rax);
5275  // Caller pops the arguments (oop, message) and restores rax, r10
5276  BLOCK_COMMENT("} verify_oop");
5277}
5278
5279
5280RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
5281                                                      Register tmp,
5282                                                      int offset) {
5283  intptr_t value = *delayed_value_addr;
5284  if (value != 0)
5285    return RegisterOrConstant(value + offset);
5286
5287  // load indirectly to solve generation ordering problem
5288  movptr(tmp, ExternalAddress((address) delayed_value_addr));
5289
5290#ifdef ASSERT
5291  { Label L;
5292    testptr(tmp, tmp);
5293    if (WizardMode) {
5294      const char* buf = NULL;
5295      {
5296        ResourceMark rm;
5297        stringStream ss;
5298        ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]);
5299        buf = code_string(ss.as_string());
5300      }
5301      jcc(Assembler::notZero, L);
5302      STOP(buf);
5303    } else {
5304      jccb(Assembler::notZero, L);
5305      hlt();
5306    }
5307    bind(L);
5308  }
5309#endif
5310
5311  if (offset != 0)
5312    addptr(tmp, offset);
5313
5314  return RegisterOrConstant(tmp);
5315}
5316
5317
5318Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5319                                         int extra_slot_offset) {
5320  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5321  int stackElementSize = Interpreter::stackElementSize;
5322  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5323#ifdef ASSERT
5324  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5325  assert(offset1 - offset == stackElementSize, "correct arithmetic");
5326#endif
5327  Register             scale_reg    = noreg;
5328  Address::ScaleFactor scale_factor = Address::no_scale;
5329  if (arg_slot.is_constant()) {
5330    offset += arg_slot.as_constant() * stackElementSize;
5331  } else {
5332    scale_reg    = arg_slot.as_register();
5333    scale_factor = Address::times(stackElementSize);
5334  }
5335  offset += wordSize;           // return PC is on stack
5336  return Address(rsp, scale_reg, scale_factor, offset);
5337}
5338
5339
5340void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
5341  if (!VerifyOops) return;
5342
5343  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
5344  // Pass register number to verify_oop_subroutine
5345  const char* b = NULL;
5346  {
5347    ResourceMark rm;
5348    stringStream ss;
5349    ss.print("verify_oop_addr: %s", s);
5350    b = code_string(ss.as_string());
5351  }
5352#ifdef _LP64
5353  push(rscratch1);                    // save r10, trashed by movptr()
5354#endif
5355  push(rax);                          // save rax,
5356  // addr may contain rsp so we will have to adjust it based on the push
5357  // we just did (and on 64 bit we do two pushes)
5358  // NOTE: the 64bit code used to have a bug here: it did movq(addr, rax), which
5359  // stores rax into addr, the reverse of what was intended.
5360  if (addr.uses(rsp)) {
5361    lea(rax, addr);
5362    pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5363  } else {
5364    pushptr(addr);
5365  }
5366
5367  ExternalAddress buffer((address) b);
5368  // pass msg argument
5369  // avoid using pushptr, as it modifies scratch registers
5370  // and our contract is not to modify anything
5371  movptr(rax, buffer.addr());
5372  push(rax);
5373
5374  // call indirectly to solve generation ordering problem
5375  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5376  call(rax);
5377  // Caller pops the arguments (addr, message) and restores rax, r10.
5378}
5379
5380void MacroAssembler::verify_tlab() {
5381#ifdef ASSERT
5382  if (UseTLAB && VerifyOops) {
5383    Label next, ok;
5384    Register t1 = rsi;
5385    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
5386
5387    push(t1);
5388    NOT_LP64(push(thread_reg));
5389    NOT_LP64(get_thread(thread_reg));
5390
5391    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5392    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5393    jcc(Assembler::aboveEqual, next);
5394    STOP("assert(top >= start)");
5395    should_not_reach_here();
5396
5397    bind(next);
5398    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
5399    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5400    jcc(Assembler::aboveEqual, ok);
5401    STOP("assert(top <= end)");
5402    should_not_reach_here();
5403
5404    bind(ok);
5405    NOT_LP64(pop(thread_reg));
5406    pop(t1);
5407  }
5408#endif
5409}
5410
5411class ControlWord {
5412 public:
5413  int32_t _value;
5414
5415  int  rounding_control() const        { return  (_value >> 10) & 3      ; }
5416  int  precision_control() const       { return  (_value >>  8) & 3      ; }
5417  bool precision() const               { return ((_value >>  5) & 1) != 0; }
5418  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
5419  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
5420  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
5421  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
5422  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
5423
5424  void print() const {
5425    // rounding control
5426    const char* rc;
5427    switch (rounding_control()) {
5428      case 0: rc = "round near"; break;
5429      case 1: rc = "round down"; break;
5430      case 2: rc = "round up  "; break;
5431      case 3: rc = "chop      "; break;
5432    };
5433    // precision control
5434    const char* pc;
5435    switch (precision_control()) {
5436      case 0: pc = "24 bits "; break;
5437      case 1: pc = "reserved"; break;
5438      case 2: pc = "53 bits "; break;
5439      case 3: pc = "64 bits "; break;
5440    };
5441    // flags
5442    char f[9];
5443    f[0] = ' ';
5444    f[1] = ' ';
5445    f[2] = (precision   ()) ? 'P' : 'p';
5446    f[3] = (underflow   ()) ? 'U' : 'u';
5447    f[4] = (overflow    ()) ? 'O' : 'o';
5448    f[5] = (zero_divide ()) ? 'Z' : 'z';
5449    f[6] = (denormalized()) ? 'D' : 'd';
5450    f[7] = (invalid     ()) ? 'I' : 'i';
5451    f[8] = '\x0';
5452    // output
5453    printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5454  }
5455
5456};
5457
5458class StatusWord {
5459 public:
5460  int32_t _value;
5461
5462  bool busy() const                    { return ((_value >> 15) & 1) != 0; }
5463  bool C3() const                      { return ((_value >> 14) & 1) != 0; }
5464  bool C2() const                      { return ((_value >> 10) & 1) != 0; }
5465  bool C1() const                      { return ((_value >>  9) & 1) != 0; }
5466  bool C0() const                      { return ((_value >>  8) & 1) != 0; }
5467  int  top() const                     { return  (_value >> 11) & 7      ; }
5468  bool error_status() const            { return ((_value >>  7) & 1) != 0; }
5469  bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
5470  bool precision() const               { return ((_value >>  5) & 1) != 0; }
5471  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
5472  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
5473  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
5474  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
5475  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
5476
5477  void print() const {
5478    // condition codes
5479    char c[5];
5480    c[0] = (C3()) ? '3' : '-';
5481    c[1] = (C2()) ? '2' : '-';
5482    c[2] = (C1()) ? '1' : '-';
5483    c[3] = (C0()) ? '0' : '-';
5484    c[4] = '\x0';
5485    // flags
5486    char f[9];
5487    f[0] = (error_status()) ? 'E' : '-';
5488    f[1] = (stack_fault ()) ? 'S' : '-';
5489    f[2] = (precision   ()) ? 'P' : '-';
5490    f[3] = (underflow   ()) ? 'U' : '-';
5491    f[4] = (overflow    ()) ? 'O' : '-';
5492    f[5] = (zero_divide ()) ? 'Z' : '-';
5493    f[6] = (denormalized()) ? 'D' : '-';
5494    f[7] = (invalid     ()) ? 'I' : '-';
5495    f[8] = '\x0';
5496    // output
5497    printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
5498  }
5499
5500};
5501
5502class TagWord {
5503 public:
5504  int32_t _value;
5505
5506  int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
5507
5508  void print() const {
5509    printf("%04x", _value & 0xFFFF);
5510  }
5511
5512};
5513
5514class FPU_Register {
5515 public:
5516  int32_t _m0;
5517  int32_t _m1;
5518  int16_t _ex;
5519
5520  bool is_indefinite() const           {
5521    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5522  }
5523
5524  void print() const {
5525    char  sign = (_ex < 0) ? '-' : '+';
5526    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
5527    printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
5528  };
5529
5530};
5531
5532class FPU_State {
5533 public:
5534  enum {
5535    register_size       = 10,
5536    number_of_registers =  8,
5537    register_mask       =  7
5538  };
5539
5540  ControlWord  _control_word;
5541  StatusWord   _status_word;
5542  TagWord      _tag_word;
5543  int32_t      _error_offset;
5544  int32_t      _error_selector;
5545  int32_t      _data_offset;
5546  int32_t      _data_selector;
5547  int8_t       _register[register_size * number_of_registers];
5548
5549  int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5550  FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
5551
5552  const char* tag_as_string(int tag) const {
5553    switch (tag) {
5554      case 0: return "valid";
5555      case 1: return "zero";
5556      case 2: return "special";
5557      case 3: return "empty";
5558    }
5559    ShouldNotReachHere();
5560    return NULL;
5561  }
5562
5563  void print() const {
5564    // print computation registers
5565    { int t = _status_word.top();
5566      for (int i = 0; i < number_of_registers; i++) {
5567        int j = (i - t) & register_mask;
5568        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5569        st(j)->print();
5570        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5571      }
5572    }
5573    printf("\n");
5574    // print control registers
5575    printf("ctrl = "); _control_word.print(); printf("\n");
5576    printf("stat = "); _status_word .print(); printf("\n");
5577    printf("tags = "); _tag_word    .print(); printf("\n");
5578  }
5579
5580};
5581
5582class Flag_Register {
5583 public:
5584  int32_t _value;
5585
5586  bool overflow() const                { return ((_value >> 11) & 1) != 0; }
5587  bool direction() const               { return ((_value >> 10) & 1) != 0; }
5588  bool sign() const                    { return ((_value >>  7) & 1) != 0; }
5589  bool zero() const                    { return ((_value >>  6) & 1) != 0; }
5590  bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
5591  bool parity() const                  { return ((_value >>  2) & 1) != 0; }
5592  bool carry() const                   { return ((_value >>  0) & 1) != 0; }
5593
5594  void print() const {
5595    // flags
5596    char f[8];
5597    f[0] = (overflow       ()) ? 'O' : '-';
5598    f[1] = (direction      ()) ? 'D' : '-';
5599    f[2] = (sign           ()) ? 'S' : '-';
5600    f[3] = (zero           ()) ? 'Z' : '-';
5601    f[4] = (auxiliary_carry()) ? 'A' : '-';
5602    f[5] = (parity         ()) ? 'P' : '-';
5603    f[6] = (carry          ()) ? 'C' : '-';
5604    f[7] = '\x0';
5605    // output
5606    printf("%08x  flags = %s", _value, f);
5607  }
5608
5609};
5610
5611class IU_Register {
5612 public:
5613  int32_t _value;
5614
5615  void print() const {
5616    printf("%08x  %11d", _value, _value);
5617  }
5618
5619};
5620
5621class IU_State {
5622 public:
5623  Flag_Register _eflags;
5624  IU_Register   _rdi;
5625  IU_Register   _rsi;
5626  IU_Register   _rbp;
5627  IU_Register   _rsp;
5628  IU_Register   _rbx;
5629  IU_Register   _rdx;
5630  IU_Register   _rcx;
5631  IU_Register   _rax;
5632
5633  void print() const {
5634    // computation registers
5635    printf("rax  = "); _rax.print(); printf("\n");
5636    printf("rbx  = "); _rbx.print(); printf("\n");
5637    printf("rcx  = "); _rcx.print(); printf("\n");
5638    printf("rdx  = "); _rdx.print(); printf("\n");
5639    printf("rdi  = "); _rdi.print(); printf("\n");
5640    printf("rsi  = "); _rsi.print(); printf("\n");
5641    printf("rbp  = "); _rbp.print(); printf("\n");
5642    printf("rsp  = "); _rsp.print(); printf("\n");
5643    printf("\n");
5644    // control registers
5645    printf("flgs = "); _eflags.print(); printf("\n");
5646  }
5647};
5648
5649
5650class CPU_State {
5651 public:
5652  FPU_State _fpu_state;
5653  IU_State  _iu_state;
5654
5655  void print() const {
5656    printf("--------------------------------------------------\n");
5657    _iu_state .print();
5658    printf("\n");
5659    _fpu_state.print();
5660    printf("--------------------------------------------------\n");
5661  }
5662
5663};
5664
5665
5666static void _print_CPU_state(CPU_State* state) {
5667  state->print();
5668};
5669
5670
5671void MacroAssembler::print_CPU_state() {
5672  push_CPU_state();
5673  push(rsp);                // pass CPU state
5674  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5675  addptr(rsp, wordSize);       // discard argument
5676  pop_CPU_state();
5677}
5678
5679
5680static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
5681  static int counter = 0;
5682  FPU_State* fs = &state->_fpu_state;
5683  counter++;
5684  // For leaf calls, only verify that the top few elements remain empty.
5685  // We only need 1 empty at the top for C2 code.
5686  if( stack_depth < 0 ) {
5687    if( fs->tag_for_st(7) != 3 ) {
5688      printf("FPR7 not empty\n");
5689      state->print();
5690      assert(false, "error");
5691      return false;
5692    }
5693    return true;                // All other stack states do not matter
5694  }
5695
5696  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
5697         "bad FPU control word");
5698
5699  // compute stack depth
5700  int i = 0;
5701  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
5702  int d = i;
5703  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
5704  // verify findings
5705  if (i != FPU_State::number_of_registers) {
5706    // stack not contiguous
5707    printf("%s: stack not contiguous at ST%d\n", s, i);
5708    state->print();
5709    assert(false, "error");
5710    return false;
5711  }
5712  // check if computed stack depth corresponds to expected stack depth
5713  if (stack_depth < 0) {
5714    // expected stack depth is -stack_depth or less
5715    if (d > -stack_depth) {
5716      // too many elements on the stack
5717      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
5718      state->print();
5719      assert(false, "error");
5720      return false;
5721    }
5722  } else {
5723    // expected stack depth is stack_depth
5724    if (d != stack_depth) {
5725      // wrong stack depth
5726      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
5727      state->print();
5728      assert(false, "error");
5729      return false;
5730    }
5731  }
5732  // everything is cool
5733  return true;
5734}
5735
5736
5737void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
5738  if (!VerifyFPU) return;
5739  push_CPU_state();
5740  push(rsp);                // pass CPU state
5741  ExternalAddress msg((address) s);
5742  // pass message string s
5743  pushptr(msg.addr());
5744  push(stack_depth);        // pass stack depth
5745  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
5746  addptr(rsp, 3 * wordSize);   // discard arguments
5747  // check for error
5748  { Label L;
5749    testl(rax, rax);
5750    jcc(Assembler::notZero, L);
5751    int3();                  // break if error condition
5752    bind(L);
5753  }
5754  pop_CPU_state();
5755}
5756
5757void MacroAssembler::restore_cpu_control_state_after_jni() {
5758  // Either restore the MXCSR register after returning from the JNI Call
5759  // or verify that it wasn't changed (with -Xcheck:jni flag).
5760  if (VM_Version::supports_sse()) {
5761    if (RestoreMXCSROnJNICalls) {
5762      ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
5763    } else if (CheckJNICalls) {
5764      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5765    }
5766  }
5767  if (VM_Version::supports_avx()) {
5768    // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5769    vzeroupper();
5770  }
5771
5772#ifndef _LP64
5773  // Either restore the x87 floating-point control word after returning
5774  // from the JNI call or verify that it wasn't changed.
5775  if (CheckJNICalls) {
5776    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
5777  }
5778#endif // _LP64
5779}
5780
5781
5782void MacroAssembler::load_klass(Register dst, Register src) {
5783#ifdef _LP64
5784  if (UseCompressedClassPointers) {
5785    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5786    decode_klass_not_null(dst);
5787  } else
5788#endif
5789    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5790}
5791
5792void MacroAssembler::load_prototype_header(Register dst, Register src) {
5793  load_klass(dst, src);
5794  movptr(dst, Address(dst, Klass::prototype_header_offset()));
5795}
5796
5797void MacroAssembler::store_klass(Register dst, Register src) {
5798#ifdef _LP64
5799  if (UseCompressedClassPointers) {
5800    encode_klass_not_null(src);
5801    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5802  } else
5803#endif
5804    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5805}
5806
5807void MacroAssembler::load_heap_oop(Register dst, Address src) {
5808#ifdef _LP64
5809  // FIXME: Must change all places where we try to load the klass.
5810  if (UseCompressedOops) {
5811    movl(dst, src);
5812    decode_heap_oop(dst);
5813  } else
5814#endif
5815    movptr(dst, src);
5816}
5817
5818// Doesn't do verification, generates fixed size code
5819void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
5820#ifdef _LP64
5821  if (UseCompressedOops) {
5822    movl(dst, src);
5823    decode_heap_oop_not_null(dst);
5824  } else
5825#endif
5826    movptr(dst, src);
5827}
5828
5829void MacroAssembler::store_heap_oop(Address dst, Register src) {
5830#ifdef _LP64
5831  if (UseCompressedOops) {
5832    assert(!dst.uses(src), "not enough registers");
5833    encode_heap_oop(src);
5834    movl(dst, src);
5835  } else
5836#endif
5837    movptr(dst, src);
5838}
5839
5840void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
5841  assert_different_registers(src1, tmp);
5842#ifdef _LP64
5843  if (UseCompressedOops) {
5844    bool did_push = false;
5845    if (tmp == noreg) {
5846      tmp = rax;
5847      push(tmp);
5848      did_push = true;
5849      assert(!src2.uses(rsp), "can't push");
5850    }
5851    load_heap_oop(tmp, src2);
5852    cmpptr(src1, tmp);
5853    if (did_push)  pop(tmp);
5854  } else
5855#endif
5856    cmpptr(src1, src2);
5857}
5858
5859// Used for storing NULLs.
5860void MacroAssembler::store_heap_oop_null(Address dst) {
5861#ifdef _LP64
5862  if (UseCompressedOops) {
5863    movl(dst, (int32_t)NULL_WORD);
5864  } else {
5865    movslq(dst, (int32_t)NULL_WORD);
5866  }
5867#else
5868  movl(dst, (int32_t)NULL_WORD);
5869#endif
5870}
5871
5872#ifdef _LP64
5873void MacroAssembler::store_klass_gap(Register dst, Register src) {
5874  if (UseCompressedClassPointers) {
5875    // Store to klass gap in destination
5876    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5877  }
5878}
5879
5880#ifdef ASSERT
5881void MacroAssembler::verify_heapbase(const char* msg) {
5882  assert (UseCompressedOops, "should be compressed");
5883  assert (Universe::heap() != NULL, "java heap should be initialized");
5884  if (CheckCompressedOops) {
5885    Label ok;
5886    push(rscratch1); // cmpptr trashes rscratch1
5887    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
5888    jcc(Assembler::equal, ok);
5889    STOP(msg);
5890    bind(ok);
5891    pop(rscratch1);
5892  }
5893}
5894#endif
5895
5896// Algorithm must match oop.inline.hpp encode_heap_oop.
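// Roughly: narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> narrow_oop_shift;
// when a heap base is in use, the NULL case is handled below by cmov-ing in the
// heap base before subtracting it again.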
5897void MacroAssembler::encode_heap_oop(Register r) {
5898#ifdef ASSERT
5899  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5900#endif
5901  verify_oop(r, "broken oop in encode_heap_oop");
5902  if (Universe::narrow_oop_base() == NULL) {
5903    if (Universe::narrow_oop_shift() != 0) {
5904      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5905      shrq(r, LogMinObjAlignmentInBytes);
5906    }
5907    return;
5908  }
5909  testq(r, r);
5910  cmovq(Assembler::equal, r, r12_heapbase);
5911  subq(r, r12_heapbase);
5912  shrq(r, LogMinObjAlignmentInBytes);
5913}
5914
5915void MacroAssembler::encode_heap_oop_not_null(Register r) {
5916#ifdef ASSERT
5917  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5918  if (CheckCompressedOops) {
5919    Label ok;
5920    testq(r, r);
5921    jcc(Assembler::notEqual, ok);
5922    STOP("null oop passed to encode_heap_oop_not_null");
5923    bind(ok);
5924  }
5925#endif
5926  verify_oop(r, "broken oop in encode_heap_oop_not_null");
5927  if (Universe::narrow_oop_base() != NULL) {
5928    subq(r, r12_heapbase);
5929  }
5930  if (Universe::narrow_oop_shift() != 0) {
5931    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5932    shrq(r, LogMinObjAlignmentInBytes);
5933  }
5934}
5935
5936void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5937#ifdef ASSERT
5938  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5939  if (CheckCompressedOops) {
5940    Label ok;
5941    testq(src, src);
5942    jcc(Assembler::notEqual, ok);
5943    STOP("null oop passed to encode_heap_oop_not_null2");
5944    bind(ok);
5945  }
5946#endif
5947  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
5948  if (dst != src) {
5949    movq(dst, src);
5950  }
5951  if (Universe::narrow_oop_base() != NULL) {
5952    subq(dst, r12_heapbase);
5953  }
5954  if (Universe::narrow_oop_shift() != 0) {
5955    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5956    shrq(dst, LogMinObjAlignmentInBytes);
5957  }
5958}
5959
5960void  MacroAssembler::decode_heap_oop(Register r) {
5961#ifdef ASSERT
5962  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5963#endif
5964  if (Universe::narrow_oop_base() == NULL) {
5965    if (Universe::narrow_oop_shift() != 0) {
5966      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5967      shlq(r, LogMinObjAlignmentInBytes);
5968    }
5969  } else {
5970    Label done;
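    // shlq sets ZF when the narrow oop was NULL (the shift count is non-zero
    // here), so the heap-base add below is skipped and NULL decodes to NULL.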
5971    shlq(r, LogMinObjAlignmentInBytes);
5972    jccb(Assembler::equal, done);
5973    addq(r, r12_heapbase);
5974    bind(done);
5975  }
5976  verify_oop(r, "broken oop in decode_heap_oop");
5977}
5978
5979void  MacroAssembler::decode_heap_oop_not_null(Register r) {
5980  // Note: it will change flags
5981  assert (UseCompressedOops, "should only be used for compressed headers");
5982  assert (Universe::heap() != NULL, "java heap should be initialized");
5983  // Cannot assert, unverified entry point counts instructions (see .ad file)
5984  // vtableStubs also counts instructions in pd_code_size_limit.
5985  // Also do not verify_oop as this is called by verify_oop.
5986  if (Universe::narrow_oop_shift() != 0) {
5987    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5988    shlq(r, LogMinObjAlignmentInBytes);
5989    if (Universe::narrow_oop_base() != NULL) {
5990      addq(r, r12_heapbase);
5991    }
5992  } else {
5993    assert (Universe::narrow_oop_base() == NULL, "sanity");
5994  }
5995}
5996
5997void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5998  // Note: it will change flags
5999  assert (UseCompressedOops, "should only be used for compressed headers");
6000  assert (Universe::heap() != NULL, "java heap should be initialized");
6001  // Cannot assert, unverified entry point counts instructions (see .ad file)
6002  // vtableStubs also counts instructions in pd_code_size_limit.
6003  // Also do not verify_oop as this is called by verify_oop.
6004  if (Universe::narrow_oop_shift() != 0) {
6005    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6006    if (LogMinObjAlignmentInBytes == Address::times_8) {
6007      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
6008    } else {
6009      if (dst != src) {
6010        movq(dst, src);
6011      }
6012      shlq(dst, LogMinObjAlignmentInBytes);
6013      if (Universe::narrow_oop_base() != NULL) {
6014        addq(dst, r12_heapbase);
6015      }
6016    }
6017  } else {
6018    assert (Universe::narrow_oop_base() == NULL, "sanity");
6019    if (dst != src) {
6020      movq(dst, src);
6021    }
6022  }
6023}
6024
6025void MacroAssembler::encode_klass_not_null(Register r) {
6026  if (Universe::narrow_klass_base() != NULL) {
6027    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
6028    assert(r != r12_heapbase, "Encoding a klass in r12");
6029    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
6030    subq(r, r12_heapbase);
6031  }
6032  if (Universe::narrow_klass_shift() != 0) {
6033    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6034    shrq(r, LogKlassAlignmentInBytes);
6035  }
6036  if (Universe::narrow_klass_base() != NULL) {
6037    reinit_heapbase();
6038  }
6039}
6040
6041void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
6042  if (dst == src) {
6043    encode_klass_not_null(src);
6044  } else {
6045    if (Universe::narrow_klass_base() != NULL) {
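      // Computes dst = src - narrow_klass_base as dst = -base + src, leaving src untouched.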
6046      mov64(dst, (int64_t)Universe::narrow_klass_base());
6047      negq(dst);
6048      addq(dst, src);
6049    } else {
6050      movptr(dst, src);
6051    }
6052    if (Universe::narrow_klass_shift() != 0) {
6053      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6054      shrq(dst, LogKlassAlignmentInBytes);
6055    }
6056  }
6057}
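
// Illustrative sketch of the klass encoding above (not generated code):
//
//   narrowKlass encode(Klass* k) {
//     return (narrowKlass)(((uintptr_t)k - narrow_klass_base) >> narrow_klass_shift);
//   }
//
// The two-register variant computes -base + src rather than src - base so the
// src register is left untouched.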
6058
6059// Function instr_size_for_decode_klass_not_null() counts the instructions
6060// generated by decode_klass_not_null(register r) and reinit_heapbase(),
6061// when (Universe::heap() != NULL).  Hence, if the instructions they
6062// generate change, then this method needs to be updated.
6063int MacroAssembler::instr_size_for_decode_klass_not_null() {
6064  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
6065  if (Universe::narrow_klass_base() != NULL) {
6066    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
6067    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
6068  } else {
6069    // longest decode_klass_not_null sequence: mov64, leaq
6070    return 16;
6071  }
6072}
6073
6074// !!! If the instructions that get generated here change then function
6075// instr_size_for_decode_klass_not_null() needs to get updated.
6076void  MacroAssembler::decode_klass_not_null(Register r) {
6077  // Note: it will change flags
6078  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6079  assert(r != r12_heapbase, "Decoding a klass in r12");
6080  // Cannot assert, unverified entry point counts instructions (see .ad file)
6081  // vtableStubs also counts instructions in pd_code_size_limit.
6082  // Also do not verify_oop as this is called by verify_oop.
6083  if (Universe::narrow_klass_shift() != 0) {
6084    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6085    shlq(r, LogKlassAlignmentInBytes);
6086  }
6087  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
6088  if (Universe::narrow_klass_base() != NULL) {
6089    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
6090    addq(r, r12_heapbase);
6091    reinit_heapbase();
6092  }
6093}
6094
6095void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
6096  // Note: it will change flags
6097  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6098  if (dst == src) {
6099    decode_klass_not_null(dst);
6100  } else {
6101    // Cannot assert, unverified entry point counts instructions (see .ad file)
6102    // vtableStubs also counts instructions in pd_code_size_limit.
6103    // Also do not verify_oop as this is called by verify_oop.
6104    mov64(dst, (int64_t)Universe::narrow_klass_base());
6105    if (Universe::narrow_klass_shift() != 0) {
6106      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6107      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
6108      leaq(dst, Address(dst, src, Address::times_8, 0));
6109    } else {
6110      addq(dst, src);
6111    }
6112  }
6113}
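
// Illustrative sketch of the klass decode above (not generated code):
//
//   Klass* decode(narrowKlass v) {
//     return (Klass*)(narrow_klass_base + ((uintptr_t)v << narrow_klass_shift));
//   }
//
// With a shift of 3 the scaled add is a single leaq after the base mov64.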
6114
6115void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6116  assert (UseCompressedOops, "should only be used for compressed headers");
6117  assert (Universe::heap() != NULL, "java heap should be initialized");
6118  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6119  int oop_index = oop_recorder()->find_index(obj);
6120  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6121  mov_narrow_oop(dst, oop_index, rspec);
6122}
6123
6124void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6125  assert (UseCompressedOops, "should only be used for compressed headers");
6126  assert (Universe::heap() != NULL, "java heap should be initialized");
6127  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6128  int oop_index = oop_recorder()->find_index(obj);
6129  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6130  mov_narrow_oop(dst, oop_index, rspec);
6131}
6132
6133void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
6134  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6135  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6136  int klass_index = oop_recorder()->find_index(k);
6137  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6138  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6139}
6140
6141void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6142  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6143  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6144  int klass_index = oop_recorder()->find_index(k);
6145  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6146  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6147}
6148
6149void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6150  assert (UseCompressedOops, "should only be used for compressed headers");
6151  assert (Universe::heap() != NULL, "java heap should be initialized");
6152  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6153  int oop_index = oop_recorder()->find_index(obj);
6154  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6155  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6156}
6157
6158void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6159  assert (UseCompressedOops, "should only be used for compressed headers");
6160  assert (Universe::heap() != NULL, "java heap should be initialized");
6161  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6162  int oop_index = oop_recorder()->find_index(obj);
6163  RelocationHolder rspec = oop_Relocation::spec(oop_index);
6164  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6165}
6166
6167void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6168  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6169  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6170  int klass_index = oop_recorder()->find_index(k);
6171  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6172  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6173}
6174
6175void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6176  assert (UseCompressedClassPointers, "should only be used for compressed headers");
6177  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6178  int klass_index = oop_recorder()->find_index(k);
6179  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6180  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6181}
6182
6183void MacroAssembler::reinit_heapbase() {
6184  if (UseCompressedOops || UseCompressedClassPointers) {
6185    if (Universe::heap() != NULL) {
6186      if (Universe::narrow_oop_base() == NULL) {
6187        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6188      } else {
6189        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
6190      }
6191    } else {
6192      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
6193    }
6194  }
6195}
6196
6197#endif // _LP64
6198
6199
6200// C2 compiled method's prolog code.
6201void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
6202
6203  // WARNING: Initial instruction MUST be 5 bytes or longer so that
6204  // NativeJump::patch_verified_entry will be able to patch out the entry
6205  // code safely. The push to verify stack depth is ok at 5 bytes,
6206  // the frame allocation can be either 3 or 6 bytes. So if we don't do
6207  // stack bang then we must use the 6 byte frame allocation even if
6208  // we have no frame. :-(
6209  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
6210
6211  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6212  // Remove word for return addr
6213  framesize -= wordSize;
6214  stack_bang_size -= wordSize;
6215
6216  // Calls to C2R adapters often do not accept exceptional returns.
6217  // We require that their callers bang for them.  But be careful, because
6218  // some VM calls (such as call site linkage) can use several kilobytes of
6219  // stack.  But the stack safety zone should account for that.
6220  // See bugs 4446381, 4468289, 4497237.
6221  if (stack_bang_size > 0) {
6222    generate_stack_overflow_check(stack_bang_size);
6223
6224    // We always push rbp so that, on return to the interpreter, rbp will be
6225    // restored correctly and we can correct the stack.
6226    push(rbp);
6227    // Save caller's stack pointer into RBP if the frame pointer is preserved.
6228    if (PreserveFramePointer) {
6229      mov(rbp, rsp);
6230    }
6231    // Remove word for ebp
6232    framesize -= wordSize;
6233
6234    // Create frame
6235    if (framesize) {
6236      subptr(rsp, framesize);
6237    }
6238  } else {
6239    // Create frame (force generation of a 4 byte immediate value)
6240    subptr_imm32(rsp, framesize);
6241
6242    // Save RBP register now.
6243    framesize -= wordSize;
6244    movptr(Address(rsp, framesize), rbp);
6245    // Save caller's stack pointer into RBP if the frame pointer is preserved.
6246    if (PreserveFramePointer) {
6247      movptr(rbp, rsp);
6248      if (framesize > 0) {
6249        addptr(rbp, framesize);
6250      }
6251    }
6252  }
6253
6254  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
6255    framesize -= wordSize;
6256    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
6257  }
6258
6259#ifndef _LP64
6260  // If method sets FPU control word do it now
6261  if (fp_mode_24b) {
6262    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
6263  }
6264  if (UseSSE >= 2 && VerifyFPU) {
6265    verify_FPU(0, "FPU stack must be clean on entry");
6266  }
6267#endif
6268
6269#ifdef ASSERT
6270  if (VerifyStackAtCalls) {
6271    Label L;
6272    push(rax);
6273    mov(rax, rsp);
6274    andptr(rax, StackAlignmentInBytes-1);
6275    cmpptr(rax, StackAlignmentInBytes-wordSize);
6276    pop(rax);
6277    jcc(Assembler::equal, L);
6278    STOP("Stack is not properly aligned!");
6279    bind(L);
6280  }
6281#endif
6282
6283}
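
// Sketch of the frame produced by the stack-bang path above (illustrative
// layout, highest address first; exact offsets depend on framesize):
//
//   [return address]
//   [saved rbp]
//   [0xbadb100d cookie]   (only if VerifyStackAtCalls)
//   [rest of the frame]
//   <-- rsp after the prolog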
6284
6285void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
6286  // cnt - number of qwords (8-byte words).
6287  // base - start address, qword aligned.
6288  assert(base==rdi, "base register must be edi for rep stos");
6289  assert(tmp==rax,   "tmp register must be eax for rep stos");
6290  assert(cnt==rcx,   "cnt register must be ecx for rep stos");
6291
6292  xorptr(tmp, tmp);
6293  if (UseFastStosb) {
6294    shlptr(cnt,3); // convert to number of bytes
6295    rep_stosb();
6296  } else {
6297    NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
6298    rep_stos();
6299  }
6300}
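
// What the generated code computes, as a plain C loop (illustrative only; the
// real code uses rep stosb / rep stos with rdi/rcx/rax bound as asserted):
//
//   void clear_mem(jlong* base, size_t cnt) {
//     for (size_t i = 0; i < cnt; i++) base[i] = 0;   // cnt is in qwords
//   }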
6301
6302// IndexOf for constant substrings with size >= 8 chars
6303// which don't need to be loaded through stack.
6304void MacroAssembler::string_indexofC8(Register str1, Register str2,
6305                                      Register cnt1, Register cnt2,
6306                                      int int_cnt2,  Register result,
6307                                      XMMRegister vec, Register tmp) {
6308  ShortBranchVerifier sbv(this);
6309  assert(UseSSE42Intrinsics, "SSE4.2 is required");
6310
6311  // This method uses pcmpestri instruction with bound registers
6312  //   inputs:
6313  //     xmm - substring
6314  //     rax - substring length (elements count)
6315  //     mem - scanned string
6316  //     rdx - string length (elements count)
6317  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6318  //   outputs:
6319  //     rcx - matched index in string
6320  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6321
6322  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
6323        RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
6324        MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
6325
6326  // Note, inline_string_indexOf() generates checks:
6327  // if (substr.count > string.count) return -1;
6328  // if (substr.count == 0) return 0;
6329  assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
6330
6331  // Load substring.
6332  movdqu(vec, Address(str2, 0));
6333  movl(cnt2, int_cnt2);
6334  movptr(result, str1); // string addr
6335
6336  if (int_cnt2 > 8) {
6337    jmpb(SCAN_TO_SUBSTR);
6338
6339    // Reload substr for rescan, this code
6340    // is executed only for large substrings (> 8 chars)
6341    bind(RELOAD_SUBSTR);
6342    movdqu(vec, Address(str2, 0));
6343    negptr(cnt2); // Jumped here with negative cnt2, convert to positive
6344
6345    bind(RELOAD_STR);
6346    // We came here after the beginning of the substring was
6347    // matched but the rest of it was not, so we need to search
6348    // again. Start from the next element after the previous match.
6349
6350    // cnt2 is the number of remaining substring elements and
6351    // cnt1 is the number of remaining string elements when the cmp failed.
6352    // Restored cnt1 = cnt1 - cnt2 + int_cnt2
6353    subl(cnt1, cnt2);
6354    addl(cnt1, int_cnt2);
6355    movl(cnt2, int_cnt2); // Now restore cnt2
6356
6357    decrementl(cnt1);     // Shift to next element
6358    cmpl(cnt1, cnt2);
6359    jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6360
6361    addptr(result, 2);
6362
6363  } // (int_cnt2 > 8)
6364
6365  // Scan string for start of substr in 16-byte vectors
6366  bind(SCAN_TO_SUBSTR);
6367  pcmpestri(vec, Address(result, 0), 0x0d);
6368  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
6369  subl(cnt1, 8);
6370  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6371  cmpl(cnt1, cnt2);
6372  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6373  addptr(result, 16);
6374  jmpb(SCAN_TO_SUBSTR);
6375
6376  // Found a potential substr
6377  bind(FOUND_CANDIDATE);
6378  // Matched whole vector if first element matched (tmp(rcx) == 0).
6379  if (int_cnt2 == 8) {
6380    jccb(Assembler::overflow, RET_FOUND);    // OF == 1
6381  } else { // int_cnt2 > 8
6382    jccb(Assembler::overflow, FOUND_SUBSTR);
6383  }
6384  // After pcmpestri tmp(rcx) contains matched element index
6385  // Compute start addr of substr
6386  lea(result, Address(result, tmp, Address::times_2));
6387
6388  // Make sure string is still long enough
6389  subl(cnt1, tmp);
6390  cmpl(cnt1, cnt2);
6391  if (int_cnt2 == 8) {
6392    jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6393  } else { // int_cnt2 > 8
6394    jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
6395  }
6396  // Left less than substring.
6397
6398  bind(RET_NOT_FOUND);
6399  movl(result, -1);
6400  jmpb(EXIT);
6401
6402  if (int_cnt2 > 8) {
6403    // This code is optimized for the case when the whole substring
6404    // is matched whenever its head is matched.
6405    bind(MATCH_SUBSTR_HEAD);
6406    pcmpestri(vec, Address(result, 0), 0x0d);
6407    // Reload only string if does not match
6408    jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
6409
6410    Label CONT_SCAN_SUBSTR;
6411    // Compare the rest of substring (> 8 chars).
6412    bind(FOUND_SUBSTR);
6413    // First 8 chars are already matched.
6414    negptr(cnt2);
6415    addptr(cnt2, 8);
6416
6417    bind(SCAN_SUBSTR);
6418    subl(cnt1, 8);
6419    cmpl(cnt2, -8); // Do not read beyond substring
6420    jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
6421    // Back-up strings to avoid reading beyond substring:
6422    // cnt1 = cnt1 - cnt2 + 8
6423    addl(cnt1, cnt2); // cnt2 is negative
6424    addl(cnt1, 8);
6425    movl(cnt2, 8); negptr(cnt2);
6426    bind(CONT_SCAN_SUBSTR);
6427    if (int_cnt2 < (int)G) {
6428      movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
6429      pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
6430    } else {
6431      // calculate index in register to avoid integer overflow (int_cnt2*2)
6432      movl(tmp, int_cnt2);
6433      addptr(tmp, cnt2);
6434      movdqu(vec, Address(str2, tmp, Address::times_2, 0));
6435      pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
6436    }
6437    // Need to reload the string pointers if the whole vector did not match
6438    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6439    addptr(cnt2, 8);
6440    jcc(Assembler::negative, SCAN_SUBSTR);
6441    // Fall through if found full substring
6442
6443  } // (int_cnt2 > 8)
6444
6445  bind(RET_FOUND);
6446  // Found result if we matched full small substring.
6447  // Compute substr offset
6448  subptr(result, str1);
6449  shrl(result, 1); // index
6450  bind(EXIT);
6451
6452} // string_indexofC8
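
// Rough C-level equivalent of the constant-substring search above
// (illustrative only; the generated code scans 8 chars at a time with
// pcmpestri instead of comparing element by element):
//
//   int indexofC8(const jchar* str, int cnt1, const jchar* substr, int cnt2) {
//     for (int i = 0; i + cnt2 <= cnt1; i++) {
//       int j = 0;
//       while (j < cnt2 && str[i + j] == substr[j]) j++;
//       if (j == cnt2) return i;
//     }
//     return -1;
//   }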
6453
6454  // Small strings are loaded through the stack if they cross a page boundary.
6455void MacroAssembler::string_indexof(Register str1, Register str2,
6456                                    Register cnt1, Register cnt2,
6457                                    int int_cnt2,  Register result,
6458                                    XMMRegister vec, Register tmp) {
6459  ShortBranchVerifier sbv(this);
6460  assert(UseSSE42Intrinsics, "SSE4.2 is required");
6461  //
6462  // int_cnt2 is the length of a small (< 8 chars) constant substring,
6463  // or (-1) for a non-constant substring, in which case its length
6464  // is in the cnt2 register.
6465  //
6466  // Note, inline_string_indexOf() generates checks:
6467  // if (substr.count > string.count) return -1;
6468  // if (substr.count == 0) return 0;
6469  //
6470  assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
6471
6472  // This method uses pcmpestri instruction with bound registers
6473  //   inputs:
6474  //     xmm - substring
6475  //     rax - substring length (elements count)
6476  //     mem - scanned string
6477  //     rdx - string length (elements count)
6478  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6479  //   outputs:
6480  //     rcx - matched index in string
6481  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6482
6483  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
6484        RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
6485        FOUND_CANDIDATE;
6486
6487  { //========================================================
6488    // We don't know where these strings are located
6489    // and we can't read beyond them. Load them through stack.
6490    Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
6491
6492    movptr(tmp, rsp); // save old SP
6493
6494    if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
6495      if (int_cnt2 == 1) {  // One char
6496        load_unsigned_short(result, Address(str2, 0));
6497        movdl(vec, result); // move 32 bits
6498      } else if (int_cnt2 == 2) { // Two chars
6499        movdl(vec, Address(str2, 0)); // move 32 bits
6500      } else if (int_cnt2 == 4) { // Four chars
6501        movq(vec, Address(str2, 0));  // move 64 bits
6502      } else { // cnt2 = { 3, 5, 6, 7 }
6503        // Array header size is 12 bytes in 32-bit VM
6504        // + 6 bytes for 3 chars == 18 bytes,
6505        // enough space to load vec and shift.
6506        assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
6507        movdqu(vec, Address(str2, (int_cnt2*2)-16));
6508        psrldq(vec, 16-(int_cnt2*2));
6509      }
6510    } else { // not constant substring
6511      cmpl(cnt2, 8);
6512      jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
6513
6514      // We can read beyond the string if str+16 does not cross a page boundary,
6515      // since heaps are aligned and mapped by pages.
6516      assert(os::vm_page_size() < (int)G, "default page should be small");
6517      movl(result, str2); // We need only low 32 bits
6518      andl(result, (os::vm_page_size()-1));
6519      cmpl(result, (os::vm_page_size()-16));
6520      jccb(Assembler::belowEqual, CHECK_STR);
6521
6522      // Move small strings to the stack to allow loading 16 bytes into vec.
6523      subptr(rsp, 16);
6524      int stk_offset = wordSize-2;
6525      push(cnt2);
6526
6527      bind(COPY_SUBSTR);
6528      load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
6529      movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6530      decrement(cnt2);
6531      jccb(Assembler::notZero, COPY_SUBSTR);
6532
6533      pop(cnt2);
6534      movptr(str2, rsp);  // New substring address
6535    } // non constant
6536
6537    bind(CHECK_STR);
6538    cmpl(cnt1, 8);
6539    jccb(Assembler::aboveEqual, BIG_STRINGS);
6540
6541    // Check cross page boundary.
6542    movl(result, str1); // We need only low 32 bits
6543    andl(result, (os::vm_page_size()-1));
6544    cmpl(result, (os::vm_page_size()-16));
6545    jccb(Assembler::belowEqual, BIG_STRINGS);
6546
6547    subptr(rsp, 16);
6548    int stk_offset = -2;
6549    if (int_cnt2 < 0) { // not constant
6550      push(cnt2);
6551      stk_offset += wordSize;
6552    }
6553    movl(cnt2, cnt1);
6554
6555    bind(COPY_STR);
6556    load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
6557    movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6558    decrement(cnt2);
6559    jccb(Assembler::notZero, COPY_STR);
6560
6561    if (int_cnt2 < 0) { // not constant
6562      pop(cnt2);
6563    }
6564    movptr(str1, rsp);  // New string address
6565
6566    bind(BIG_STRINGS);
6567    // Load substring.
6568    if (int_cnt2 < 0) { // -1
6569      movdqu(vec, Address(str2, 0));
6570      push(cnt2);       // substr count
6571      push(str2);       // substr addr
6572      push(str1);       // string addr
6573    } else {
6574      // Small (< 8 chars) constant substrings are loaded already.
6575      movl(cnt2, int_cnt2);
6576    }
6577    push(tmp);  // original SP
6578
6579  } // Finished loading
6580
6581  //========================================================
6582  // Start search
6583  //
6584
6585  movptr(result, str1); // string addr
6586
6587  if (int_cnt2  < 0) {  // Only for non constant substring
6588    jmpb(SCAN_TO_SUBSTR);
6589
6590    // SP saved at sp+0
6591    // String saved at sp+1*wordSize
6592    // Substr saved at sp+2*wordSize
6593    // Substr count saved at sp+3*wordSize
6594
6595    // Reload substr for rescan, this code
6596    // is executed only for large substrings (> 8 chars)
6597    bind(RELOAD_SUBSTR);
6598    movptr(str2, Address(rsp, 2*wordSize));
6599    movl(cnt2, Address(rsp, 3*wordSize));
6600    movdqu(vec, Address(str2, 0));
6601    // We came here after the beginning of the substring was
6602    // matched but the rest of it was not, so we need to search
6603    // again. Start from the next element after the previous match.
6604    subptr(str1, result); // Restore counter
6605    shrl(str1, 1);
6606    addl(cnt1, str1);
6607    decrementl(cnt1);   // Shift to next element
6608    cmpl(cnt1, cnt2);
6609    jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6610
6611    addptr(result, 2);
6612  } // non constant
6613
6614  // Scan string for start of substr in 16-byte vectors
6615  bind(SCAN_TO_SUBSTR);
6616  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6617  pcmpestri(vec, Address(result, 0), 0x0d);
6618  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
6619  subl(cnt1, 8);
6620  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6621  cmpl(cnt1, cnt2);
6622  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
6623  addptr(result, 16);
6624
6625  bind(ADJUST_STR);
6626  cmpl(cnt1, 8); // Do not read beyond string
6627  jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6628  // Back-up string to avoid reading beyond string.
6629  lea(result, Address(result, cnt1, Address::times_2, -16));
6630  movl(cnt1, 8);
6631  jmpb(SCAN_TO_SUBSTR);
6632
6633  // Found a potential substr
6634  bind(FOUND_CANDIDATE);
6635  // After pcmpestri tmp(rcx) contains matched element index
6636
6637  // Make sure string is still long enough
6638  subl(cnt1, tmp);
6639  cmpl(cnt1, cnt2);
6640  jccb(Assembler::greaterEqual, FOUND_SUBSTR);
6641  // Left less than substring.
6642
6643  bind(RET_NOT_FOUND);
6644  movl(result, -1);
6645  jmpb(CLEANUP);
6646
6647  bind(FOUND_SUBSTR);
6648  // Compute start addr of substr
6649  lea(result, Address(result, tmp, Address::times_2));
6650
6651  if (int_cnt2 > 0) { // Constant substring
6652    // Repeat search for small substring (< 8 chars)
6653    // from new point without reloading substring.
6654    // Have to check that we don't read beyond string.
6655    cmpl(tmp, 8-int_cnt2);
6656    jccb(Assembler::greater, ADJUST_STR);
6657    // Fall through if matched whole substring.
6658  } else { // non constant
6659    assert(int_cnt2 == -1, "should be != 0");
6660
6661    addl(tmp, cnt2);
6662    // Found result if we matched whole substring.
6663    cmpl(tmp, 8);
6664    jccb(Assembler::lessEqual, RET_FOUND);
6665
6666    // Repeat search for small substring (<= 8 chars)
6667    // from new point 'str1' without reloading substring.
6668    cmpl(cnt2, 8);
6669    // Have to check that we don't read beyond string.
6670    jccb(Assembler::lessEqual, ADJUST_STR);
6671
6672    Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
6673    // Compare the rest of substring (> 8 chars).
6674    movptr(str1, result);
6675
6676    cmpl(tmp, cnt2);
6677    // First 8 chars are already matched.
6678    jccb(Assembler::equal, CHECK_NEXT);
6679
6680    bind(SCAN_SUBSTR);
6681    pcmpestri(vec, Address(str1, 0), 0x0d);
6682    // Need to reload the string pointers if the whole vector did not match
6683    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6684
6685    bind(CHECK_NEXT);
6686    subl(cnt2, 8);
6687    jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
6688    addptr(str1, 16);
6689    addptr(str2, 16);
6690    subl(cnt1, 8);
6691    cmpl(cnt2, 8); // Do not read beyond substring
6692    jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
6693    // Back-up strings to avoid reading beyond substring.
6694    lea(str2, Address(str2, cnt2, Address::times_2, -16));
6695    lea(str1, Address(str1, cnt2, Address::times_2, -16));
6696    subl(cnt1, cnt2);
6697    movl(cnt2, 8);
6698    addl(cnt1, 8);
6699    bind(CONT_SCAN_SUBSTR);
6700    movdqu(vec, Address(str2, 0));
6701    jmpb(SCAN_SUBSTR);
6702
6703    bind(RET_FOUND_LONG);
6704    movptr(str1, Address(rsp, wordSize));
6705  } // non constant
6706
6707  bind(RET_FOUND);
6708  // Compute substr offset
6709  subptr(result, str1);
6710  shrl(result, 1); // index
6711
6712  bind(CLEANUP);
6713  pop(rsp); // restore SP
6714
6715} // string_indexof
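
// The stack-copy guard above relies on this property (illustrative check, not
// generated code): a 16-byte movdqu starting at addr cannot fault as long as
// it does not cross a page boundary.
//
//   bool safe_to_overread16(uintptr_t addr) {
//     return (addr & (os::vm_page_size() - 1)) <= (uintptr_t)(os::vm_page_size() - 16);
//   }
//
// Small strings that fail this check are first copied to the stack.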
6716
6717// Compare strings.
6718void MacroAssembler::string_compare(Register str1, Register str2,
6719                                    Register cnt1, Register cnt2, Register result,
6720                                    XMMRegister vec1) {
6721  ShortBranchVerifier sbv(this);
6722  Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
6723
6724  // Compute the minimum of the string lengths and the
6725  // difference of the string lengths (stack).
6726  // Do the conditional move stuff
6727  movl(result, cnt1);
6728  subl(cnt1, cnt2);
6729  push(cnt1);
6730  cmov32(Assembler::lessEqual, cnt2, result);
6731
6732  // Is the minimum length zero?
6733  testl(cnt2, cnt2);
6734  jcc(Assembler::zero, LENGTH_DIFF_LABEL);
6735
6736  // Compare first characters
6737  load_unsigned_short(result, Address(str1, 0));
6738  load_unsigned_short(cnt1, Address(str2, 0));
6739  subl(result, cnt1);
6740  jcc(Assembler::notZero,  POP_LABEL);
6741  cmpl(cnt2, 1);
6742  jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6743
6744  // Check if the strings start at the same location.
6745  cmpptr(str1, str2);
6746  jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6747
6748  Address::ScaleFactor scale = Address::times_2;
6749  int stride = 8;
6750
6751  if (UseAVX >= 2 && UseSSE42Intrinsics) {
6752    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
6753    Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
6754    Label COMPARE_TAIL_LONG;
6755    int pcmpmask = 0x19;
6756
6757    // Setup to compare 16-chars (32-bytes) vectors,
6758    // start from the first character again because it has an aligned address.
6759    int stride2 = 16;
6760    int adr_stride  = stride  << scale;
6761
6762    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6763    // rax and rdx are used by pcmpestri as elements counters
6764    movl(result, cnt2);
6765    andl(cnt2, ~(stride2-1));   // cnt2 holds the vector count
6766    jcc(Assembler::zero, COMPARE_TAIL_LONG);
6767
6768    // fast path : compare first 2 8-char vectors.
6769    bind(COMPARE_16_CHARS);
6770    movdqu(vec1, Address(str1, 0));
6771    pcmpestri(vec1, Address(str2, 0), pcmpmask);
6772    jccb(Assembler::below, COMPARE_INDEX_CHAR);
6773
6774    movdqu(vec1, Address(str1, adr_stride));
6775    pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
6776    jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
6777    addl(cnt1, stride);
6778
6779    // Compare the characters at index in cnt1
6780    bind(COMPARE_INDEX_CHAR); //cnt1 has the offset of the mismatching character
6781    load_unsigned_short(result, Address(str1, cnt1, scale));
6782    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6783    subl(result, cnt2);
6784    jmp(POP_LABEL);
6785
6786    // Setup the registers to start vector comparison loop
6787    bind(COMPARE_WIDE_VECTORS);
6788    lea(str1, Address(str1, result, scale));
6789    lea(str2, Address(str2, result, scale));
6790    subl(result, stride2);
6791    subl(cnt2, stride2);
6792    jccb(Assembler::zero, COMPARE_WIDE_TAIL);
6793    negptr(result);
6794
6795    //  In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
6796    bind(COMPARE_WIDE_VECTORS_LOOP);
6797    vmovdqu(vec1, Address(str1, result, scale));
6798    vpxor(vec1, Address(str2, result, scale));
6799    vptest(vec1, vec1);
6800    jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
6801    addptr(result, stride2);
6802    subl(cnt2, stride2);
6803    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
6804    // clean upper bits of YMM registers
6805    vpxor(vec1, vec1);
6806
6807    // compare wide vectors tail
6808    bind(COMPARE_WIDE_TAIL);
6809    testptr(result, result);
6810    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6811
6812    movl(result, stride2);
6813    movl(cnt2, result);
6814    negptr(result);
6815    jmpb(COMPARE_WIDE_VECTORS_LOOP);
6816
6817    // Identifies the mismatching (higher or lower) 16 bytes in the 32-byte vectors.
6818    bind(VECTOR_NOT_EQUAL);
6819    // clean upper bits of YMM registers
6820    vpxor(vec1, vec1);
6821    lea(str1, Address(str1, result, scale));
6822    lea(str2, Address(str2, result, scale));
6823    jmp(COMPARE_16_CHARS);
6824
6825    // Compare tail chars, length between 1 and 15 chars
6826    bind(COMPARE_TAIL_LONG);
6827    movl(cnt2, result);
6828    cmpl(cnt2, stride);
6829    jccb(Assembler::less, COMPARE_SMALL_STR);
6830
6831    movdqu(vec1, Address(str1, 0));
6832    pcmpestri(vec1, Address(str2, 0), pcmpmask);
6833    jcc(Assembler::below, COMPARE_INDEX_CHAR);
6834    subptr(cnt2, stride);
6835    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6836    lea(str1, Address(str1, result, scale));
6837    lea(str2, Address(str2, result, scale));
6838    negptr(cnt2);
6839    jmpb(WHILE_HEAD_LABEL);
6840
6841    bind(COMPARE_SMALL_STR);
6842  } else if (UseSSE42Intrinsics) {
6843    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
6844    int pcmpmask = 0x19;
6845    // Setup to compare 8-char (16-byte) vectors,
6846    // start from the first character again because it has an aligned address.
6847    movl(result, cnt2);
6848    andl(cnt2, ~(stride - 1));   // cnt2 holds the vector count
6849    jccb(Assembler::zero, COMPARE_TAIL);
6850
6851    lea(str1, Address(str1, result, scale));
6852    lea(str2, Address(str2, result, scale));
6853    negptr(result);
6854
6855    // pcmpestri
6856    //   inputs:
6857    //     vec1- substring
6858    //     rax - negative string length (elements count)
6859    //     mem - scanned string
6860    //     rdx - string length (elements count)
6861    //     pcmpmask - cmp mode: 11000 (string compare with negated result)
6862    //               + 00 (unsigned bytes) or  + 01 (unsigned shorts)
6863    //   outputs:
6864    //     rcx - first mismatched element index
6865    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6866
6867    bind(COMPARE_WIDE_VECTORS);
6868    movdqu(vec1, Address(str1, result, scale));
6869    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6870    // After pcmpestri cnt1(rcx) contains mismatched element index
6871
6872    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
6873    addptr(result, stride);
6874    subptr(cnt2, stride);
6875    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
6876
6877    // compare wide vectors tail
6878    testptr(result, result);
6879    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6880
6881    movl(cnt2, stride);
6882    movl(result, stride);
6883    negptr(result);
6884    movdqu(vec1, Address(str1, result, scale));
6885    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6886    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
6887
6888    // Mismatched characters in the vectors
6889    bind(VECTOR_NOT_EQUAL);
6890    addptr(cnt1, result);
6891    load_unsigned_short(result, Address(str1, cnt1, scale));
6892    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6893    subl(result, cnt2);
6894    jmpb(POP_LABEL);
6895
6896    bind(COMPARE_TAIL); // limit is zero
6897    movl(cnt2, result);
6898    // Fallthru to tail compare
6899  }
6900  // Shift str2 and str1 to the end of the arrays, negate min
6901  lea(str1, Address(str1, cnt2, scale));
6902  lea(str2, Address(str2, cnt2, scale));
6903  decrementl(cnt2);  // first character was compared already
6904  negptr(cnt2);
6905
6906  // Compare the rest of the elements
6907  bind(WHILE_HEAD_LABEL);
6908  load_unsigned_short(result, Address(str1, cnt2, scale, 0));
6909  load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
6910  subl(result, cnt1);
6911  jccb(Assembler::notZero, POP_LABEL);
6912  increment(cnt2);
6913  jccb(Assembler::notZero, WHILE_HEAD_LABEL);
6914
6915  // Strings are equal up to min length.  Return the length difference.
6916  bind(LENGTH_DIFF_LABEL);
6917  pop(result);
6918  jmpb(DONE_LABEL);
6919
6920  // Discard the stored length difference
6921  bind(POP_LABEL);
6922  pop(cnt1);
6923
6924  // That's it
6925  bind(DONE_LABEL);
6926}
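
// C-level semantics of the comparison above (illustrative; the generated
// code vectorizes the common-prefix scan with pcmpestri / AVX2):
//
//   int compare(const jchar* s1, int cnt1, const jchar* s2, int cnt2) {
//     int min = cnt1 < cnt2 ? cnt1 : cnt2;
//     for (int i = 0; i < min; i++) {
//       if (s1[i] != s2[i]) return s1[i] - s2[i];
//     }
//     return cnt1 - cnt2;
//   }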
6927
6928// Compare char[] arrays aligned to 4 bytes or substrings.
6929void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
6930                                        Register limit, Register result, Register chr,
6931                                        XMMRegister vec1, XMMRegister vec2) {
6932  ShortBranchVerifier sbv(this);
6933  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
6934
6935  int length_offset  = arrayOopDesc::length_offset_in_bytes();
6936  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
6937
6938  // Check the input args
6939  cmpptr(ary1, ary2);
6940  jcc(Assembler::equal, TRUE_LABEL);
6941
6942  if (is_array_equ) {
6943    // Need additional checks for arrays_equals.
6944    testptr(ary1, ary1);
6945    jcc(Assembler::zero, FALSE_LABEL);
6946    testptr(ary2, ary2);
6947    jcc(Assembler::zero, FALSE_LABEL);
6948
6949    // Check the lengths
6950    movl(limit, Address(ary1, length_offset));
6951    cmpl(limit, Address(ary2, length_offset));
6952    jcc(Assembler::notEqual, FALSE_LABEL);
6953  }
6954
6955  // count == 0
6956  testl(limit, limit);
6957  jcc(Assembler::zero, TRUE_LABEL);
6958
6959  if (is_array_equ) {
6960    // Load array address
6961    lea(ary1, Address(ary1, base_offset));
6962    lea(ary2, Address(ary2, base_offset));
6963  }
6964
6965  shll(limit, 1);      // byte count != 0
6966  movl(result, limit); // copy
6967
6968  if (UseAVX >= 2) {
6969    // With AVX2, use 32-byte vector compare
6970    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6971
6972    // Compare 32-byte vectors
6973    andl(result, 0x0000001e);  //   tail count (in bytes)
6974    andl(limit, 0xffffffe0);   // vector count (in bytes)
6975    jccb(Assembler::zero, COMPARE_TAIL);
6976
6977    lea(ary1, Address(ary1, limit, Address::times_1));
6978    lea(ary2, Address(ary2, limit, Address::times_1));
6979    negptr(limit);
6980
6981    bind(COMPARE_WIDE_VECTORS);
6982    vmovdqu(vec1, Address(ary1, limit, Address::times_1));
6983    vmovdqu(vec2, Address(ary2, limit, Address::times_1));
6984    vpxor(vec1, vec2);
6985
6986    vptest(vec1, vec1);
6987    jccb(Assembler::notZero, FALSE_LABEL);
6988    addptr(limit, 32);
6989    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6990
6991    testl(result, result);
6992    jccb(Assembler::zero, TRUE_LABEL);
6993
6994    vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
6995    vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
6996    vpxor(vec1, vec2);
6997
6998    vptest(vec1, vec1);
6999    jccb(Assembler::notZero, FALSE_LABEL);
7000    jmpb(TRUE_LABEL);
7001
7002    bind(COMPARE_TAIL); // limit is zero
7003    movl(limit, result);
7004    // Fallthru to tail compare
7005  } else if (UseSSE42Intrinsics) {
7006    // With SSE4.2, use double quad vector compare
7007    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
7008
7009    // Compare 16-byte vectors
7010    andl(result, 0x0000000e);  //   tail count (in bytes)
7011    andl(limit, 0xfffffff0);   // vector count (in bytes)
7012    jccb(Assembler::zero, COMPARE_TAIL);
7013
7014    lea(ary1, Address(ary1, limit, Address::times_1));
7015    lea(ary2, Address(ary2, limit, Address::times_1));
7016    negptr(limit);
7017
7018    bind(COMPARE_WIDE_VECTORS);
7019    movdqu(vec1, Address(ary1, limit, Address::times_1));
7020    movdqu(vec2, Address(ary2, limit, Address::times_1));
7021    pxor(vec1, vec2);
7022
7023    ptest(vec1, vec1);
7024    jccb(Assembler::notZero, FALSE_LABEL);
7025    addptr(limit, 16);
7026    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
7027
7028    testl(result, result);
7029    jccb(Assembler::zero, TRUE_LABEL);
7030
7031    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
7032    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
7033    pxor(vec1, vec2);
7034
7035    ptest(vec1, vec1);
7036    jccb(Assembler::notZero, FALSE_LABEL);
7037    jmpb(TRUE_LABEL);
7038
7039    bind(COMPARE_TAIL); // limit is zero
7040    movl(limit, result);
7041    // Fallthru to tail compare
7042  }
7043
7044  // Compare 4-byte vectors
7045  andl(limit, 0xfffffffc); // vector count (in bytes)
7046  jccb(Assembler::zero, COMPARE_CHAR);
7047
7048  lea(ary1, Address(ary1, limit, Address::times_1));
7049  lea(ary2, Address(ary2, limit, Address::times_1));
7050  negptr(limit);
7051
7052  bind(COMPARE_VECTORS);
7053  movl(chr, Address(ary1, limit, Address::times_1));
7054  cmpl(chr, Address(ary2, limit, Address::times_1));
7055  jccb(Assembler::notEqual, FALSE_LABEL);
7056  addptr(limit, 4);
7057  jcc(Assembler::notZero, COMPARE_VECTORS);
7058
7059  // Compare trailing char (final 2 bytes), if any
7060  bind(COMPARE_CHAR);
7061  testl(result, 0x2);   // tail  char
7062  jccb(Assembler::zero, TRUE_LABEL);
7063  load_unsigned_short(chr, Address(ary1, 0));
7064  load_unsigned_short(limit, Address(ary2, 0));
7065  cmpl(chr, limit);
7066  jccb(Assembler::notEqual, FALSE_LABEL);
7067
7068  bind(TRUE_LABEL);
7069  movl(result, 1);   // return true
7070  jmpb(DONE);
7071
7072  bind(FALSE_LABEL);
7073  xorl(result, result); // return false
7074
7075  // That's it
7076  bind(DONE);
7077  if (UseAVX >= 2) {
7078    // clean upper bits of YMM registers
7079    vpxor(vec1, vec1);
7080    vpxor(vec2, vec2);
7081  }
7082}
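
// C-level semantics of the equality check above (illustrative; the generated
// code compares 32-, 16- and 4-byte chunks before the final trailing char):
//
//   bool equals(const jchar* a1, const jchar* a2, int len) {
//     if (a1 == a2) return true;       // plus null/length checks in the array case
//     for (int i = 0; i < len; i++) {
//       if (a1[i] != a2[i]) return false;
//     }
//     return true;
//   }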
7083
7084void MacroAssembler::generate_fill(BasicType t, bool aligned,
7085                                   Register to, Register value, Register count,
7086                                   Register rtmp, XMMRegister xtmp) {
7087  ShortBranchVerifier sbv(this);
7088  assert_different_registers(to, value, count, rtmp);
7089  Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
7090  Label L_fill_2_bytes, L_fill_4_bytes;
7091
7092  int shift = -1;
7093  switch (t) {
7094    case T_BYTE:
7095      shift = 2;
7096      break;
7097    case T_SHORT:
7098      shift = 1;
7099      break;
7100    case T_INT:
7101      shift = 0;
7102      break;
7103    default: ShouldNotReachHere();
7104  }
7105
7106  if (t == T_BYTE) {
7107    andl(value, 0xff);
7108    movl(rtmp, value);
7109    shll(rtmp, 8);
7110    orl(value, rtmp);
7111  }
7112  if (t == T_SHORT) {
7113    andl(value, 0xffff);
7114  }
7115  if (t == T_BYTE || t == T_SHORT) {
7116    movl(rtmp, value);
7117    shll(rtmp, 16);
7118    orl(value, rtmp);
7119  }
7120
7121  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
7122  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
7123  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
7124    // align source address at 4 bytes address boundary
7125    if (t == T_BYTE) {
7126      // One byte misalignment happens only for byte arrays
7127      testptr(to, 1);
7128      jccb(Assembler::zero, L_skip_align1);
7129      movb(Address(to, 0), value);
7130      increment(to);
7131      decrement(count);
7132      BIND(L_skip_align1);
7133    }
7134    // Two bytes misalignment happens only for byte and short (char) arrays
7135    testptr(to, 2);
7136    jccb(Assembler::zero, L_skip_align2);
7137    movw(Address(to, 0), value);
7138    addptr(to, 2);
7139    subl(count, 1<<(shift-1));
7140    BIND(L_skip_align2);
7141  }
7142  if (UseSSE < 2) {
7143    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7144    // Fill 32-byte chunks
7145    subl(count, 8 << shift);
7146    jcc(Assembler::less, L_check_fill_8_bytes);
7147    align(16);
7148
7149    BIND(L_fill_32_bytes_loop);
7150
7151    for (int i = 0; i < 32; i += 4) {
7152      movl(Address(to, i), value);
7153    }
7154
7155    addptr(to, 32);
7156    subl(count, 8 << shift);
7157    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7158    BIND(L_check_fill_8_bytes);
7159    addl(count, 8 << shift);
7160    jccb(Assembler::zero, L_exit);
7161    jmpb(L_fill_8_bytes);
7162
7163    //
7164    // length is too short, just fill qwords
7165    //
7166    BIND(L_fill_8_bytes_loop);
7167    movl(Address(to, 0), value);
7168    movl(Address(to, 4), value);
7169    addptr(to, 8);
7170    BIND(L_fill_8_bytes);
7171    subl(count, 1 << (shift + 1));
7172    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7173    // fall through to fill 4 bytes
7174  } else {
7175    Label L_fill_32_bytes;
7176    if (!UseUnalignedLoadStores) {
7177      // align to 8 bytes, we know we are 4 byte aligned to start
7178      testptr(to, 4);
7179      jccb(Assembler::zero, L_fill_32_bytes);
7180      movl(Address(to, 0), value);
7181      addptr(to, 4);
7182      subl(count, 1<<shift);
7183    }
7184    BIND(L_fill_32_bytes);
7185    {
7186      assert( UseSSE >= 2, "supported cpu only" );
7187      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7188      if (UseAVX > 2) {
7189        movl(rtmp, 0xffff);
7190        kmovwl(k1, rtmp);
7191      }
7192      movdl(xtmp, value);
7193      if (UseAVX > 2 && UseUnalignedLoadStores) {
7194        // Fill 64-byte chunks
7195        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7196        evpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
7197
7198        subl(count, 16 << shift);
7199        jcc(Assembler::less, L_check_fill_32_bytes);
7200        align(16);
7201
7202        BIND(L_fill_64_bytes_loop);
7203        evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
7204        addptr(to, 64);
7205        subl(count, 16 << shift);
7206        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7207
7208        BIND(L_check_fill_32_bytes);
7209        addl(count, 8 << shift);
7210        jccb(Assembler::less, L_check_fill_8_bytes);
7211        evmovdqul(Address(to, 0), xtmp, Assembler::AVX_256bit);
7212        addptr(to, 32);
7213        subl(count, 8 << shift);
7214
7215        BIND(L_check_fill_8_bytes);
7216      } else if (UseAVX == 2 && UseUnalignedLoadStores) {
7217        // Fill 64-byte chunks
7218        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7219        vpbroadcastd(xtmp, xtmp);
7220
7221        subl(count, 16 << shift);
7222        jcc(Assembler::less, L_check_fill_32_bytes);
7223        align(16);
7224
7225        BIND(L_fill_64_bytes_loop);
7226        vmovdqu(Address(to, 0), xtmp);
7227        vmovdqu(Address(to, 32), xtmp);
7228        addptr(to, 64);
7229        subl(count, 16 << shift);
7230        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7231
7232        BIND(L_check_fill_32_bytes);
7233        addl(count, 8 << shift);
7234        jccb(Assembler::less, L_check_fill_8_bytes);
7235        vmovdqu(Address(to, 0), xtmp);
7236        addptr(to, 32);
7237        subl(count, 8 << shift);
7238
7239        BIND(L_check_fill_8_bytes);
7240        // clean upper bits of YMM registers
7241        movdl(xtmp, value);
7242        pshufd(xtmp, xtmp, 0);
7243      } else {
7244        // Fill 32-byte chunks
7245        pshufd(xtmp, xtmp, 0);
7246
7247        subl(count, 8 << shift);
7248        jcc(Assembler::less, L_check_fill_8_bytes);
7249        align(16);
7250
7251        BIND(L_fill_32_bytes_loop);
7252
7253        if (UseUnalignedLoadStores) {
7254          movdqu(Address(to, 0), xtmp);
7255          movdqu(Address(to, 16), xtmp);
7256        } else {
7257          movq(Address(to, 0), xtmp);
7258          movq(Address(to, 8), xtmp);
7259          movq(Address(to, 16), xtmp);
7260          movq(Address(to, 24), xtmp);
7261        }
7262
7263        addptr(to, 32);
7264        subl(count, 8 << shift);
7265        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7266
7267        BIND(L_check_fill_8_bytes);
7268      }
7269      addl(count, 8 << shift);
7270      jccb(Assembler::zero, L_exit);
7271      jmpb(L_fill_8_bytes);
7272
7273      //
7274      // length is too short, just fill qwords
7275      //
7276      BIND(L_fill_8_bytes_loop);
7277      movq(Address(to, 0), xtmp);
7278      addptr(to, 8);
7279      BIND(L_fill_8_bytes);
7280      subl(count, 1 << (shift + 1));
7281      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7282    }
7283  }
7284  // fill trailing 4 bytes
7285  BIND(L_fill_4_bytes);
7286  testl(count, 1<<shift);
7287  jccb(Assembler::zero, L_fill_2_bytes);
7288  movl(Address(to, 0), value);
7289  if (t == T_BYTE || t == T_SHORT) {
7290    addptr(to, 4);
7291    BIND(L_fill_2_bytes);
7292    // fill trailing 2 bytes
7293    testl(count, 1<<(shift-1));
7294    jccb(Assembler::zero, L_fill_byte);
7295    movw(Address(to, 0), value);
7296    if (t == T_BYTE) {
7297      addptr(to, 2);
7298      BIND(L_fill_byte);
7299      // fill trailing byte
7300      testl(count, 1);
7301      jccb(Assembler::zero, L_exit);
7302      movb(Address(to, 0), value);
7303    } else {
7304      BIND(L_fill_byte);
7305    }
7306  } else {
7307    BIND(L_fill_2_bytes);
7308  }
7309  BIND(L_exit);
7310}
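
// C-level equivalent of the fill above for the T_INT case (illustrative only;
// the generated code widens the value and stores 64/32-byte chunks when the
// CPU supports it, with byte/short values replicated into a 32-bit pattern):
//
//   void fill(jint* to, jint value, int count) {
//     for (int i = 0; i < count; i++) to[i] = value;
//   }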
7311
7312// encode char[] to byte[] in ISO_8859_1
7313void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7314                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7315                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7316                                      Register tmp5, Register result) {
7317  // rsi: src
7318  // rdi: dst
7319  // rdx: len
7320  // rcx: tmp5
7321  // rax: result
7322  ShortBranchVerifier sbv(this);
7323  assert_different_registers(src, dst, len, tmp5, result);
7324  Label L_done, L_copy_1_char, L_copy_1_char_exit;
7325
7326  // set result
7327  xorl(result, result);
7328  // check for zero length
7329  testl(len, len);
7330  jcc(Assembler::zero, L_done);
7331  movl(result, len);
7332
7333  // Setup pointers
7334  lea(src, Address(src, len, Address::times_2)); // char[]
7335  lea(dst, Address(dst, len, Address::times_1)); // byte[]
7336  negptr(len);
7337
7338  if (UseSSE42Intrinsics || UseAVX >= 2) {
7339    Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
7340    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7341
7342    if (UseAVX >= 2) {
7343      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7344      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
7345      movdl(tmp1Reg, tmp5);
7346      vpbroadcastd(tmp1Reg, tmp1Reg);
7347      jmpb(L_chars_32_check);
7348
7349      bind(L_copy_32_chars);
7350      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7351      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7352      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7353      vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
7354      jccb(Assembler::notZero, L_copy_32_chars_exit);
7355      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
7356      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
7357      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7358
7359      bind(L_chars_32_check);
7360      addptr(len, 32);
7361      jccb(Assembler::lessEqual, L_copy_32_chars);
7362
7363      bind(L_copy_32_chars_exit);
7364      subptr(len, 16);
7365      jccb(Assembler::greater, L_copy_16_chars_exit);
7366
7367    } else if (UseSSE42Intrinsics) {
7368      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
7369      movdl(tmp1Reg, tmp5);
7370      pshufd(tmp1Reg, tmp1Reg, 0);
7371      jmpb(L_chars_16_check);
7372    }
7373
7374    bind(L_copy_16_chars);
7375    if (UseAVX >= 2) {
7376      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7377      vptest(tmp2Reg, tmp1Reg);
7378      jccb(Assembler::notZero, L_copy_16_chars_exit);
7379      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
7380      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
7381    } else {
7382      if (UseAVX > 0) {
7383        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7384        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7385        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
7386      } else {
7387        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7388        por(tmp2Reg, tmp3Reg);
7389        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7390        por(tmp2Reg, tmp4Reg);
7391      }
7392      ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
7393      jccb(Assembler::notZero, L_copy_16_chars_exit);
7394      packuswb(tmp3Reg, tmp4Reg);
7395    }
7396    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7397
7398    bind(L_chars_16_check);
7399    addptr(len, 16);
7400    jccb(Assembler::lessEqual, L_copy_16_chars);
7401
7402    bind(L_copy_16_chars_exit);
7403    if (UseAVX >= 2) {
7404      // clean upper bits of YMM registers
7405      vpxor(tmp2Reg, tmp2Reg);
7406      vpxor(tmp3Reg, tmp3Reg);
7407      vpxor(tmp4Reg, tmp4Reg);
7408      movdl(tmp1Reg, tmp5);
7409      pshufd(tmp1Reg, tmp1Reg, 0);
7410    }
7411    subptr(len, 8);
7412    jccb(Assembler::greater, L_copy_8_chars_exit);
7413
7414    bind(L_copy_8_chars);
7415    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7416    ptest(tmp3Reg, tmp1Reg);
7417    jccb(Assembler::notZero, L_copy_8_chars_exit);
7418    packuswb(tmp3Reg, tmp1Reg);
7419    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7420    addptr(len, 8);
7421    jccb(Assembler::lessEqual, L_copy_8_chars);
7422
7423    bind(L_copy_8_chars_exit);
7424    subptr(len, 8);
7425    jccb(Assembler::zero, L_done);
7426  }
7427
7428  bind(L_copy_1_char);
7429  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7430  testl(tmp5, 0xff00);      // check if Unicode char
7431  jccb(Assembler::notZero, L_copy_1_char_exit);
7432  movb(Address(dst, len, Address::times_1, 0), tmp5);
7433  addptr(len, 1);
7434  jccb(Assembler::less, L_copy_1_char);
7435
7436  bind(L_copy_1_char_exit);
7437  addptr(result, len); // len is the negative count of unprocessed elements
7438  bind(L_done);
7439}
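
// C-level semantics of the ISO-8859-1 encode above (illustrative; the
// generated code tests 8/16/32 chars at a time against the 0xff00 mask):
//
//   int encode_iso(const jchar* src, jbyte* dst, int len) {
//     for (int i = 0; i < len; i++) {
//       if (src[i] > 0xff) return i;   // stop at the first non-latin1 char
//       dst[i] = (jbyte)src[i];
//     }
//     return len;
//   }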
7440
7441#ifdef _LP64
7442/**
7443 * Helper for multiply_to_len().
7444 */
7445void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7446  addq(dest_lo, src1);
7447  adcq(dest_hi, 0);
7448  addq(dest_lo, src2);
7449  adcq(dest_hi, 0);
7450}
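
// What add2_with_carry computes, written out (illustrative; dest_hi:dest_lo is
// treated as a single unsigned 128-bit value):
//
//   dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2;
//
// Each addq/adcq pair adds one 64-bit term and propagates its carry into dest_hi.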
7451
7452/**
7453 * Multiply 64 bit by 64 bit first loop.
7454 */
7455void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7456                                           Register y, Register y_idx, Register z,
7457                                           Register carry, Register product,
7458                                           Register idx, Register kdx) {
7459  //
7460  //  jlong carry, x[], y[], z[];
7461  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7462  //    huge_128 product = y[idx] * x[xstart] + carry;
7463  //    z[kdx] = (jlong)product;
7464  //    carry  = (jlong)(product >>> 64);
7465  //  }
7466  //  z[xstart] = carry;
7467  //
7468
7469  Label L_first_loop, L_first_loop_exit;
7470  Label L_one_x, L_one_y, L_multiply;
7471
7472  decrementl(xstart);
7473  jcc(Assembler::negative, L_one_x);
7474
7475  movq(x_xstart, Address(x, xstart, Address::times_4,  0));
7476  rorq(x_xstart, 32); // convert big-endian to little-endian
7477
7478  bind(L_first_loop);
7479  decrementl(idx);
7480  jcc(Assembler::negative, L_first_loop_exit);
7481  decrementl(idx);
7482  jcc(Assembler::negative, L_one_y);
7483  movq(y_idx, Address(y, idx, Address::times_4,  0));
7484  rorq(y_idx, 32); // convert big-endian to little-endian
7485  bind(L_multiply);
7486  movq(product, x_xstart);
7487  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7488  addq(product, carry);
7489  adcq(rdx, 0);
7490  subl(kdx, 2);
7491  movl(Address(z, kdx, Address::times_4,  4), product);
7492  shrq(product, 32);
7493  movl(Address(z, kdx, Address::times_4,  0), product);
7494  movq(carry, rdx);
7495  jmp(L_first_loop);
7496
7497  bind(L_one_y);
7498  movl(y_idx, Address(y,  0));
7499  jmp(L_multiply);
7500
7501  bind(L_one_x);
7502  movl(x_xstart, Address(x,  0));
7503  jmp(L_first_loop);
7504
7505  bind(L_first_loop_exit);
7506}
7507
7508/**
7509 * Multiply 64 bit by 64 bit and add 128 bit.
7510 */
7511void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7512                                            Register yz_idx, Register idx,
7513                                            Register carry, Register product, int offset) {
7514  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7515  //     z[kdx] = (jlong)product;
7516
7517  movq(yz_idx, Address(y, idx, Address::times_4,  offset));
7518  rorq(yz_idx, 32); // convert big-endian to little-endian
7519  movq(product, x_xstart);
7520  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
7521  movq(yz_idx, Address(z, idx, Address::times_4,  offset));
7522  rorq(yz_idx, 32); // convert big-endian to little-endian
7523
7524  add2_with_carry(rdx, product, carry, yz_idx);
7525
7526  movl(Address(z, idx, Address::times_4,  offset+4), product);
7527  shrq(product, 32);
7528  movl(Address(z, idx, Address::times_4,  offset), product);
7529
7530}
7531
7532/**
7533 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7534 */
7535void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7536                                             Register yz_idx, Register idx, Register jdx,
7537                                             Register carry, Register product,
7538                                             Register carry2) {
7539  //   jlong carry, x[], y[], z[];
7540  //   int kdx = ystart+1;
7541  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7542  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7543  //     z[kdx+idx+1] = (jlong)product;
7544  //     jlong carry2  = (jlong)(product >>> 64);
7545  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7546  //     z[kdx+idx] = (jlong)product;
7547  //     carry  = (jlong)(product >>> 64);
7548  //   }
7549  //   idx += 2;
7550  //   if (idx > 0) {
7551  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7552  //     z[kdx+idx] = (jlong)product;
7553  //     carry  = (jlong)(product >>> 64);
7554  //   }
7555  //
7556
7557  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7558
7559  movl(jdx, idx);
7560  andl(jdx, 0xFFFFFFFC);
7561  shrl(jdx, 2);
7562
7563  bind(L_third_loop);
7564  subl(jdx, 1);
7565  jcc(Assembler::negative, L_third_loop_exit);
7566  subl(idx, 4);
7567
7568  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7569  movq(carry2, rdx);
7570
7571  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7572  movq(carry, rdx);
7573  jmp(L_third_loop);
7574
7575  bind (L_third_loop_exit);
7576
7577  andl (idx, 0x3);
7578  jcc(Assembler::zero, L_post_third_loop_done);
7579
7580  Label L_check_1;
7581  subl(idx, 2);
7582  jcc(Assembler::negative, L_check_1);
7583
7584  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7585  movq(carry, rdx);
7586
7587  bind (L_check_1);
7588  addl (idx, 0x2);
7589  andl (idx, 0x1);
7590  subl(idx, 1);
7591  jcc(Assembler::negative, L_post_third_loop_done);
7592
7593  movl(yz_idx, Address(y, idx, Address::times_4,  0));
7594  movq(product, x_xstart);
7595  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7596  movl(yz_idx, Address(z, idx, Address::times_4,  0));
7597
7598  add2_with_carry(rdx, product, yz_idx, carry);
7599
7600  movl(Address(z, idx, Address::times_4,  0), product);
7601  shrq(product, 32);
7602
7603  shlq(rdx, 32);
7604  orq(product, rdx);
7605  movq(carry, product);
7606
7607  bind(L_post_third_loop_done);
7608}
7609
7610/**
7611 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7612 *
7613 */
7614void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7615                                                  Register carry, Register carry2,
7616                                                  Register idx, Register jdx,
7617                                                  Register yz_idx1, Register yz_idx2,
7618                                                  Register tmp, Register tmp3, Register tmp4) {
7619  assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7620
7621  //   jlong carry, x[], y[], z[];
7622  //   int kdx = ystart+1;
7623  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7624  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7625  //     jlong carry2  = (jlong)(tmp3 >>> 64);
7626  //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
7627  //     carry  = (jlong)(tmp4 >>> 64);
7628  //     z[kdx+idx+1] = (jlong)tmp3;
7629  //     z[kdx+idx] = (jlong)tmp4;
7630  //   }
7631  //   idx += 2;
7632  //   if (idx > 0) {
7633  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7634  //     z[kdx+idx] = (jlong)yz_idx1;
7635  //     carry  = (jlong)(yz_idx1 >>> 64);
7636  //   }
7637  //
7638
7639  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7640
7641  movl(jdx, idx);
7642  andl(jdx, 0xFFFFFFFC);
7643  shrl(jdx, 2);
7644
7645  bind(L_third_loop);
7646  subl(jdx, 1);
7647  jcc(Assembler::negative, L_third_loop_exit);
7648  subl(idx, 4);
7649
7650  movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
7651  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7652  movq(yz_idx2, Address(y, idx, Address::times_4,  0));
7653  rorxq(yz_idx2, yz_idx2, 32);
7654
7655  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
7656  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
7657
7658  movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
7659  rorxq(yz_idx1, yz_idx1, 32);
7660  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
7661  rorxq(yz_idx2, yz_idx2, 32);
7662
7663  if (VM_Version::supports_adx()) {
7664    adcxq(tmp3, carry);
7665    adoxq(tmp3, yz_idx1);
7666
7667    adcxq(tmp4, tmp);
7668    adoxq(tmp4, yz_idx2);
7669
7670    movl(carry, 0); // does not affect flags
7671    adcxq(carry2, carry);
7672    adoxq(carry2, carry);
7673  } else {
7674    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7675    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7676  }
7677  movq(carry, carry2);
7678
7679  movl(Address(z, idx, Address::times_4, 12), tmp3);
7680  shrq(tmp3, 32);
7681  movl(Address(z, idx, Address::times_4,  8), tmp3);
7682
7683  movl(Address(z, idx, Address::times_4,  4), tmp4);
7684  shrq(tmp4, 32);
7685  movl(Address(z, idx, Address::times_4,  0), tmp4);
7686
7687  jmp(L_third_loop);
7688
7689  bind (L_third_loop_exit);
7690
7691  andl (idx, 0x3);
7692  jcc(Assembler::zero, L_post_third_loop_done);
7693
7694  Label L_check_1;
7695  subl(idx, 2);
7696  jcc(Assembler::negative, L_check_1);
7697
7698  movq(yz_idx1, Address(y, idx, Address::times_4,  0));
7699  rorxq(yz_idx1, yz_idx1, 32);
7700  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
7701  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
7702  rorxq(yz_idx2, yz_idx2, 32);
7703
7704  add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7705
7706  movl(Address(z, idx, Address::times_4,  4), tmp3);
7707  shrq(tmp3, 32);
7708  movl(Address(z, idx, Address::times_4,  0), tmp3);
7709  movq(carry, tmp4);
7710
7711  bind (L_check_1);
7712  addl (idx, 0x2);
7713  andl (idx, 0x1);
7714  subl(idx, 1);
7715  jcc(Assembler::negative, L_post_third_loop_done);
7716  movl(tmp4, Address(y, idx, Address::times_4,  0));
7717  mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
7718  movl(tmp4, Address(z, idx, Address::times_4,  0));
7719
7720  add2_with_carry(carry2, tmp3, tmp4, carry);
7721
7722  movl(Address(z, idx, Address::times_4,  0), tmp3);
7723  shrq(tmp3, 32);
7724
7725  shlq(carry2, 32);
7726  orq(tmp3, carry2);
7727  movq(carry, tmp3);
7728
7729  bind(L_post_third_loop_done);
7730}
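
// Illustrative note (hypothetical reference model, not emitted code): mulxq computes a full
// 64x64->128 multiply of its source with rdx and, unlike mulq, leaves the flags untouched.
// That is what allows the adcx (CF chain) and adox (OF chain) additions above to run as two
// independent carry chains on ADX-capable hardware. Assuming GCC/Clang's unsigned __int128:
static inline void mulx_ref(uint64_t src, uint64_t rdx_val, uint64_t* hi, uint64_t* lo) {
  unsigned __int128 p = (unsigned __int128)src * rdx_val;
  *hi = (uint64_t)(p >> 64);   // first register operand of mulxq above (e.g. tmp4)
  *lo = (uint64_t)p;           // second register operand (e.g. tmp3)
}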
7731
7732/**
7733 * Code for BigInteger::multiplyToLen() intrinsic.
7734 *
7735 * rdi: x
7736 * rax: xlen
7737 * rsi: y
7738 * rcx: ylen
7739 * r8:  z
7740 * r11: zlen
7741 * r12: tmp1
7742 * r13: tmp2
7743 * r14: tmp3
7744 * r15: tmp4
7745 * rbx: tmp5
7746 *
7747 */
7748void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
7749                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7750  ShortBranchVerifier sbv(this);
7751  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7752
7753  push(tmp1);
7754  push(tmp2);
7755  push(tmp3);
7756  push(tmp4);
7757  push(tmp5);
7758
7759  push(xlen);
7760  push(zlen);
7761
7762  const Register idx = tmp1;
7763  const Register kdx = tmp2;
7764  const Register xstart = tmp3;
7765
7766  const Register y_idx = tmp4;
7767  const Register carry = tmp5;
7768  const Register product  = xlen;
7769  const Register x_xstart = zlen;  // reuse register
7770
7771  // First Loop.
7772  //
7773  //  final static long LONG_MASK = 0xffffffffL;
7774  //  int xstart = xlen - 1;
7775  //  int ystart = ylen - 1;
7776  //  long carry = 0;
7777  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7778  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7779  //    z[kdx] = (int)product;
7780  //    carry = product >>> 32;
7781  //  }
7782  //  z[xstart] = (int)carry;
7783  //
7784
7785  movl(idx, ylen);      // idx = ylen;
7786  movl(kdx, zlen);      // kdx = xlen+ylen;
7787  xorq(carry, carry);   // carry = 0;
7788
7789  Label L_done;
7790
7791  movl(xstart, xlen);
7792  decrementl(xstart);
7793  jcc(Assembler::negative, L_done);
7794
7795  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7796
7797  Label L_second_loop;
7798  testl(kdx, kdx);
7799  jcc(Assembler::zero, L_second_loop);
7800
7801  Label L_carry;
7802  subl(kdx, 1);
7803  jcc(Assembler::zero, L_carry);
7804
7805  movl(Address(z, kdx, Address::times_4,  0), carry);
7806  shrq(carry, 32);
7807  subl(kdx, 1);
7808
7809  bind(L_carry);
7810  movl(Address(z, kdx, Address::times_4,  0), carry);
7811
7812  // Second and third (nested) loops.
7813  //
7814  // for (int i = xstart-1; i >= 0; i--) { // Second loop
7815  //   carry = 0;
7816  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7817  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7818  //                    (z[k] & LONG_MASK) + carry;
7819  //     z[k] = (int)product;
7820  //     carry = product >>> 32;
7821  //   }
7822  //   z[i] = (int)carry;
7823  // }
7824  //
7825  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7826
7827  const Register jdx = tmp1;
7828
7829  bind(L_second_loop);
7830  xorl(carry, carry);    // carry = 0;
7831  movl(jdx, ylen);       // j = ystart+1
7832
7833  subl(xstart, 1);       // i = xstart-1;
7834  jcc(Assembler::negative, L_done);
7835
7836  push (z);
7837
7838  Label L_last_x;
7839  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7840  subl(xstart, 1);       // i = xstart-1;
7841  jcc(Assembler::negative, L_last_x);
7842
7843  if (UseBMI2Instructions) {
7844    movq(rdx,  Address(x, xstart, Address::times_4,  0));
7845    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7846  } else {
7847    movq(x_xstart, Address(x, xstart, Address::times_4,  0));
7848    rorq(x_xstart, 32);  // convert big-endian to little-endian
7849  }
7850
7851  Label L_third_loop_prologue;
7852  bind(L_third_loop_prologue);
7853
7854  push (x);
7855  push (xstart);
7856  push (ylen);
7857
7858
7859  if (UseBMI2Instructions) {
7860    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7861  } else { // !UseBMI2Instructions
7862    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7863  }
7864
7865  pop(ylen);
7866  pop(xlen);
7867  pop(x);
7868  pop(z);
7869
7870  movl(tmp3, xlen);
7871  addl(tmp3, 1);
7872  movl(Address(z, tmp3, Address::times_4,  0), carry);
7873  subl(tmp3, 1);
7874  jccb(Assembler::negative, L_done);
7875
7876  shrq(carry, 32);
7877  movl(Address(z, tmp3, Address::times_4,  0), carry);
7878  jmp(L_second_loop);
7879
7880  // Next infrequent code is moved outside loops.
7881  bind(L_last_x);
7882  if (UseBMI2Instructions) {
7883    movl(rdx, Address(x,  0));
7884  } else {
7885    movl(x_xstart, Address(x,  0));
7886  }
7887  jmp(L_third_loop_prologue);
7888
7889  bind(L_done);
7890
7891  pop(zlen);
7892  pop(xlen);
7893
7894  pop(tmp5);
7895  pop(tmp4);
7896  pop(tmp3);
7897  pop(tmp2);
7898  pop(tmp1);
7899}
7900
7901// Helper functions for square_to_len()
7902
7903/**
7904 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7905 * Preserves x and z and modifies rest of the registers.
7906 */
7907
7908void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7909  // Perform square and right shift by 1
7910  // Handle odd xlen case first, then for even xlen do the following
7911  // jlong carry = 0;
7912  // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7913  //     huge_128 product = x[j:j+1] * x[j:j+1];
7914  //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7915  //     z[i+2:i+3] = (jlong)(product >>> 1);
7916  //     carry = (jlong)product;
7917  // }
7918
7919  xorq(tmp5, tmp5);     // carry
7920  xorq(rdxReg, rdxReg);
7921  xorl(tmp1, tmp1);     // index for x
7922  xorl(tmp4, tmp4);     // index for z
7923
7924  Label L_first_loop, L_first_loop_exit;
7925
7926  testl(xlen, 1);
7927  jccb(Assembler::zero, L_first_loop); //jump if xlen is even
7928
7929  // Square and right shift by 1 the odd element using 32 bit multiply
7930  movl(raxReg, Address(x, tmp1, Address::times_4, 0));
7931  imulq(raxReg, raxReg);
7932  shrq(raxReg, 1);
7933  adcq(tmp5, 0);
7934  movq(Address(z, tmp4, Address::times_4, 0), raxReg);
7935  incrementl(tmp1);
7936  addl(tmp4, 2);
7937
7938  // Square and right shift by 1 the rest using 64 bit multiply
7939  bind(L_first_loop);
7940  cmpptr(tmp1, xlen);
7941  jccb(Assembler::equal, L_first_loop_exit);
7942
7943  // Square
7944  movq(raxReg, Address(x, tmp1, Address::times_4,  0));
7945  rorq(raxReg, 32);    // convert big-endian to little-endian
7946  mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
7947
7948  // Right shift by 1 and save carry
7949  shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
7950  rcrq(rdxReg, 1);
7951  rcrq(raxReg, 1);
7952  adcq(tmp5, 0);
7953
7954  // Store result in z
7955  movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
7956  movq(Address(z, tmp4, Address::times_4, 8), raxReg);
7957
7958  // Update indices for x and z
7959  addl(tmp1, 2);
7960  addl(tmp4, 4);
7961  jmp(L_first_loop);
7962
7963  bind(L_first_loop_exit);
7964}
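
// Reference model of one 64-bit step of the loop above (illustrative only, not emitted code),
// assuming GCC/Clang's unsigned __int128: square the word, shift the running 129-bit value
// right by one bit, and remember the bit that falls off the bottom as the next carry.
static inline void square_rshift_step_ref(uint64_t x, uint64_t* carry, uint64_t* hi, uint64_t* lo) {
  unsigned __int128 p = (unsigned __int128)x * x;
  uint64_t dropped_bit = (uint64_t)p & 1;           // bit shifted out at the bottom
  *hi = (uint64_t)(p >> 65) | (*carry << 63);       // previous carry enters at the top
  *lo = (uint64_t)(p >> 1);
  *carry = dropped_bit;
}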
7965
7966
7967/**
7968 * Perform the following multiply add operation using BMI2 instructions
7969 * carry:sum = sum + op1*op2 + carry
7970 * op2 should be in rdx
7971 * op2 is preserved, all other registers are modified
7972 */
7973void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
7974  // assert op2 is rdx
7975  mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
7976  addq(sum, carry);
7977  adcq(tmp2, 0);
7978  addq(sum, op1);
7979  adcq(tmp2, 0);
7980  movq(carry, tmp2);
7981}
7982
7983/**
7984 * Perform the following multiply add operation:
7985 * carry:sum = sum + op1*op2 + carry
7986 * Preserves op1, op2 and modifies rest of registers
7987 */
7988void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
7989  // rdx:rax = op1 * op2
7990  movq(raxReg, op2);
7991  mulq(op1);
7992
7993  //  rdx:rax = sum + carry + rdx:rax
7994  addq(sum, carry);
7995  adcq(rdxReg, 0);
7996  addq(sum, raxReg);
7997  adcq(rdxReg, 0);
7998
7999  // carry:sum = rdx:sum
8000  movq(carry, rdxReg);
8001}
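
// Reference model shared by multiply_add_64() and multiply_add_64_bmi2() (illustrative only,
// not emitted code), assuming GCC/Clang's unsigned __int128: carry:sum = sum + op1*op2 + carry.
static inline void multiply_add_64_ref(uint64_t* sum, uint64_t op1, uint64_t op2, uint64_t* carry) {
  unsigned __int128 t = (unsigned __int128)op1 * op2 + *sum + *carry;
  *sum   = (uint64_t)t;
  *carry = (uint64_t)(t >> 64);
}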
8002
8003/**
8004 * Add 64 bit long carry into z[] with carry propagation.
8005 * Preserves z and carry register values and modifies rest of registers.
8006 *
8007 */
8008void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
8009  Label L_fourth_loop, L_fourth_loop_exit;
8010
8011  movl(tmp1, 1);
8012  subl(zlen, 2);
8013  addq(Address(z, zlen, Address::times_4, 0), carry);
8014
8015  bind(L_fourth_loop);
8016  jccb(Assembler::carryClear, L_fourth_loop_exit);
8017  subl(zlen, 2);
8018  jccb(Assembler::negative, L_fourth_loop_exit);
8019  addq(Address(z, zlen, Address::times_4, 0), tmp1);
8020  jmp(L_fourth_loop);
8021  bind(L_fourth_loop_exit);
8022}
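
// Reference model (illustrative only, word order simplified to little-endian): add the 64-bit
// carry into the least significant word and ripple any overflow toward the most significant
// word, which is what the loop above does on the big-endian int[] layout of z.
static inline void add_one_64_ref(uint64_t* z, int nwords, uint64_t carry) {
  int i = 0;
  unsigned __int128 t = (unsigned __int128)z[i] + carry;
  z[i] = (uint64_t)t;
  while ((t >> 64) != 0 && ++i < nwords) {   // keep rippling while a carry remains
    t = (unsigned __int128)z[i] + 1;
    z[i] = (uint64_t)t;
  }
}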
8023
8024/**
8025 * Shift z[] left by 1 bit.
8026 * Preserves x, len, z and zlen registers and modifies rest of the registers.
8027 *
8028 */
8029void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
8030
8031  Label L_fifth_loop, L_fifth_loop_exit;
8032
8033  // Fifth loop
8034  // Perform primitiveLeftShift(z, zlen, 1)
8035
8036  const Register prev_carry = tmp1;
8037  const Register new_carry = tmp4;
8038  const Register value = tmp2;
8039  const Register zidx = tmp3;
8040
8041  // int zidx, carry;
8042  // long value;
8043  // carry = 0;
8044  // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
8045  //    (carry:value) = (z[zidx] << 1) | carry;
8046  //    z[zidx] = value;
8047  // }
8048
8049  movl(zidx, zlen);
8050  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
8051
8052  bind(L_fifth_loop);
8053  decl(zidx);  // Use decl to preserve carry flag
8054  decl(zidx);
8055  jccb(Assembler::negative, L_fifth_loop_exit);
8056
8057  if (UseBMI2Instructions) {
8058     movq(value, Address(z, zidx, Address::times_4, 0));
8059     rclq(value, 1);
8060     rorxq(value, value, 32);
8061     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
8062  }
8063  else {
8064    // clear new_carry
8065    xorl(new_carry, new_carry);
8066
8067    // Shift z[i] by 1, or in previous carry and save new carry
8068    movq(value, Address(z, zidx, Address::times_4, 0));
8069    shlq(value, 1);
8070    adcl(new_carry, 0);
8071
8072    orq(value, prev_carry);
8073    rorq(value, 0x20);
8074    movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
8075
8076    // Set previous carry = new carry
8077    movl(prev_carry, new_carry);
8078  }
8079  jmp(L_fifth_loop);
8080
8081  bind(L_fifth_loop_exit);
8082}
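
// Reference model (illustrative only, word order simplified to little-endian): shift the
// multi-word value left by one bit, carrying the top bit of each word into the next. The loop
// above does the same on the big-endian int[] layout, walking from the least significant pair.
static inline void lshift_by_1_ref(uint64_t* z, int nwords) {
  uint64_t carry = 0;                       // bit shifted in at the bottom of each word
  for (int i = 0; i < nwords; i++) {        // least significant word first
    uint64_t next_carry = z[i] >> 63;
    z[i] = (z[i] << 1) | carry;
    carry = next_carry;
  }
}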
8083
8084
8085/**
8086 * Code for BigInteger::squareToLen() intrinsic
8087 *
8088 * rdi: x
8089 * rsi: len
8090 * r8:  z
8091 * rcx: zlen
8092 * r12: tmp1
8093 * r13: tmp2
8094 * r14: tmp3
8095 * r15: tmp4
8096 * rbx: tmp5
8097 *
8098 */
8099void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8100
8101  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, fifth_loop, fifth_loop_exit, L_last_x, L_multiply;
8102  push(tmp1);
8103  push(tmp2);
8104  push(tmp3);
8105  push(tmp4);
8106  push(tmp5);
8107
8108  // First loop
8109  // Store the squares, right shifted one bit (i.e., divided by 2).
8110  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
8111
8112  // Add in off-diagonal sums.
8113  //
8114  // Second, third (nested) and fourth loops.
8115  // zlen +=2;
8116  // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
8117  //    carry = 0;
8118  //    long op2 = x[xidx:xidx+1];
8119  //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
8120  //       k -= 2;
8121  //       long op1 = x[j:j+1];
8122  //       long sum = z[k:k+1];
8123  //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
8124  //       z[k:k+1] = sum;
8125  //    }
8126  //    add_one_64(z, k, carry, tmp_regs);
8127  // }
8128
8129  const Register carry = tmp5;
8130  const Register sum = tmp3;
8131  const Register op1 = tmp4;
8132  Register op2 = tmp2;
8133
8134  push(zlen);
8135  push(len);
8136  addl(zlen,2);
8137  bind(L_second_loop);
8138  xorq(carry, carry);
8139  subl(zlen, 4);
8140  subl(len, 2);
8141  push(zlen);
8142  push(len);
8143  cmpl(len, 0);
8144  jccb(Assembler::lessEqual, L_second_loop_exit);
8145
8146  // Multiply an array by one 64 bit long.
8147  if (UseBMI2Instructions) {
8148    op2 = rdxReg;
8149    movq(op2, Address(x, len, Address::times_4,  0));
8150    rorxq(op2, op2, 32);
8151  }
8152  else {
8153    movq(op2, Address(x, len, Address::times_4,  0));
8154    rorq(op2, 32);
8155  }
8156
8157  bind(L_third_loop);
8158  decrementl(len);
8159  jccb(Assembler::negative, L_third_loop_exit);
8160  decrementl(len);
8161  jccb(Assembler::negative, L_last_x);
8162
8163  movq(op1, Address(x, len, Address::times_4,  0));
8164  rorq(op1, 32);
8165
8166  bind(L_multiply);
8167  subl(zlen, 2);
8168  movq(sum, Address(z, zlen, Address::times_4,  0));
8169
8170  // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
8171  if (UseBMI2Instructions) {
8172    multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
8173  }
8174  else {
8175    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8176  }
8177
8178  movq(Address(z, zlen, Address::times_4, 0), sum);
8179
8180  jmp(L_third_loop);
8181  bind(L_third_loop_exit);
8182
8183  // Fourth loop
8184  // Add 64 bit long carry into z with carry propagation.
8185  // Uses offsetted zlen.
8186  add_one_64(z, zlen, carry, tmp1);
8187
8188  pop(len);
8189  pop(zlen);
8190  jmp(L_second_loop);
8191
8192  // Next infrequent code is moved outside loops.
8193  bind(L_last_x);
8194  movl(op1, Address(x, 0));
8195  jmp(L_multiply);
8196
8197  bind(L_second_loop_exit);
8198  pop(len);
8199  pop(zlen);
8200  pop(len);
8201  pop(zlen);
8202
8203  // Fifth loop
8204  // Shift z left 1 bit.
8205  lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8206
8207  // z[zlen-1] |= x[len-1] & 1;
8208  movl(tmp3, Address(x, len, Address::times_4, -4));
8209  andl(tmp3, 1);
8210  orl(Address(z, zlen, Address::times_4,  -4), tmp3);
8211
8212  pop(tmp5);
8213  pop(tmp4);
8214  pop(tmp3);
8215  pop(tmp2);
8216  pop(tmp1);
8217}
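
// Why the final shift-by-one works (illustrative summary): for x = sum x_i*B^i,
//   x^2 = sum x_i^2 * B^(2i)  +  2 * sum_{i<j} x_i*x_j * B^(i+j).
// The first loop (square_rshift) stores the diagonal squares already divided by 2, and the
// nested loops add each off-diagonal product x_i*x_j only once (not doubled), so at that point
// z holds x^2 / 2 (with the low bit lost by the halving). lshift_by_1 then multiplies by 2,
// and the final "z[zlen-1] |= x[len-1] & 1" restores that low bit, since the low bit of x^2
// equals the low bit of x[len-1].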
8218
8219/**
8220 * Helper function for mul_add()
8221 * Multiply the in[] by int k and add to out[] starting at offset offs using
8222 * 128 bit by 32 bit multiply and return the carry in tmp5.
8223 * Only the quad-int-aligned portion of in[] is processed by this function.
8224 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
8225 * This function preserves the out, in and k registers.
8226 * len and offset point to the appropriate index in "in" and "out" respectively.
8227 * tmp5 holds the carry.
8228 * The other registers are temporary and are modified.
8229 *
8230 */
8231void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8232  Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8233  Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8234
8235  Label L_first_loop, L_first_loop_exit;
8236
8237  movl(tmp1, len);
8238  shrl(tmp1, 2);
8239
8240  bind(L_first_loop);
8241  subl(tmp1, 1);
8242  jccb(Assembler::negative, L_first_loop_exit);
8243
8244  subl(len, 4);
8245  subl(offset, 4);
8246
8247  Register op2 = tmp2;
8248  const Register sum = tmp3;
8249  const Register op1 = tmp4;
8250  const Register carry = tmp5;
8251
8252  if (UseBMI2Instructions) {
8253    op2 = rdxReg;
8254  }
8255
8256  movq(op1, Address(in, len, Address::times_4,  8));
8257  rorq(op1, 32);
8258  movq(sum, Address(out, offset, Address::times_4,  8));
8259  rorq(sum, 32);
8260  if (UseBMI2Instructions) {
8261    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8262  }
8263  else {
8264    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8265  }
8266  // Store back in big endian from little endian
8267  rorq(sum, 0x20);
8268  movq(Address(out, offset, Address::times_4,  8), sum);
8269
8270  movq(op1, Address(in, len, Address::times_4,  0));
8271  rorq(op1, 32);
8272  movq(sum, Address(out, offset, Address::times_4,  0));
8273  rorq(sum, 32);
8274  if (UseBMI2Instructions) {
8275    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8276  }
8277  else {
8278    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8279  }
8280  // Store back in big endian from little endian
8281  rorq(sum, 0x20);
8282  movq(Address(out, offset, Address::times_4,  0), sum);
8283
8284  jmp(L_first_loop);
8285  bind(L_first_loop_exit);
8286}
8287
8288/**
8289 * Code for BigInteger::mulAdd() intrinsic
8290 *
8291 * rdi: out
8292 * rsi: in
8293 * r11: offs (out.length - offset)
8294 * rcx: len
8295 * r8:  k
8296 * r12: tmp1
8297 * r13: tmp2
8298 * r14: tmp3
8299 * r15: tmp4
8300 * rbx: tmp5
8301 * Multiply the in[] by word k and add to out[], return the carry in rax
8302 */
8303void MacroAssembler::mul_add(Register out, Register in, Register offs,
8304   Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8305   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8306
8307  Label L_carry, L_last_in, L_done;
8308
8309// carry = 0;
8310// for (int j=len-1; j >= 0; j--) {
8311//    long product = (in[j] & LONG_MASK) * kLong +
8312//                   (out[offs] & LONG_MASK) + carry;
8313//    out[offs--] = (int)product;
8314//    carry = product >>> 32;
8315// }
8316//
8317  push(tmp1);
8318  push(tmp2);
8319  push(tmp3);
8320  push(tmp4);
8321  push(tmp5);
8322
8323  Register op2 = tmp2;
8324  const Register sum = tmp3;
8325  const Register op1 = tmp4;
8326  const Register carry =  tmp5;
8327
8328  if (UseBMI2Instructions) {
8329    op2 = rdxReg;
8330    movl(op2, k);
8331  }
8332  else {
8333    movl(op2, k);
8334  }
8335
8336  xorq(carry, carry);
8337
8338  //First loop
8339
8340  //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
8341  //The carry is in tmp5
8342  mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
8343
8344  //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
8345  decrementl(len);
8346  jccb(Assembler::negative, L_carry);
8347  decrementl(len);
8348  jccb(Assembler::negative, L_last_in);
8349
8350  movq(op1, Address(in, len, Address::times_4,  0));
8351  rorq(op1, 32);
8352
8353  subl(offs, 2);
8354  movq(sum, Address(out, offs, Address::times_4,  0));
8355  rorq(sum, 32);
8356
8357  if (UseBMI2Instructions) {
8358    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8359  }
8360  else {
8361    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8362  }
8363
8364  // Store back in big endian from little endian
8365  rorq(sum, 0x20);
8366  movq(Address(out, offs, Address::times_4,  0), sum);
8367
8368  testl(len, len);
8369  jccb(Assembler::zero, L_carry);
8370
8371  //Multiply the last in[] entry, if any
8372  bind(L_last_in);
8373  movl(op1, Address(in, 0));
8374  movl(sum, Address(out, offs, Address::times_4,  -4));
8375
8376  movl(raxReg, k);
8377  mull(op1); //tmp4 * eax -> edx:eax
8378  addl(sum, carry);
8379  adcl(rdxReg, 0);
8380  addl(sum, raxReg);
8381  adcl(rdxReg, 0);
8382  movl(carry, rdxReg);
8383
8384  movl(Address(out, offs, Address::times_4,  -4), sum);
8385
8386  bind(L_carry);
8387  //return tmp5/carry as carry in rax
8388  movl(rax, carry);
8389
8390  bind(L_done);
8391  pop(tmp5);
8392  pop(tmp4);
8393  pop(tmp3);
8394  pop(tmp2);
8395  pop(tmp1);
8396}
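
// Reference model of a single 32-bit step (illustrative only, not emitted code), matching the
// Java pseudocode in the comment above: out[j] = (int)(in[j]*k + out[j] + carry),
// carry = product >>> 32.
static inline uint32_t mul_add_step_ref(uint32_t* out_word, uint32_t in_word, uint32_t k, uint32_t carry) {
  uint64_t product = (uint64_t)in_word * k + *out_word + carry;
  *out_word = (uint32_t)product;
  return (uint32_t)(product >> 32);
}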
8397#endif
8398
8399/**
8400 * Emits code to update CRC-32 with a byte value according to constants in table
8401 *
8402 * @param [in,out]crc   Register containing the crc.
8403 * @param [in]val       Register containing the byte to fold into the CRC.
8404 * @param [in]table     Register containing the table of crc constants.
8405 *
8406 * uint32_t crc;
8407 * val = crc_table[(val ^ crc) & 0xFF];
8408 * crc = val ^ (crc >> 8);
8409 *
8410 */
8411void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8412  xorl(val, crc);
8413  andl(val, 0xFF);
8414  shrl(crc, 8); // unsigned shift
8415  xorl(crc, Address(table, val, Address::times_4, 0));
8416}
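
// Reference model (illustrative only, not emitted code): the classic byte-at-a-time,
// table-driven CRC-32 update that the four instructions above implement.
static inline uint32_t update_byte_crc32_ref(uint32_t crc, uint8_t val, const uint32_t* table) {
  return table[(crc ^ val) & 0xFF] ^ (crc >> 8);
}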
8417
8418/**
8419 * Fold 128-bit data chunk
8420 */
8421void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8422  if (UseAVX > 0) {
8423    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
8424    vpclmulldq(xcrc, xK, xcrc); // [63:0]
8425    vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
8426    pxor(xcrc, xtmp);
8427  } else {
8428    movdqa(xtmp, xcrc);
8429    pclmulhdq(xtmp, xK);   // [123:64]
8430    pclmulldq(xcrc, xK);   // [63:0]
8431    pxor(xcrc, xtmp);
8432    movdqu(xtmp, Address(buf, offset));
8433    pxor(xcrc, xtmp);
8434  }
8435}
8436
8437void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8438  if (UseAVX > 0) {
8439    vpclmulhdq(xtmp, xK, xcrc);
8440    vpclmulldq(xcrc, xK, xcrc);
8441    pxor(xcrc, xbuf);
8442    pxor(xcrc, xtmp);
8443  } else {
8444    movdqa(xtmp, xcrc);
8445    pclmulhdq(xtmp, xK);
8446    pclmulldq(xcrc, xK);
8447    pxor(xcrc, xbuf);
8448    pxor(xcrc, xtmp);
8449  }
8450}
8451
8452/**
8453 * 8-bit folds to compute 32-bit CRC
8454 *
8455 * uint64_t xcrc;
8456 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8457 */
8458void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8459  movdl(tmp, xcrc);
8460  andl(tmp, 0xFF);
8461  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8462  psrldq(xcrc, 1); // unsigned shift one byte
8463  pxor(xcrc, xtmp);
8464}
8465
8466/**
8467 * uint32_t crc;
8468 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8469 */
8470void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8471  movl(tmp, crc);
8472  andl(tmp, 0xFF);
8473  shrl(crc, 8);
8474  xorl(crc, Address(table, tmp, Address::times_4, 0));
8475}
8476
8477/**
8478 * @param crc   register containing existing CRC (32-bit)
8479 * @param buf   register pointing to input byte buffer (byte*)
8480 * @param len   register containing number of bytes
8481 * @param table register that will contain address of CRC table
8482 * @param tmp   scratch register
8483 */
8484void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8485  assert_different_registers(crc, buf, len, table, tmp, rax);
8486
8487  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8488  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8489
8490  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
8491  // context for the registers used, where all instructions below use 128-bit mode.
8492  // On EVEX without VL and BW, these instructions will all be AVX.
8493  if (VM_Version::supports_avx512vlbw()) {
8494    movl(tmp, 0xffff);
8495    kmovwl(k1, tmp);
8496  }
8497
8498  lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8499  notl(crc); // ~crc
8500  cmpl(len, 16);
8501  jcc(Assembler::less, L_tail);
8502
8503  // Align buffer to 16 bytes
8504  movl(tmp, buf);
8505  andl(tmp, 0xF);
8506  jccb(Assembler::zero, L_aligned);
8507  subl(tmp,  16);
8508  addl(len, tmp);
8509
8510  align(4);
8511  BIND(L_align_loop);
8512  movsbl(rax, Address(buf, 0)); // load byte with sign extension
8513  update_byte_crc32(crc, rax, table);
8514  increment(buf);
8515  incrementl(tmp);
8516  jccb(Assembler::less, L_align_loop);
8517
8518  BIND(L_aligned);
8519  movl(tmp, len); // save
8520  shrl(len, 4);
8521  jcc(Assembler::zero, L_tail_restore);
8522
8523  // Fold crc into first bytes of vector
8524  movdqa(xmm1, Address(buf, 0));
8525  movdl(rax, xmm1);
8526  xorl(crc, rax);
8527  pinsrd(xmm1, crc, 0);
8528  addptr(buf, 16);
8529  subl(len, 4); // len > 0
8530  jcc(Assembler::less, L_fold_tail);
8531
8532  movdqa(xmm2, Address(buf,  0));
8533  movdqa(xmm3, Address(buf, 16));
8534  movdqa(xmm4, Address(buf, 32));
8535  addptr(buf, 48);
8536  subl(len, 3);
8537  jcc(Assembler::lessEqual, L_fold_512b);
8538
8539  // Fold total 512 bits of polynomial on each iteration,
8540  // 128 bits per each of 4 parallel streams.
8541  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
8542
8543  align(32);
8544  BIND(L_fold_512b_loop);
8545  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
8546  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8547  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8548  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8549  addptr(buf, 64);
8550  subl(len, 4);
8551  jcc(Assembler::greater, L_fold_512b_loop);
8552
8553  // Fold 512 bits to 128 bits.
8554  BIND(L_fold_512b);
8555  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
8556  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8557  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8558  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8559
8560  // Fold the rest of 128 bits data chunks
8561  BIND(L_fold_tail);
8562  addl(len, 3);
8563  jccb(Assembler::lessEqual, L_fold_128b);
8564  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
8565
8566  BIND(L_fold_tail_loop);
8567  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
8568  addptr(buf, 16);
8569  decrementl(len);
8570  jccb(Assembler::greater, L_fold_tail_loop);
8571
8572  // Fold 128 bits in xmm1 down into 32 bits in crc register.
8573  BIND(L_fold_128b);
8574  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
8575  if (UseAVX > 0) {
8576    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8577    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8578    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8579  } else {
8580    movdqa(xmm2, xmm0);
8581    pclmulqdq(xmm2, xmm1, 0x1);
8582    movdqa(xmm3, xmm0);
8583    pand(xmm3, xmm2);
8584    pclmulqdq(xmm0, xmm3, 0x1);
8585  }
8586  psrldq(xmm1, 8);
8587  psrldq(xmm2, 4);
8588  pxor(xmm0, xmm1);
8589  pxor(xmm0, xmm2);
8590
8591  // 8 8-bit folds to compute 32-bit CRC.
8592  for (int j = 0; j < 4; j++) {
8593    fold_8bit_crc32(xmm0, table, xmm1, rax);
8594  }
8595  movdl(crc, xmm0); // mov 32 bits to general register
8596  for (int j = 0; j < 4; j++) {
8597    fold_8bit_crc32(crc, table, rax);
8598  }
8599
8600  BIND(L_tail_restore);
8601  movl(len, tmp); // restore
8602  BIND(L_tail);
8603  andl(len, 0xf);
8604  jccb(Assembler::zero, L_exit);
8605
8606  // Fold the rest of bytes
8607  align(4);
8608  BIND(L_tail_loop);
8609  movsbl(rax, Address(buf, 0)); // load byte with sign extension
8610  update_byte_crc32(crc, rax, table);
8611  increment(buf);
8612  decrementl(len);
8613  jccb(Assembler::greater, L_tail_loop);
8614
8615  BIND(L_exit);
8616  notl(crc); // ~crc
8617}
8618
8619#ifdef _LP64
8620// S. Gueron / Information Processing Letters 112 (2012) 184
8621// Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
8622// Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
8623// Output: the 64-bit carry-less product of B * CONST
8624void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
8625                                     Register tmp1, Register tmp2, Register tmp3) {
8626  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
8627  if (n > 0) {
8628    addq(tmp3, n * 256 * 8);
8629  }
8630  //    Q1 = TABLEExt[n][B & 0xFF];
8631  movl(tmp1, in);
8632  andl(tmp1, 0x000000FF);
8633  shll(tmp1, 3);
8634  addq(tmp1, tmp3);
8635  movq(tmp1, Address(tmp1, 0));
8636
8637  //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
8638  movl(tmp2, in);
8639  shrl(tmp2, 8);
8640  andl(tmp2, 0x000000FF);
8641  shll(tmp2, 3);
8642  addq(tmp2, tmp3);
8643  movq(tmp2, Address(tmp2, 0));
8644
8645  shlq(tmp2, 8);
8646  xorq(tmp1, tmp2);
8647
8648  //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
8649  movl(tmp2, in);
8650  shrl(tmp2, 16);
8651  andl(tmp2, 0x000000FF);
8652  shll(tmp2, 3);
8653  addq(tmp2, tmp3);
8654  movq(tmp2, Address(tmp2, 0));
8655
8656  shlq(tmp2, 16);
8657  xorq(tmp1, tmp2);
8658
8659  //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
8660  shrl(in, 24);
8661  andl(in, 0x000000FF);
8662  shll(in, 3);
8663  addq(in, tmp3);
8664  movq(in, Address(in, 0));
8665
8666  shlq(in, 24);
8667  xorq(in, tmp1);
8668  //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
8669}
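
// Reference model of Algorithm 4 above (illustrative only, not emitted code): assemble the
// 64-bit carry-less product byte by byte from the precomputed sub-table for chunk n
// (256 64-bit entries per sub-table, as in the TABLEExt comments above).
static inline uint64_t crc32c_ipl_alg4_ref(uint32_t b, uint32_t n, const uint64_t* table) {
  const uint64_t* t = table + n * 256;   // TABLEExt[n]
  return  t[ b        & 0xFF]
       ^ (t[(b >>  8) & 0xFF] <<  8)
       ^ (t[(b >> 16) & 0xFF] << 16)
       ^ (t[(b >> 24) & 0xFF] << 24);
}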
8670
8671void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
8672                                      Register in_out,
8673                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
8674                                      XMMRegister w_xtmp2,
8675                                      Register tmp1,
8676                                      Register n_tmp2, Register n_tmp3) {
8677  if (is_pclmulqdq_supported) {
8678    movdl(w_xtmp1, in_out); // modified blindly
8679
8680    movl(tmp1, const_or_pre_comp_const_index);
8681    movdl(w_xtmp2, tmp1);
8682    pclmulqdq(w_xtmp1, w_xtmp2, 0);
8683
8684    movdq(in_out, w_xtmp1);
8685  } else {
8686    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
8687  }
8688}
8689
8690// Recombination Alternative 2: No bit-reflections
8691// T1 = (CRC_A * U1) << 1
8692// T2 = (CRC_B * U2) << 1
8693// C1 = T1 >> 32
8694// C2 = T2 >> 32
8695// T1 = T1 & 0xFFFFFFFF
8696// T2 = T2 & 0xFFFFFFFF
8697// T1 = CRC32(0, T1)
8698// T2 = CRC32(0, T2)
8699// C1 = C1 ^ T1
8700// C2 = C2 ^ T2
8701// CRC = C1 ^ C2 ^ CRC_C
8702void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
8703                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8704                                     Register tmp1, Register tmp2,
8705                                     Register n_tmp3) {
8706  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8707  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8708  shlq(in_out, 1);
8709  movl(tmp1, in_out);
8710  shrq(in_out, 32);
8711  xorl(tmp2, tmp2);
8712  crc32(tmp2, tmp1, 4);
8713  xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
8714  shlq(in1, 1);
8715  movl(tmp1, in1);
8716  shrq(in1, 32);
8717  xorl(tmp2, tmp2);
8718  crc32(tmp2, tmp1, 4);
8719  xorl(in1, tmp2);
8720  xorl(in_out, in1);
8721  xorl(in_out, in2);
8722}
8723
8724// Set N to predefined value
8725// Subtract it from the length of the buffer
8726// execute in a loop:
8727// CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
8728// for i = 1 to N do
8729//  CRC_A = CRC32(CRC_A, A[i])
8730//  CRC_B = CRC32(CRC_B, B[i])
8731//  CRC_C = CRC32(CRC_C, C[i])
8732// end for
8733// Recombine
8734void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
8735                                       Register in_out1, Register in_out2, Register in_out3,
8736                                       Register tmp1, Register tmp2, Register tmp3,
8737                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8738                                       Register tmp4, Register tmp5,
8739                                       Register n_tmp6) {
8740  Label L_processPartitions;
8741  Label L_processPartition;
8742  Label L_exit;
8743
8744  bind(L_processPartitions);
8745  cmpl(in_out1, 3 * size);
8746  jcc(Assembler::less, L_exit);
8747    xorl(tmp1, tmp1);
8748    xorl(tmp2, tmp2);
8749    movq(tmp3, in_out2);
8750    addq(tmp3, size);
8751
8752    bind(L_processPartition);
8753      crc32(in_out3, Address(in_out2, 0), 8);
8754      crc32(tmp1, Address(in_out2, size), 8);
8755      crc32(tmp2, Address(in_out2, size * 2), 8);
8756      addq(in_out2, 8);
8757      cmpq(in_out2, tmp3);
8758      jcc(Assembler::less, L_processPartition);
8759    crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
8760            w_xtmp1, w_xtmp2, w_xtmp3,
8761            tmp4, tmp5,
8762            n_tmp6);
8763    addq(in_out2, 2 * size);
8764    subl(in_out1, 3 * size);
8765    jmp(L_processPartitions);
8766
8767  bind(L_exit);
8768}
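
// Illustrative sketch of the partitioned loop above (not emitted code): three independent
// CRC32 dependency chains are kept in flight so the latency of the crc32 instruction is hidden
// by its throughput. __builtin_ia32_crc32di is the GCC/Clang builtin for the 64-bit crc32
// instruction (requires SSE4.2 code generation) and is used here only as a stand-in for the
// code generated above.
static inline void crc32c_three_way_ref(uint32_t* crcA, uint32_t* crcB, uint32_t* crcC,
                                        const uint64_t* a, const uint64_t* b,
                                        const uint64_t* c, size_t nwords) {
  for (size_t i = 0; i < nwords; i++) {
    *crcA = (uint32_t)__builtin_ia32_crc32di(*crcA, a[i]);
    *crcB = (uint32_t)__builtin_ia32_crc32di(*crcB, b[i]);
    *crcC = (uint32_t)__builtin_ia32_crc32di(*crcC, c[i]);
  }
}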
8769#else
8770void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n,
8771                                     Register tmp1, Register tmp2, Register tmp3,
8772                                     XMMRegister xtmp1, XMMRegister xtmp2) {
8773  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
8774  if (n > 0) {
8775    addl(tmp3, n * 256 * 8);
8776  }
8777  //    Q1 = TABLEExt[n][B & 0xFF];
8778  movl(tmp1, in_out);
8779  andl(tmp1, 0x000000FF);
8780  shll(tmp1, 3);
8781  addl(tmp1, tmp3);
8782  movq(xtmp1, Address(tmp1, 0));
8783
8784  //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
8785  movl(tmp2, in_out);
8786  shrl(tmp2, 8);
8787  andl(tmp2, 0x000000FF);
8788  shll(tmp2, 3);
8789  addl(tmp2, tmp3);
8790  movq(xtmp2, Address(tmp2, 0));
8791
8792  psllq(xtmp2, 8);
8793  pxor(xtmp1, xtmp2);
8794
8795  //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
8796  movl(tmp2, in_out);
8797  shrl(tmp2, 16);
8798  andl(tmp2, 0x000000FF);
8799  shll(tmp2, 3);
8800  addl(tmp2, tmp3);
8801  movq(xtmp2, Address(tmp2, 0));
8802
8803  psllq(xtmp2, 16);
8804  pxor(xtmp1, xtmp2);
8805
8806  //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
8807  shrl(in_out, 24);
8808  andl(in_out, 0x000000FF);
8809  shll(in_out, 3);
8810  addl(in_out, tmp3);
8811  movq(xtmp2, Address(in_out, 0));
8812
8813  psllq(xtmp2, 24);
8814  pxor(xtmp1, xtmp2); // Result in CXMM
8815  //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
8816}
8817
8818void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
8819                                      Register in_out,
8820                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
8821                                      XMMRegister w_xtmp2,
8822                                      Register tmp1,
8823                                      Register n_tmp2, Register n_tmp3) {
8824  if (is_pclmulqdq_supported) {
8825    movdl(w_xtmp1, in_out);
8826
8827    movl(tmp1, const_or_pre_comp_const_index);
8828    movdl(w_xtmp2, tmp1);
8829    pclmulqdq(w_xtmp1, w_xtmp2, 0);
8830    // Keep result in XMM since GPR is 32 bit in length
8831  } else {
8832    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2);
8833  }
8834}
8835
8836void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
8837                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8838                                     Register tmp1, Register tmp2,
8839                                     Register n_tmp3) {
8840  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8841  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
8842
8843  psllq(w_xtmp1, 1);
8844  movdl(tmp1, w_xtmp1);
8845  psrlq(w_xtmp1, 32);
8846  movdl(in_out, w_xtmp1);
8847
8848  xorl(tmp2, tmp2);
8849  crc32(tmp2, tmp1, 4);
8850  xorl(in_out, tmp2);
8851
8852  psllq(w_xtmp2, 1);
8853  movdl(tmp1, w_xtmp2);
8854  psrlq(w_xtmp2, 32);
8855  movdl(in1, w_xtmp2);
8856
8857  xorl(tmp2, tmp2);
8858  crc32(tmp2, tmp1, 4);
8859  xorl(in1, tmp2);
8860  xorl(in_out, in1);
8861  xorl(in_out, in2);
8862}
8863
8864void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
8865                                       Register in_out1, Register in_out2, Register in_out3,
8866                                       Register tmp1, Register tmp2, Register tmp3,
8867                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8868                                       Register tmp4, Register tmp5,
8869                                       Register n_tmp6) {
8870  Label L_processPartitions;
8871  Label L_processPartition;
8872  Label L_exit;
8873
8874  bind(L_processPartitions);
8875  cmpl(in_out1, 3 * size);
8876  jcc(Assembler::less, L_exit);
8877    xorl(tmp1, tmp1);
8878    xorl(tmp2, tmp2);
8879    movl(tmp3, in_out2);
8880    addl(tmp3, size);
8881
8882    bind(L_processPartition);
8883      crc32(in_out3, Address(in_out2, 0), 4);
8884      crc32(tmp1, Address(in_out2, size), 4);
8885      crc32(tmp2, Address(in_out2, size*2), 4);
8886      crc32(in_out3, Address(in_out2, 0+4), 4);
8887      crc32(tmp1, Address(in_out2, size+4), 4);
8888      crc32(tmp2, Address(in_out2, size*2+4), 4);
8889      addl(in_out2, 8);
8890      cmpl(in_out2, tmp3);
8891      jcc(Assembler::less, L_processPartition);
8892
8893        push(tmp3);
8894        push(in_out1);
8895        push(in_out2);
8896        tmp4 = tmp3;
8897        tmp5 = in_out1;
8898        n_tmp6 = in_out2;
8899
8900      crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
8901            w_xtmp1, w_xtmp2, w_xtmp3,
8902            tmp4, tmp5,
8903            n_tmp6);
8904
8905        pop(in_out2);
8906        pop(in_out1);
8907        pop(tmp3);
8908
8909    addl(in_out2, 2 * size);
8910    subl(in_out1, 3 * size);
8911    jmp(L_processPartitions);
8912
8913  bind(L_exit);
8914}
8915#endif //LP64
8916
8917#ifdef _LP64
8918// Algorithm 2: Pipelined usage of the CRC32 instruction.
8919// Input: A buffer I of L bytes.
8920// Output: the CRC32C value of the buffer.
8921// Notations:
8922// Write L = 24N + r, with N = floor (L/24).
8923// r = L mod 24 (0 <= r < 24).
8924// Consider I as the concatenation of A|B|C|R, where A, B, C each consist of
8925// N quadwords, and R consists of r bytes.
8926// A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
8927// B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
8928// C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
8929// if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
8930void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
8931                                          Register tmp1, Register tmp2, Register tmp3,
8932                                          Register tmp4, Register tmp5, Register tmp6,
8933                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
8934                                          bool is_pclmulqdq_supported) {
8935  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
8936  Label L_wordByWord;
8937  Label L_byteByByteProlog;
8938  Label L_byteByByte;
8939  Label L_exit;
8940
8941  if (is_pclmulqdq_supported) {
8942    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
8943    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1);
8944
8945    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
8946    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
8947
8948    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
8949    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
8950    assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
8951  } else {
8952    const_or_pre_comp_const_index[0] = 1;
8953    const_or_pre_comp_const_index[1] = 0;
8954
8955    const_or_pre_comp_const_index[2] = 3;
8956    const_or_pre_comp_const_index[3] = 2;
8957
8958    const_or_pre_comp_const_index[4] = 5;
8959    const_or_pre_comp_const_index[5] = 4;
8960   }
8961  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
8962                    in2, in1, in_out,
8963                    tmp1, tmp2, tmp3,
8964                    w_xtmp1, w_xtmp2, w_xtmp3,
8965                    tmp4, tmp5,
8966                    tmp6);
8967  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
8968                    in2, in1, in_out,
8969                    tmp1, tmp2, tmp3,
8970                    w_xtmp1, w_xtmp2, w_xtmp3,
8971                    tmp4, tmp5,
8972                    tmp6);
8973  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
8974                    in2, in1, in_out,
8975                    tmp1, tmp2, tmp3,
8976                    w_xtmp1, w_xtmp2, w_xtmp3,
8977                    tmp4, tmp5,
8978                    tmp6);
8979  movl(tmp1, in2);
8980  andl(tmp1, 0x00000007);
8981  negl(tmp1);
8982  addl(tmp1, in2);
8983  addq(tmp1, in1);
8984
8985  BIND(L_wordByWord);
8986  cmpq(in1, tmp1);
8987  jcc(Assembler::greaterEqual, L_byteByByteProlog);
8988    crc32(in_out, Address(in1, 0), 4);
8989    addq(in1, 4);
8990    jmp(L_wordByWord);
8991
8992  BIND(L_byteByByteProlog);
8993  andl(in2, 0x00000007);
8994  movl(tmp2, 1);
8995
8996  BIND(L_byteByByte);
8997  cmpl(tmp2, in2);
8998  jccb(Assembler::greater, L_exit);
8999    crc32(in_out, Address(in1, 0), 1);
9000    incq(in1);
9001    incl(tmp2);
9002    jmp(L_byteByByte);
9003
9004  BIND(L_exit);
9005}
9006#else
9007void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
9008                                          Register tmp1, Register  tmp2, Register tmp3,
9009                                          Register tmp4, Register  tmp5, Register tmp6,
9010                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
9011                                          bool is_pclmulqdq_supported) {
9012  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
9013  Label L_wordByWord;
9014  Label L_byteByByteProlog;
9015  Label L_byteByByte;
9016  Label L_exit;
9017
9018  if (is_pclmulqdq_supported) {
9019    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
9020    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);
9021
9022    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
9023    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
9024
9025    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
9026    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
9027  } else {
9028    const_or_pre_comp_const_index[0] = 1;
9029    const_or_pre_comp_const_index[1] = 0;
9030
9031    const_or_pre_comp_const_index[2] = 3;
9032    const_or_pre_comp_const_index[3] = 2;
9033
9034    const_or_pre_comp_const_index[4] = 5;
9035    const_or_pre_comp_const_index[5] = 4;
9036  }
9037  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
9038                    in2, in1, in_out,
9039                    tmp1, tmp2, tmp3,
9040                    w_xtmp1, w_xtmp2, w_xtmp3,
9041                    tmp4, tmp5,
9042                    tmp6);
9043  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
9044                    in2, in1, in_out,
9045                    tmp1, tmp2, tmp3,
9046                    w_xtmp1, w_xtmp2, w_xtmp3,
9047                    tmp4, tmp5,
9048                    tmp6);
9049  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
9050                    in2, in1, in_out,
9051                    tmp1, tmp2, tmp3,
9052                    w_xtmp1, w_xtmp2, w_xtmp3,
9053                    tmp4, tmp5,
9054                    tmp6);
9055  movl(tmp1, in2);
9056  andl(tmp1, 0x00000007);
9057  negl(tmp1);
9058  addl(tmp1, in2);
9059  addl(tmp1, in1);
9060
9061  BIND(L_wordByWord);
9062  cmpl(in1, tmp1);
9063  jcc(Assembler::greaterEqual, L_byteByByteProlog);
9064    crc32(in_out, Address(in1,0), 4);
9065    addl(in1, 4);
9066    jmp(L_wordByWord);
9067
9068  BIND(L_byteByByteProlog);
9069  andl(in2, 0x00000007);
9070  movl(tmp2, 1);
9071
9072  BIND(L_byteByByte);
9073  cmpl(tmp2, in2);
9074  jccb(Assembler::greater, L_exit);
9075    movb(tmp1, Address(in1, 0));
9076    crc32(in_out, tmp1, 1);
9077    incl(in1);
9078    incl(tmp2);
9079    jmp(L_byteByByte);
9080
9081  BIND(L_exit);
9082}
9083#endif // LP64
9084#undef BIND
9085#undef BLOCK_COMMENT
9086
9087
9088Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
9089  switch (cond) {
9090    // Note some conditions are synonyms for others
9091    case Assembler::zero:         return Assembler::notZero;
9092    case Assembler::notZero:      return Assembler::zero;
9093    case Assembler::less:         return Assembler::greaterEqual;
9094    case Assembler::lessEqual:    return Assembler::greater;
9095    case Assembler::greater:      return Assembler::lessEqual;
9096    case Assembler::greaterEqual: return Assembler::less;
9097    case Assembler::below:        return Assembler::aboveEqual;
9098    case Assembler::belowEqual:   return Assembler::above;
9099    case Assembler::above:        return Assembler::belowEqual;
9100    case Assembler::aboveEqual:   return Assembler::below;
9101    case Assembler::overflow:     return Assembler::noOverflow;
9102    case Assembler::noOverflow:   return Assembler::overflow;
9103    case Assembler::negative:     return Assembler::positive;
9104    case Assembler::positive:     return Assembler::negative;
9105    case Assembler::parity:       return Assembler::noParity;
9106    case Assembler::noParity:     return Assembler::parity;
9107  }
9108  ShouldNotReachHere(); return Assembler::overflow;
9109}
9110
9111SkipIfEqual::SkipIfEqual(
9112    MacroAssembler* masm, const bool* flag_addr, bool value) {
9113  _masm = masm;
9114  _masm->cmp8(ExternalAddress((address)flag_addr), value);
9115  _masm->jcc(Assembler::equal, _label);
9116}
9117
9118SkipIfEqual::~SkipIfEqual() {
9119  _masm->bind(_label);
9120}
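
// Typical usage of SkipIfEqual (illustrative sketch with a hypothetical flag): the constructor
// emits a cmp8/jcc over the code generated inside the scope, and the destructor binds the skip
// target, so the guarded code only executes at runtime when the flag differs from 'value'.
//
//   {
//     SkipIfEqual skip(masm, &SomeTraceFlag, false);   // hypothetical develop flag
//     // ... code emitted here runs only when SomeTraceFlag is true ...
//   }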
9121