1/*
2 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/macroAssembler.hpp"
27#include "asm/macroAssembler.inline.hpp"
28#include "c1/c1_Compilation.hpp"
29#include "c1/c1_LIRAssembler.hpp"
30#include "c1/c1_MacroAssembler.hpp"
31#include "c1/c1_Runtime1.hpp"
32#include "c1/c1_ValueStack.hpp"
33#include "ci/ciArrayKlass.hpp"
34#include "ci/ciInstance.hpp"
35#include "gc/shared/barrierSet.hpp"
36#include "gc/shared/cardTableModRefBS.hpp"
37#include "gc/shared/collectedHeap.hpp"
38#include "nativeInst_x86.hpp"
39#include "oops/objArrayKlass.hpp"
40#include "runtime/sharedRuntime.hpp"
41#include "vmreg_x86.inline.hpp"
42
43
44// These masks are used to provide 128-bit aligned bitmasks to the XMM
45// instructions, to allow sign-masking or sign-bit flipping.  They allow
46// fast versions of NegF/NegD and AbsF/AbsD.
47
48// Note: 'double' and 'long long' have 32-bits alignment on x86.
49static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
50  // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
51  // of 128-bits operands for SSE instructions.
52  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
53  // Store the value to a 128-bits operand.
54  operand[0] = lo;
55  operand[1] = hi;
56  return operand;
57}
58
59// Buffer for 128-bits masks used by SSE instructions.
60static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
61
62// Static initialization during VM startup.
63static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
64static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
65static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
66static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
67
68
69
70NEEDS_CLEANUP // remove this definitions ?
71const Register IC_Klass    = rax;   // where the IC klass is cached
72const Register SYNC_header = rax;   // synchronization header
73const Register SHIFT_count = rcx;   // where count for shift operations must be
74
75#define __ _masm->
76
77
78static void select_different_registers(Register preserve,
79                                       Register extra,
80                                       Register &tmp1,
81                                       Register &tmp2) {
82  if (tmp1 == preserve) {
83    assert_different_registers(tmp1, tmp2, extra);
84    tmp1 = extra;
85  } else if (tmp2 == preserve) {
86    assert_different_registers(tmp1, tmp2, extra);
87    tmp2 = extra;
88  }
89  assert_different_registers(preserve, tmp1, tmp2);
90}
91
92
93
94static void select_different_registers(Register preserve,
95                                       Register extra,
96                                       Register &tmp1,
97                                       Register &tmp2,
98                                       Register &tmp3) {
99  if (tmp1 == preserve) {
100    assert_different_registers(tmp1, tmp2, tmp3, extra);
101    tmp1 = extra;
102  } else if (tmp2 == preserve) {
103    assert_different_registers(tmp1, tmp2, tmp3, extra);
104    tmp2 = extra;
105  } else if (tmp3 == preserve) {
106    assert_different_registers(tmp1, tmp2, tmp3, extra);
107    tmp3 = extra;
108  }
109  assert_different_registers(preserve, tmp1, tmp2, tmp3);
110}
111
112
113
114bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
115  if (opr->is_constant()) {
116    LIR_Const* constant = opr->as_constant_ptr();
117    switch (constant->type()) {
118      case T_INT: {
119        return true;
120      }
121
122      default:
123        return false;
124    }
125  }
126  return false;
127}
128
129
130LIR_Opr LIR_Assembler::receiverOpr() {
131  return FrameMap::receiver_opr;
132}
133
134LIR_Opr LIR_Assembler::osrBufferPointer() {
135  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
136}
137
138//--------------fpu register translations-----------------------
139
140
141address LIR_Assembler::float_constant(float f) {
142  address const_addr = __ float_constant(f);
143  if (const_addr == NULL) {
144    bailout("const section overflow");
145    return __ code()->consts()->start();
146  } else {
147    return const_addr;
148  }
149}
150
151
152address LIR_Assembler::double_constant(double d) {
153  address const_addr = __ double_constant(d);
154  if (const_addr == NULL) {
155    bailout("const section overflow");
156    return __ code()->consts()->start();
157  } else {
158    return const_addr;
159  }
160}
161
162
163void LIR_Assembler::set_24bit_FPU() {
164  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
165}
166
167void LIR_Assembler::reset_FPU() {
168  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
169}
170
171void LIR_Assembler::fpop() {
172  __ fpop();
173}
174
175void LIR_Assembler::fxch(int i) {
176  __ fxch(i);
177}
178
179void LIR_Assembler::fld(int i) {
180  __ fld_s(i);
181}
182
183void LIR_Assembler::ffree(int i) {
184  __ ffree(i);
185}
186
187void LIR_Assembler::breakpoint() {
188  __ int3();
189}
190
191void LIR_Assembler::push(LIR_Opr opr) {
192  if (opr->is_single_cpu()) {
193    __ push_reg(opr->as_register());
194  } else if (opr->is_double_cpu()) {
195    NOT_LP64(__ push_reg(opr->as_register_hi()));
196    __ push_reg(opr->as_register_lo());
197  } else if (opr->is_stack()) {
198    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
199  } else if (opr->is_constant()) {
200    LIR_Const* const_opr = opr->as_constant_ptr();
201    if (const_opr->type() == T_OBJECT) {
202      __ push_oop(const_opr->as_jobject());
203    } else if (const_opr->type() == T_INT) {
204      __ push_jint(const_opr->as_jint());
205    } else {
206      ShouldNotReachHere();
207    }
208
209  } else {
210    ShouldNotReachHere();
211  }
212}
213
214void LIR_Assembler::pop(LIR_Opr opr) {
215  if (opr->is_single_cpu()) {
216    __ pop_reg(opr->as_register());
217  } else {
218    ShouldNotReachHere();
219  }
220}
221
222bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
223  return addr->base()->is_illegal() && addr->index()->is_illegal();
224}
225
226//-------------------------------------------
227
228Address LIR_Assembler::as_Address(LIR_Address* addr) {
229  return as_Address(addr, rscratch1);
230}
231
232Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
233  if (addr->base()->is_illegal()) {
234    assert(addr->index()->is_illegal(), "must be illegal too");
235    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
236    if (! __ reachable(laddr)) {
237      __ movptr(tmp, laddr.addr());
238      Address res(tmp, 0);
239      return res;
240    } else {
241      return __ as_Address(laddr);
242    }
243  }
244
245  Register base = addr->base()->as_pointer_register();
246
247  if (addr->index()->is_illegal()) {
248    return Address( base, addr->disp());
249  } else if (addr->index()->is_cpu_register()) {
250    Register index = addr->index()->as_pointer_register();
251    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
252  } else if (addr->index()->is_constant()) {
253    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
254    assert(Assembler::is_simm32(addr_offset), "must be");
255
256    return Address(base, addr_offset);
257  } else {
258    Unimplemented();
259    return Address();
260  }
261}
262
263
264Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
265  Address base = as_Address(addr);
266  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
267}
268
269
270Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
271  return as_Address(addr);
272}
273
274
275void LIR_Assembler::osr_entry() {
276  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
277  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
278  ValueStack* entry_state = osr_entry->state();
279  int number_of_locks = entry_state->locks_size();
280
281  // we jump here if osr happens with the interpreter
282  // state set up to continue at the beginning of the
283  // loop that triggered osr - in particular, we have
284  // the following registers setup:
285  //
286  // rcx: osr buffer
287  //
288
289  // build frame
290  ciMethod* m = compilation()->method();
291  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
292
293  // OSR buffer is
294  //
295  // locals[nlocals-1..0]
296  // monitors[0..number_of_locks]
297  //
298  // locals is a direct copy of the interpreter frame so in the osr buffer
299  // so first slot in the local array is the last local from the interpreter
300  // and last slot is local[0] (receiver) from the interpreter
301  //
302  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
303  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
304  // in the interpreter frame (the method lock if a sync method)
305
306  // Initialize monitors in the compiled activation.
307  //   rcx: pointer to osr buffer
308  //
309  // All other registers are dead at this point and the locals will be
310  // copied into place by code emitted in the IR.
311
312  Register OSR_buf = osrBufferPointer()->as_pointer_register();
313  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
314    int monitor_offset = BytesPerWord * method()->max_locals() +
315      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
316    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
317    // the OSR buffer using 2 word entries: first the lock and then
318    // the oop.
319    for (int i = 0; i < number_of_locks; i++) {
320      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
321#ifdef ASSERT
322      // verify the interpreter's monitor has a non-null object
323      {
324        Label L;
325        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
326        __ jcc(Assembler::notZero, L);
327        __ stop("locked object is NULL");
328        __ bind(L);
329      }
330#endif
331      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
332      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
333      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
334      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
335    }
336  }
337}
338
339
340// inline cache check; done before the frame is built.
341int LIR_Assembler::check_icache() {
342  Register receiver = FrameMap::receiver_opr->as_register();
343  Register ic_klass = IC_Klass;
344  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
345  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
346  if (!do_post_padding) {
347    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
348    __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
349  }
350  int offset = __ offset();
351  __ inline_cache_check(receiver, IC_Klass);
352  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
353  if (do_post_padding) {
354    // force alignment after the cache check.
355    // It's been verified to be aligned if !VerifyOops
356    __ align(CodeEntryAlignment);
357  }
358  return offset;
359}
360
361
362void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
363  jobject o = NULL;
364  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
365  __ movoop(reg, o);
366  patching_epilog(patch, lir_patch_normal, reg, info);
367}
368
369void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
370  Metadata* o = NULL;
371  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
372  __ mov_metadata(reg, o);
373  patching_epilog(patch, lir_patch_normal, reg, info);
374}
375
376// This specifies the rsp decrement needed to build the frame
377int LIR_Assembler::initial_frame_size_in_bytes() const {
378  // if rounding, must let FrameMap know!
379
380  // The frame_map records size in slots (32bit word)
381
382  // subtract two words to account for return address and link
383  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
384}
385
386
387int LIR_Assembler::emit_exception_handler() {
388  // if the last instruction is a call (typically to do a throw which
389  // is coming at the end after block reordering) the return address
390  // must still point into the code area in order to avoid assertion
391  // failures when searching for the corresponding bci => add a nop
392  // (was bug 5/14/1999 - gri)
393  __ nop();
394
395  // generate code for exception handler
396  address handler_base = __ start_a_stub(exception_handler_size());
397  if (handler_base == NULL) {
398    // not enough space left for the handler
399    bailout("exception handler overflow");
400    return -1;
401  }
402
403  int offset = code_offset();
404
405  // the exception oop and pc are in rax, and rdx
406  // no other registers need to be preserved, so invalidate them
407  __ invalidate_registers(false, true, true, false, true, true);
408
409  // check that there is really an exception
410  __ verify_not_null_oop(rax);
411
412  // search an exception handler (rax: exception oop, rdx: throwing pc)
413  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
414  __ should_not_reach_here();
415  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
416  __ end_a_stub();
417
418  return offset;
419}
420
421
422// Emit the code to remove the frame from the stack in the exception
423// unwind path.
424int LIR_Assembler::emit_unwind_handler() {
425#ifndef PRODUCT
426  if (CommentedAssembly) {
427    _masm->block_comment("Unwind handler");
428  }
429#endif
430
431  int offset = code_offset();
432
433  // Fetch the exception from TLS and clear out exception related thread state
434  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
435  NOT_LP64(__ get_thread(rsi));
436  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
437  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
438  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
439
440  __ bind(_unwind_handler_entry);
441  __ verify_not_null_oop(rax);
442  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
443    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
444  }
445
446  // Preform needed unlocking
447  MonitorExitStub* stub = NULL;
448  if (method()->is_synchronized()) {
449    monitor_address(0, FrameMap::rax_opr);
450    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
451    __ unlock_object(rdi, rsi, rax, *stub->entry());
452    __ bind(*stub->continuation());
453  }
454
455  if (compilation()->env()->dtrace_method_probes()) {
456#ifdef _LP64
457    __ mov(rdi, r15_thread);
458    __ mov_metadata(rsi, method()->constant_encoding());
459#else
460    __ get_thread(rax);
461    __ movptr(Address(rsp, 0), rax);
462    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
463#endif
464    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
465  }
466
467  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
468    __ mov(rax, rbx);  // Restore the exception
469  }
470
471  // remove the activation and dispatch to the unwind handler
472  __ remove_frame(initial_frame_size_in_bytes());
473  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
474
475  // Emit the slow path assembly
476  if (stub != NULL) {
477    stub->emit_code(this);
478  }
479
480  return offset;
481}
482
483
484int LIR_Assembler::emit_deopt_handler() {
485  // if the last instruction is a call (typically to do a throw which
486  // is coming at the end after block reordering) the return address
487  // must still point into the code area in order to avoid assertion
488  // failures when searching for the corresponding bci => add a nop
489  // (was bug 5/14/1999 - gri)
490  __ nop();
491
492  // generate code for exception handler
493  address handler_base = __ start_a_stub(deopt_handler_size());
494  if (handler_base == NULL) {
495    // not enough space left for the handler
496    bailout("deopt handler overflow");
497    return -1;
498  }
499
500  int offset = code_offset();
501  InternalAddress here(__ pc());
502
503  __ pushptr(here.addr());
504  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
505  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
506  __ end_a_stub();
507
508  return offset;
509}
510
511
512void LIR_Assembler::return_op(LIR_Opr result) {
513  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
514  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
515    assert(result->fpu() == 0, "result must already be on TOS");
516  }
517
518  // Pop the stack before the safepoint code
519  __ remove_frame(initial_frame_size_in_bytes());
520
521  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
522    __ reserved_stack_check();
523  }
524
525  bool result_is_oop = result->is_valid() ? result->is_oop() : false;
526
527  // Note: we do not need to round double result; float result has the right precision
528  // the poll sets the condition code, but no data registers
529  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
530
531  if (Assembler::is_polling_page_far()) {
532    __ lea(rscratch1, polling_page);
533    __ relocate(relocInfo::poll_return_type);
534    __ testl(rax, Address(rscratch1, 0));
535  } else {
536    __ testl(rax, polling_page);
537  }
538  __ ret(0);
539}
540
541
542int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
543  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
544  guarantee(info != NULL, "Shouldn't be NULL");
545  int offset = __ offset();
546  if (Assembler::is_polling_page_far()) {
547    __ lea(rscratch1, polling_page);
548    offset = __ offset();
549    add_debug_info_for_branch(info);
550    __ relocate(relocInfo::poll_type);
551    __ testl(rax, Address(rscratch1, 0));
552  } else {
553    add_debug_info_for_branch(info);
554    __ testl(rax, polling_page);
555  }
556  return offset;
557}
558
559
560void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
561  if (from_reg != to_reg) __ mov(to_reg, from_reg);
562}
563
564void LIR_Assembler::swap_reg(Register a, Register b) {
565  __ xchgptr(a, b);
566}
567
568
569void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
570  assert(src->is_constant(), "should not call otherwise");
571  assert(dest->is_register(), "should not call otherwise");
572  LIR_Const* c = src->as_constant_ptr();
573
574  switch (c->type()) {
575    case T_INT: {
576      assert(patch_code == lir_patch_none, "no patching handled here");
577      __ movl(dest->as_register(), c->as_jint());
578      break;
579    }
580
581    case T_ADDRESS: {
582      assert(patch_code == lir_patch_none, "no patching handled here");
583      __ movptr(dest->as_register(), c->as_jint());
584      break;
585    }
586
587    case T_LONG: {
588      assert(patch_code == lir_patch_none, "no patching handled here");
589#ifdef _LP64
590      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
591#else
592      __ movptr(dest->as_register_lo(), c->as_jint_lo());
593      __ movptr(dest->as_register_hi(), c->as_jint_hi());
594#endif // _LP64
595      break;
596    }
597
598    case T_OBJECT: {
599      if (patch_code != lir_patch_none) {
600        jobject2reg_with_patching(dest->as_register(), info);
601      } else {
602        __ movoop(dest->as_register(), c->as_jobject());
603      }
604      break;
605    }
606
607    case T_METADATA: {
608      if (patch_code != lir_patch_none) {
609        klass2reg_with_patching(dest->as_register(), info);
610      } else {
611        __ mov_metadata(dest->as_register(), c->as_metadata());
612      }
613      break;
614    }
615
616    case T_FLOAT: {
617      if (dest->is_single_xmm()) {
618        if (c->is_zero_float()) {
619          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
620        } else {
621          __ movflt(dest->as_xmm_float_reg(),
622                   InternalAddress(float_constant(c->as_jfloat())));
623        }
624      } else {
625        assert(dest->is_single_fpu(), "must be");
626        assert(dest->fpu_regnr() == 0, "dest must be TOS");
627        if (c->is_zero_float()) {
628          __ fldz();
629        } else if (c->is_one_float()) {
630          __ fld1();
631        } else {
632          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
633        }
634      }
635      break;
636    }
637
638    case T_DOUBLE: {
639      if (dest->is_double_xmm()) {
640        if (c->is_zero_double()) {
641          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
642        } else {
643          __ movdbl(dest->as_xmm_double_reg(),
644                    InternalAddress(double_constant(c->as_jdouble())));
645        }
646      } else {
647        assert(dest->is_double_fpu(), "must be");
648        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
649        if (c->is_zero_double()) {
650          __ fldz();
651        } else if (c->is_one_double()) {
652          __ fld1();
653        } else {
654          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
655        }
656      }
657      break;
658    }
659
660    default:
661      ShouldNotReachHere();
662  }
663}
664
665void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
666  assert(src->is_constant(), "should not call otherwise");
667  assert(dest->is_stack(), "should not call otherwise");
668  LIR_Const* c = src->as_constant_ptr();
669
670  switch (c->type()) {
671    case T_INT:  // fall through
672    case T_FLOAT:
673      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
674      break;
675
676    case T_ADDRESS:
677      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
678      break;
679
680    case T_OBJECT:
681      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
682      break;
683
684    case T_LONG:  // fall through
685    case T_DOUBLE:
686#ifdef _LP64
687      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
688                                            lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
689#else
690      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
691                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
692      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
693                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
694#endif // _LP64
695      break;
696
697    default:
698      ShouldNotReachHere();
699  }
700}
701
702void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
703  assert(src->is_constant(), "should not call otherwise");
704  assert(dest->is_address(), "should not call otherwise");
705  LIR_Const* c = src->as_constant_ptr();
706  LIR_Address* addr = dest->as_address_ptr();
707
708  int null_check_here = code_offset();
709  switch (type) {
710    case T_INT:    // fall through
711    case T_FLOAT:
712      __ movl(as_Address(addr), c->as_jint_bits());
713      break;
714
715    case T_ADDRESS:
716      __ movptr(as_Address(addr), c->as_jint_bits());
717      break;
718
719    case T_OBJECT:  // fall through
720    case T_ARRAY:
721      if (c->as_jobject() == NULL) {
722        if (UseCompressedOops && !wide) {
723          __ movl(as_Address(addr), (int32_t)NULL_WORD);
724        } else {
725#ifdef _LP64
726          __ xorptr(rscratch1, rscratch1);
727          null_check_here = code_offset();
728          __ movptr(as_Address(addr), rscratch1);
729#else
730          __ movptr(as_Address(addr), NULL_WORD);
731#endif
732        }
733      } else {
734        if (is_literal_address(addr)) {
735          ShouldNotReachHere();
736          __ movoop(as_Address(addr, noreg), c->as_jobject());
737        } else {
738#ifdef _LP64
739          __ movoop(rscratch1, c->as_jobject());
740          if (UseCompressedOops && !wide) {
741            __ encode_heap_oop(rscratch1);
742            null_check_here = code_offset();
743            __ movl(as_Address_lo(addr), rscratch1);
744          } else {
745            null_check_here = code_offset();
746            __ movptr(as_Address_lo(addr), rscratch1);
747          }
748#else
749          __ movoop(as_Address(addr), c->as_jobject());
750#endif
751        }
752      }
753      break;
754
755    case T_LONG:    // fall through
756    case T_DOUBLE:
757#ifdef _LP64
758      if (is_literal_address(addr)) {
759        ShouldNotReachHere();
760        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
761      } else {
762        __ movptr(r10, (intptr_t)c->as_jlong_bits());
763        null_check_here = code_offset();
764        __ movptr(as_Address_lo(addr), r10);
765      }
766#else
767      // Always reachable in 32bit so this doesn't produce useless move literal
768      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
769      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
770#endif // _LP64
771      break;
772
773    case T_BOOLEAN: // fall through
774    case T_BYTE:
775      __ movb(as_Address(addr), c->as_jint() & 0xFF);
776      break;
777
778    case T_CHAR:    // fall through
779    case T_SHORT:
780      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
781      break;
782
783    default:
784      ShouldNotReachHere();
785  };
786
787  if (info != NULL) {
788    add_debug_info_for_null_check(null_check_here, info);
789  }
790}
791
792
793void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
794  assert(src->is_register(), "should not call otherwise");
795  assert(dest->is_register(), "should not call otherwise");
796
797  // move between cpu-registers
798  if (dest->is_single_cpu()) {
799#ifdef _LP64
800    if (src->type() == T_LONG) {
801      // Can do LONG -> OBJECT
802      move_regs(src->as_register_lo(), dest->as_register());
803      return;
804    }
805#endif
806    assert(src->is_single_cpu(), "must match");
807    if (src->type() == T_OBJECT) {
808      __ verify_oop(src->as_register());
809    }
810    move_regs(src->as_register(), dest->as_register());
811
812  } else if (dest->is_double_cpu()) {
813#ifdef _LP64
814    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
815      // Surprising to me but we can see move of a long to t_object
816      __ verify_oop(src->as_register());
817      move_regs(src->as_register(), dest->as_register_lo());
818      return;
819    }
820#endif
821    assert(src->is_double_cpu(), "must match");
822    Register f_lo = src->as_register_lo();
823    Register f_hi = src->as_register_hi();
824    Register t_lo = dest->as_register_lo();
825    Register t_hi = dest->as_register_hi();
826#ifdef _LP64
827    assert(f_hi == f_lo, "must be same");
828    assert(t_hi == t_lo, "must be same");
829    move_regs(f_lo, t_lo);
830#else
831    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
832
833
834    if (f_lo == t_hi && f_hi == t_lo) {
835      swap_reg(f_lo, f_hi);
836    } else if (f_hi == t_lo) {
837      assert(f_lo != t_hi, "overwriting register");
838      move_regs(f_hi, t_hi);
839      move_regs(f_lo, t_lo);
840    } else {
841      assert(f_hi != t_lo, "overwriting register");
842      move_regs(f_lo, t_lo);
843      move_regs(f_hi, t_hi);
844    }
845#endif // LP64
846
847    // special moves from fpu-register to xmm-register
848    // necessary for method results
849  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
850    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
851    __ fld_s(Address(rsp, 0));
852  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
853    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
854    __ fld_d(Address(rsp, 0));
855  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
856    __ fstp_s(Address(rsp, 0));
857    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
858  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
859    __ fstp_d(Address(rsp, 0));
860    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
861
862    // move between xmm-registers
863  } else if (dest->is_single_xmm()) {
864    assert(src->is_single_xmm(), "must match");
865    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
866  } else if (dest->is_double_xmm()) {
867    assert(src->is_double_xmm(), "must match");
868    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
869
870    // move between fpu-registers (no instruction necessary because of fpu-stack)
871  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
872    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
873    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
874  } else {
875    ShouldNotReachHere();
876  }
877}
878
879void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
880  assert(src->is_register(), "should not call otherwise");
881  assert(dest->is_stack(), "should not call otherwise");
882
883  if (src->is_single_cpu()) {
884    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
885    if (type == T_OBJECT || type == T_ARRAY) {
886      __ verify_oop(src->as_register());
887      __ movptr (dst, src->as_register());
888    } else if (type == T_METADATA) {
889      __ movptr (dst, src->as_register());
890    } else {
891      __ movl (dst, src->as_register());
892    }
893
894  } else if (src->is_double_cpu()) {
895    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
896    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
897    __ movptr (dstLO, src->as_register_lo());
898    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
899
900  } else if (src->is_single_xmm()) {
901    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
902    __ movflt(dst_addr, src->as_xmm_float_reg());
903
904  } else if (src->is_double_xmm()) {
905    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
906    __ movdbl(dst_addr, src->as_xmm_double_reg());
907
908  } else if (src->is_single_fpu()) {
909    assert(src->fpu_regnr() == 0, "argument must be on TOS");
910    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
911    if (pop_fpu_stack)     __ fstp_s (dst_addr);
912    else                   __ fst_s  (dst_addr);
913
914  } else if (src->is_double_fpu()) {
915    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
916    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
917    if (pop_fpu_stack)     __ fstp_d (dst_addr);
918    else                   __ fst_d  (dst_addr);
919
920  } else {
921    ShouldNotReachHere();
922  }
923}
924
925
926void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
927  LIR_Address* to_addr = dest->as_address_ptr();
928  PatchingStub* patch = NULL;
929  Register compressed_src = rscratch1;
930
931  if (type == T_ARRAY || type == T_OBJECT) {
932    __ verify_oop(src->as_register());
933#ifdef _LP64
934    if (UseCompressedOops && !wide) {
935      __ movptr(compressed_src, src->as_register());
936      __ encode_heap_oop(compressed_src);
937      if (patch_code != lir_patch_none) {
938        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
939      }
940    }
941#endif
942  }
943
944  if (patch_code != lir_patch_none) {
945    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
946    Address toa = as_Address(to_addr);
947    assert(toa.disp() != 0, "must have");
948  }
949
950  int null_check_here = code_offset();
951  switch (type) {
952    case T_FLOAT: {
953      if (src->is_single_xmm()) {
954        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
955      } else {
956        assert(src->is_single_fpu(), "must be");
957        assert(src->fpu_regnr() == 0, "argument must be on TOS");
958        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
959        else                    __ fst_s (as_Address(to_addr));
960      }
961      break;
962    }
963
964    case T_DOUBLE: {
965      if (src->is_double_xmm()) {
966        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
967      } else {
968        assert(src->is_double_fpu(), "must be");
969        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
970        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
971        else                    __ fst_d (as_Address(to_addr));
972      }
973      break;
974    }
975
976    case T_ARRAY:   // fall through
977    case T_OBJECT:  // fall through
978      if (UseCompressedOops && !wide) {
979        __ movl(as_Address(to_addr), compressed_src);
980      } else {
981        __ movptr(as_Address(to_addr), src->as_register());
982      }
983      break;
984    case T_METADATA:
985      // We get here to store a method pointer to the stack to pass to
986      // a dtrace runtime call. This can't work on 64 bit with
987      // compressed klass ptrs: T_METADATA can be a compressed klass
988      // ptr or a 64 bit method pointer.
989      LP64_ONLY(ShouldNotReachHere());
990      __ movptr(as_Address(to_addr), src->as_register());
991      break;
992    case T_ADDRESS:
993      __ movptr(as_Address(to_addr), src->as_register());
994      break;
995    case T_INT:
996      __ movl(as_Address(to_addr), src->as_register());
997      break;
998
999    case T_LONG: {
1000      Register from_lo = src->as_register_lo();
1001      Register from_hi = src->as_register_hi();
1002#ifdef _LP64
1003      __ movptr(as_Address_lo(to_addr), from_lo);
1004#else
1005      Register base = to_addr->base()->as_register();
1006      Register index = noreg;
1007      if (to_addr->index()->is_register()) {
1008        index = to_addr->index()->as_register();
1009      }
1010      if (base == from_lo || index == from_lo) {
1011        assert(base != from_hi, "can't be");
1012        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1013        __ movl(as_Address_hi(to_addr), from_hi);
1014        if (patch != NULL) {
1015          patching_epilog(patch, lir_patch_high, base, info);
1016          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1017          patch_code = lir_patch_low;
1018        }
1019        __ movl(as_Address_lo(to_addr), from_lo);
1020      } else {
1021        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1022        __ movl(as_Address_lo(to_addr), from_lo);
1023        if (patch != NULL) {
1024          patching_epilog(patch, lir_patch_low, base, info);
1025          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1026          patch_code = lir_patch_high;
1027        }
1028        __ movl(as_Address_hi(to_addr), from_hi);
1029      }
1030#endif // _LP64
1031      break;
1032    }
1033
1034    case T_BYTE:    // fall through
1035    case T_BOOLEAN: {
1036      Register src_reg = src->as_register();
1037      Address dst_addr = as_Address(to_addr);
1038      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1039      __ movb(dst_addr, src_reg);
1040      break;
1041    }
1042
1043    case T_CHAR:    // fall through
1044    case T_SHORT:
1045      __ movw(as_Address(to_addr), src->as_register());
1046      break;
1047
1048    default:
1049      ShouldNotReachHere();
1050  }
1051  if (info != NULL) {
1052    add_debug_info_for_null_check(null_check_here, info);
1053  }
1054
1055  if (patch_code != lir_patch_none) {
1056    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1057  }
1058}
1059
1060
1061void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1062  assert(src->is_stack(), "should not call otherwise");
1063  assert(dest->is_register(), "should not call otherwise");
1064
1065  if (dest->is_single_cpu()) {
1066    if (type == T_ARRAY || type == T_OBJECT) {
1067      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1068      __ verify_oop(dest->as_register());
1069    } else if (type == T_METADATA) {
1070      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1071    } else {
1072      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1073    }
1074
1075  } else if (dest->is_double_cpu()) {
1076    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1077    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1078    __ movptr(dest->as_register_lo(), src_addr_LO);
1079    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1080
1081  } else if (dest->is_single_xmm()) {
1082    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1083    __ movflt(dest->as_xmm_float_reg(), src_addr);
1084
1085  } else if (dest->is_double_xmm()) {
1086    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1087    __ movdbl(dest->as_xmm_double_reg(), src_addr);
1088
1089  } else if (dest->is_single_fpu()) {
1090    assert(dest->fpu_regnr() == 0, "dest must be TOS");
1091    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1092    __ fld_s(src_addr);
1093
1094  } else if (dest->is_double_fpu()) {
1095    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1096    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1097    __ fld_d(src_addr);
1098
1099  } else {
1100    ShouldNotReachHere();
1101  }
1102}
1103
1104
1105void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1106  if (src->is_single_stack()) {
1107    if (type == T_OBJECT || type == T_ARRAY) {
1108      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1109      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1110    } else {
1111#ifndef _LP64
1112      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1113      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1114#else
1115      //no pushl on 64bits
1116      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1117      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1118#endif
1119    }
1120
1121  } else if (src->is_double_stack()) {
1122#ifdef _LP64
1123    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1124    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1125#else
1126    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1127    // push and pop the part at src + wordSize, adding wordSize for the previous push
1128    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1129    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1130    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1131#endif // _LP64
1132
1133  } else {
1134    ShouldNotReachHere();
1135  }
1136}
1137
1138
1139void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1140  assert(src->is_address(), "should not call otherwise");
1141  assert(dest->is_register(), "should not call otherwise");
1142
1143  LIR_Address* addr = src->as_address_ptr();
1144  Address from_addr = as_Address(addr);
1145
1146  if (addr->base()->type() == T_OBJECT) {
1147    __ verify_oop(addr->base()->as_pointer_register());
1148  }
1149
1150  switch (type) {
1151    case T_BOOLEAN: // fall through
1152    case T_BYTE:    // fall through
1153    case T_CHAR:    // fall through
1154    case T_SHORT:
1155      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1156        // on pre P6 processors we may get partial register stalls
1157        // so blow away the value of to_rinfo before loading a
1158        // partial word into it.  Do it here so that it precedes
1159        // the potential patch point below.
1160        __ xorptr(dest->as_register(), dest->as_register());
1161      }
1162      break;
1163   default:
1164     break;
1165  }
1166
1167  PatchingStub* patch = NULL;
1168  if (patch_code != lir_patch_none) {
1169    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1170    assert(from_addr.disp() != 0, "must have");
1171  }
1172  if (info != NULL) {
1173    add_debug_info_for_null_check_here(info);
1174  }
1175
1176  switch (type) {
1177    case T_FLOAT: {
1178      if (dest->is_single_xmm()) {
1179        __ movflt(dest->as_xmm_float_reg(), from_addr);
1180      } else {
1181        assert(dest->is_single_fpu(), "must be");
1182        assert(dest->fpu_regnr() == 0, "dest must be TOS");
1183        __ fld_s(from_addr);
1184      }
1185      break;
1186    }
1187
1188    case T_DOUBLE: {
1189      if (dest->is_double_xmm()) {
1190        __ movdbl(dest->as_xmm_double_reg(), from_addr);
1191      } else {
1192        assert(dest->is_double_fpu(), "must be");
1193        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1194        __ fld_d(from_addr);
1195      }
1196      break;
1197    }
1198
1199    case T_OBJECT:  // fall through
1200    case T_ARRAY:   // fall through
1201      if (UseCompressedOops && !wide) {
1202        __ movl(dest->as_register(), from_addr);
1203      } else {
1204        __ movptr(dest->as_register(), from_addr);
1205      }
1206      break;
1207
1208    case T_ADDRESS:
1209      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1210        __ movl(dest->as_register(), from_addr);
1211      } else {
1212        __ movptr(dest->as_register(), from_addr);
1213      }
1214      break;
1215    case T_INT:
1216      __ movl(dest->as_register(), from_addr);
1217      break;
1218
1219    case T_LONG: {
1220      Register to_lo = dest->as_register_lo();
1221      Register to_hi = dest->as_register_hi();
1222#ifdef _LP64
1223      __ movptr(to_lo, as_Address_lo(addr));
1224#else
1225      Register base = addr->base()->as_register();
1226      Register index = noreg;
1227      if (addr->index()->is_register()) {
1228        index = addr->index()->as_register();
1229      }
1230      if ((base == to_lo && index == to_hi) ||
1231          (base == to_hi && index == to_lo)) {
1232        // addresses with 2 registers are only formed as a result of
1233        // array access so this code will never have to deal with
1234        // patches or null checks.
1235        assert(info == NULL && patch == NULL, "must be");
1236        __ lea(to_hi, as_Address(addr));
1237        __ movl(to_lo, Address(to_hi, 0));
1238        __ movl(to_hi, Address(to_hi, BytesPerWord));
1239      } else if (base == to_lo || index == to_lo) {
1240        assert(base != to_hi, "can't be");
1241        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
1242        __ movl(to_hi, as_Address_hi(addr));
1243        if (patch != NULL) {
1244          patching_epilog(patch, lir_patch_high, base, info);
1245          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1246          patch_code = lir_patch_low;
1247        }
1248        __ movl(to_lo, as_Address_lo(addr));
1249      } else {
1250        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1251        __ movl(to_lo, as_Address_lo(addr));
1252        if (patch != NULL) {
1253          patching_epilog(patch, lir_patch_low, base, info);
1254          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1255          patch_code = lir_patch_high;
1256        }
1257        __ movl(to_hi, as_Address_hi(addr));
1258      }
1259#endif // _LP64
1260      break;
1261    }
1262
1263    case T_BOOLEAN: // fall through
1264    case T_BYTE: {
1265      Register dest_reg = dest->as_register();
1266      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1267      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1268        __ movsbl(dest_reg, from_addr);
1269      } else {
1270        __ movb(dest_reg, from_addr);
1271        __ shll(dest_reg, 24);
1272        __ sarl(dest_reg, 24);
1273      }
1274      break;
1275    }
1276
1277    case T_CHAR: {
1278      Register dest_reg = dest->as_register();
1279      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1280      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1281        __ movzwl(dest_reg, from_addr);
1282      } else {
1283        __ movw(dest_reg, from_addr);
1284      }
1285      break;
1286    }
1287
1288    case T_SHORT: {
1289      Register dest_reg = dest->as_register();
1290      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1291        __ movswl(dest_reg, from_addr);
1292      } else {
1293        __ movw(dest_reg, from_addr);
1294        __ shll(dest_reg, 16);
1295        __ sarl(dest_reg, 16);
1296      }
1297      break;
1298    }
1299
1300    default:
1301      ShouldNotReachHere();
1302  }
1303
1304  if (patch != NULL) {
1305    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1306  }
1307
1308  if (type == T_ARRAY || type == T_OBJECT) {
1309#ifdef _LP64
1310    if (UseCompressedOops && !wide) {
1311      __ decode_heap_oop(dest->as_register());
1312    }
1313#endif
1314    __ verify_oop(dest->as_register());
1315  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1316#ifdef _LP64
1317    if (UseCompressedClassPointers) {
1318      __ decode_klass_not_null(dest->as_register());
1319    }
1320#endif
1321  }
1322}
1323
1324
1325NEEDS_CLEANUP; // This could be static?
1326Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1327  int elem_size = type2aelembytes(type);
1328  switch (elem_size) {
1329    case 1: return Address::times_1;
1330    case 2: return Address::times_2;
1331    case 4: return Address::times_4;
1332    case 8: return Address::times_8;
1333  }
1334  ShouldNotReachHere();
1335  return Address::no_scale;
1336}
1337
1338
1339void LIR_Assembler::emit_op3(LIR_Op3* op) {
1340  switch (op->code()) {
1341    case lir_idiv:
1342    case lir_irem:
1343      arithmetic_idiv(op->code(),
1344                      op->in_opr1(),
1345                      op->in_opr2(),
1346                      op->in_opr3(),
1347                      op->result_opr(),
1348                      op->info());
1349      break;
1350    case lir_fmad:
1351      __ fmad(op->result_opr()->as_xmm_double_reg(),
1352              op->in_opr1()->as_xmm_double_reg(),
1353              op->in_opr2()->as_xmm_double_reg(),
1354              op->in_opr3()->as_xmm_double_reg());
1355      break;
1356    case lir_fmaf:
1357      __ fmaf(op->result_opr()->as_xmm_float_reg(),
1358              op->in_opr1()->as_xmm_float_reg(),
1359              op->in_opr2()->as_xmm_float_reg(),
1360              op->in_opr3()->as_xmm_float_reg());
1361      break;
1362    default:      ShouldNotReachHere(); break;
1363  }
1364}
1365
1366void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1367#ifdef ASSERT
1368  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1369  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
1370  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1371#endif
1372
1373  if (op->cond() == lir_cond_always) {
1374    if (op->info() != NULL) add_debug_info_for_branch(op->info());
1375    __ jmp (*(op->label()));
1376  } else {
1377    Assembler::Condition acond = Assembler::zero;
1378    if (op->code() == lir_cond_float_branch) {
1379      assert(op->ublock() != NULL, "must have unordered successor");
1380      __ jcc(Assembler::parity, *(op->ublock()->label()));
1381      switch(op->cond()) {
1382        case lir_cond_equal:        acond = Assembler::equal;      break;
1383        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
1384        case lir_cond_less:         acond = Assembler::below;      break;
1385        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
1386        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1387        case lir_cond_greater:      acond = Assembler::above;      break;
1388        default:                         ShouldNotReachHere();
1389      }
1390    } else {
1391      switch (op->cond()) {
1392        case lir_cond_equal:        acond = Assembler::equal;       break;
1393        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
1394        case lir_cond_less:         acond = Assembler::less;        break;
1395        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
1396        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1397        case lir_cond_greater:      acond = Assembler::greater;     break;
1398        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
1399        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
1400        default:                         ShouldNotReachHere();
1401      }
1402    }
1403    __ jcc(acond,*(op->label()));
1404  }
1405}
1406
1407void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1408  LIR_Opr src  = op->in_opr();
1409  LIR_Opr dest = op->result_opr();
1410
1411  switch (op->bytecode()) {
1412    case Bytecodes::_i2l:
1413#ifdef _LP64
1414      __ movl2ptr(dest->as_register_lo(), src->as_register());
1415#else
1416      move_regs(src->as_register(), dest->as_register_lo());
1417      move_regs(src->as_register(), dest->as_register_hi());
1418      __ sarl(dest->as_register_hi(), 31);
1419#endif // LP64
1420      break;
1421
1422    case Bytecodes::_l2i:
1423#ifdef _LP64
1424      __ movl(dest->as_register(), src->as_register_lo());
1425#else
1426      move_regs(src->as_register_lo(), dest->as_register());
1427#endif
1428      break;
1429
1430    case Bytecodes::_i2b:
1431      move_regs(src->as_register(), dest->as_register());
1432      __ sign_extend_byte(dest->as_register());
1433      break;
1434
1435    case Bytecodes::_i2c:
1436      move_regs(src->as_register(), dest->as_register());
1437      __ andl(dest->as_register(), 0xFFFF);
1438      break;
1439
1440    case Bytecodes::_i2s:
1441      move_regs(src->as_register(), dest->as_register());
1442      __ sign_extend_short(dest->as_register());
1443      break;
1444
1445
1446    case Bytecodes::_f2d:
1447    case Bytecodes::_d2f:
1448      if (dest->is_single_xmm()) {
1449        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1450      } else if (dest->is_double_xmm()) {
1451        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1452      } else {
1453        assert(src->fpu() == dest->fpu(), "register must be equal");
1454        // do nothing (float result is rounded later through spilling)
1455      }
1456      break;
1457
1458    case Bytecodes::_i2f:
1459    case Bytecodes::_i2d:
1460      if (dest->is_single_xmm()) {
1461        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1462      } else if (dest->is_double_xmm()) {
1463        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1464      } else {
1465        assert(dest->fpu() == 0, "result must be on TOS");
1466        __ movl(Address(rsp, 0), src->as_register());
1467        __ fild_s(Address(rsp, 0));
1468      }
1469      break;
1470
1471    case Bytecodes::_f2i:
1472    case Bytecodes::_d2i:
1473      if (src->is_single_xmm()) {
1474        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
1475      } else if (src->is_double_xmm()) {
1476        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
1477      } else {
1478        assert(src->fpu() == 0, "input must be on TOS");
1479        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
1480        __ fist_s(Address(rsp, 0));
1481        __ movl(dest->as_register(), Address(rsp, 0));
1482        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1483      }
1484
1485      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
1486      assert(op->stub() != NULL, "stub required");
1487      __ cmpl(dest->as_register(), 0x80000000);
1488      __ jcc(Assembler::equal, *op->stub()->entry());
1489      __ bind(*op->stub()->continuation());
1490      break;
1491
1492    case Bytecodes::_l2f:
1493    case Bytecodes::_l2d:
1494      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
1495      assert(dest->fpu() == 0, "result must be on TOS");
1496
1497      __ movptr(Address(rsp, 0),            src->as_register_lo());
1498      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
1499      __ fild_d(Address(rsp, 0));
1500      // float result is rounded later through spilling
1501      break;
1502
1503    case Bytecodes::_f2l:
1504    case Bytecodes::_d2l:
1505      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
1506      assert(src->fpu() == 0, "input must be on TOS");
1507      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
1508
1509      // instruction sequence too long to inline it here
1510      {
1511        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
1512      }
1513      break;
1514
1515    default: ShouldNotReachHere();
1516  }
1517}
1518
1519void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1520  if (op->init_check()) {
1521    __ cmpb(Address(op->klass()->as_register(),
1522                    InstanceKlass::init_state_offset()),
1523                    InstanceKlass::fully_initialized);
1524    add_debug_info_for_null_check_here(op->stub()->info());
1525    __ jcc(Assembler::notEqual, *op->stub()->entry());
1526  }
1527  __ allocate_object(op->obj()->as_register(),
1528                     op->tmp1()->as_register(),
1529                     op->tmp2()->as_register(),
1530                     op->header_size(),
1531                     op->object_size(),
1532                     op->klass()->as_register(),
1533                     *op->stub()->entry());
1534  __ bind(*op->stub()->continuation());
1535}
1536
1537void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1538  Register len =  op->len()->as_register();
1539  LP64_ONLY( __ movslq(len, len); )
1540
1541  if (UseSlowPath ||
1542      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1543      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1544    __ jmp(*op->stub()->entry());
1545  } else {
1546    Register tmp1 = op->tmp1()->as_register();
1547    Register tmp2 = op->tmp2()->as_register();
1548    Register tmp3 = op->tmp3()->as_register();
1549    if (len == tmp1) {
1550      tmp1 = tmp3;
1551    } else if (len == tmp2) {
1552      tmp2 = tmp3;
1553    } else if (len == tmp3) {
1554      // everything is ok
1555    } else {
1556      __ mov(tmp3, len);
1557    }
1558    __ allocate_array(op->obj()->as_register(),
1559                      len,
1560                      tmp1,
1561                      tmp2,
1562                      arrayOopDesc::header_size(op->type()),
1563                      array_element_size(op->type()),
1564                      op->klass()->as_register(),
1565                      *op->stub()->entry());
1566  }
1567  __ bind(*op->stub()->continuation());
1568}
1569
1570void LIR_Assembler::type_profile_helper(Register mdo,
1571                                        ciMethodData *md, ciProfileData *data,
1572                                        Register recv, Label* update_done) {
1573  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1574    Label next_test;
1575    // See if the receiver is receiver[n].
1576    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1577    __ jccb(Assembler::notEqual, next_test);
1578    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1579    __ addptr(data_addr, DataLayout::counter_increment);
1580    __ jmp(*update_done);
1581    __ bind(next_test);
1582  }
1583
1584  // Didn't find receiver; find next empty slot and fill it in
1585  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1586    Label next_test;
1587    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
1588    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
1589    __ jccb(Assembler::notEqual, next_test);
1590    __ movptr(recv_addr, recv);
1591    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
1592    __ jmp(*update_done);
1593    __ bind(next_test);
1594  }
1595}
1596
1597void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1598  // we always need a stub for the failure case.
1599  CodeStub* stub = op->stub();
1600  Register obj = op->object()->as_register();
1601  Register k_RInfo = op->tmp1()->as_register();
1602  Register klass_RInfo = op->tmp2()->as_register();
1603  Register dst = op->result_opr()->as_register();
1604  ciKlass* k = op->klass();
1605  Register Rtmp1 = noreg;
1606
1607  // check if it needs to be profiled
1608  ciMethodData* md = NULL;
1609  ciProfileData* data = NULL;
1610
1611  if (op->should_profile()) {
1612    ciMethod* method = op->profiled_method();
1613    assert(method != NULL, "Should have method");
1614    int bci = op->profiled_bci();
1615    md = method->method_data_or_null();
1616    assert(md != NULL, "Sanity");
1617    data = md->bci_to_data(bci);
1618    assert(data != NULL,                "need data for type check");
1619    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1620  }
1621  Label profile_cast_success, profile_cast_failure;
1622  Label *success_target = op->should_profile() ? &profile_cast_success : success;
1623  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1624
1625  if (obj == k_RInfo) {
1626    k_RInfo = dst;
1627  } else if (obj == klass_RInfo) {
1628    klass_RInfo = dst;
1629  }
1630  if (k->is_loaded() && !UseCompressedClassPointers) {
1631    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1632  } else {
1633    Rtmp1 = op->tmp3()->as_register();
1634    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1635  }
1636
1637  assert_different_registers(obj, k_RInfo, klass_RInfo);
1638
1639  __ cmpptr(obj, (int32_t)NULL_WORD);
1640  if (op->should_profile()) {
1641    Label not_null;
1642    __ jccb(Assembler::notEqual, not_null);
1643    // Object is null; update MDO and exit
1644    Register mdo  = klass_RInfo;
1645    __ mov_metadata(mdo, md->constant_encoding());
1646    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1647    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1648    __ orl(data_addr, header_bits);
1649    __ jmp(*obj_is_null);
1650    __ bind(not_null);
1651  } else {
1652    __ jcc(Assembler::equal, *obj_is_null);
1653  }
1654
1655  if (!k->is_loaded()) {
1656    klass2reg_with_patching(k_RInfo, op->info_for_patch());
1657  } else {
1658#ifdef _LP64
1659    __ mov_metadata(k_RInfo, k->constant_encoding());
1660#endif // _LP64
1661  }
1662  __ verify_oop(obj);
1663
1664  if (op->fast_check()) {
1665    // get object class
1666    // not a safepoint as obj null check happens earlier
1667#ifdef _LP64
1668    if (UseCompressedClassPointers) {
1669      __ load_klass(Rtmp1, obj);
1670      __ cmpptr(k_RInfo, Rtmp1);
1671    } else {
1672      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1673    }
1674#else
1675    if (k->is_loaded()) {
1676      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1677    } else {
1678      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1679    }
1680#endif
1681    __ jcc(Assembler::notEqual, *failure_target);
1682    // successful cast, fall through to profile or jump
1683  } else {
1684    // get object class
1685    // not a safepoint as obj null check happens earlier
1686    __ load_klass(klass_RInfo, obj);
1687    if (k->is_loaded()) {
1688      // See if we get an immediate positive hit
1689#ifdef _LP64
1690      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1691#else
1692      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1693#endif // _LP64
1694      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1695        __ jcc(Assembler::notEqual, *failure_target);
1696        // successful cast, fall through to profile or jump
1697      } else {
1698        // See if we get an immediate positive hit
1699        __ jcc(Assembler::equal, *success_target);
1700        // check for self
1701#ifdef _LP64
1702        __ cmpptr(klass_RInfo, k_RInfo);
1703#else
1704        __ cmpklass(klass_RInfo, k->constant_encoding());
1705#endif // _LP64
1706        __ jcc(Assembler::equal, *success_target);
1707
1708        __ push(klass_RInfo);
1709#ifdef _LP64
1710        __ push(k_RInfo);
1711#else
1712        __ pushklass(k->constant_encoding());
1713#endif // _LP64
1714        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1715        __ pop(klass_RInfo);
1716        __ pop(klass_RInfo);
1717        // result is a boolean
1718        __ cmpl(klass_RInfo, 0);
1719        __ jcc(Assembler::equal, *failure_target);
1720        // successful cast, fall through to profile or jump
1721      }
1722    } else {
1723      // perform the fast part of the checking logic
1724      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1725      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1726      __ push(klass_RInfo);
1727      __ push(k_RInfo);
1728      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1729      __ pop(klass_RInfo);
1730      __ pop(k_RInfo);
1731      // result is a boolean
1732      __ cmpl(k_RInfo, 0);
1733      __ jcc(Assembler::equal, *failure_target);
1734      // successful cast, fall through to profile or jump
1735    }
1736  }
1737  if (op->should_profile()) {
1738    Register mdo  = klass_RInfo, recv = k_RInfo;
1739    __ bind(profile_cast_success);
1740    __ mov_metadata(mdo, md->constant_encoding());
1741    __ load_klass(recv, obj);
1742    Label update_done;
1743    type_profile_helper(mdo, md, data, recv, success);
1744    __ jmp(*success);
1745
1746    __ bind(profile_cast_failure);
1747    __ mov_metadata(mdo, md->constant_encoding());
1748    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1749    __ subptr(counter_addr, DataLayout::counter_increment);
1750    __ jmp(*failure);
1751  }
1752  __ jmp(*success);
1753}
1754
1755
1756void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1757  LIR_Code code = op->code();
1758  if (code == lir_store_check) {
1759    Register value = op->object()->as_register();
1760    Register array = op->array()->as_register();
1761    Register k_RInfo = op->tmp1()->as_register();
1762    Register klass_RInfo = op->tmp2()->as_register();
1763    Register Rtmp1 = op->tmp3()->as_register();
1764
1765    CodeStub* stub = op->stub();
1766
1767    // check if it needs to be profiled
1768    ciMethodData* md = NULL;
1769    ciProfileData* data = NULL;
1770
1771    if (op->should_profile()) {
1772      ciMethod* method = op->profiled_method();
1773      assert(method != NULL, "Should have method");
1774      int bci = op->profiled_bci();
1775      md = method->method_data_or_null();
1776      assert(md != NULL, "Sanity");
1777      data = md->bci_to_data(bci);
1778      assert(data != NULL,                "need data for type check");
1779      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1780    }
1781    Label profile_cast_success, profile_cast_failure, done;
1782    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1783    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1784
1785    __ cmpptr(value, (int32_t)NULL_WORD);
1786    if (op->should_profile()) {
1787      Label not_null;
1788      __ jccb(Assembler::notEqual, not_null);
1789      // Object is null; update MDO and exit
1790      Register mdo  = klass_RInfo;
1791      __ mov_metadata(mdo, md->constant_encoding());
1792      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1793      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1794      __ orl(data_addr, header_bits);
1795      __ jmp(done);
1796      __ bind(not_null);
1797    } else {
1798      __ jcc(Assembler::equal, done);
1799    }
1800
1801    add_debug_info_for_null_check_here(op->info_for_exception());
1802    __ load_klass(k_RInfo, array);
1803    __ load_klass(klass_RInfo, value);
1804
1805    // get instance klass (it's already uncompressed)
1806    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1807    // perform the fast part of the checking logic
1808    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1809    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1810    __ push(klass_RInfo);
1811    __ push(k_RInfo);
1812    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1813    __ pop(klass_RInfo);
1814    __ pop(k_RInfo);
1815    // result is a boolean
1816    __ cmpl(k_RInfo, 0);
1817    __ jcc(Assembler::equal, *failure_target);
1818    // fall through to the success case
1819
1820    if (op->should_profile()) {
1821      Register mdo  = klass_RInfo, recv = k_RInfo;
1822      __ bind(profile_cast_success);
1823      __ mov_metadata(mdo, md->constant_encoding());
1824      __ load_klass(recv, value);
1825      Label update_done;
1826      type_profile_helper(mdo, md, data, recv, &done);
1827      __ jmpb(done);
1828
1829      __ bind(profile_cast_failure);
1830      __ mov_metadata(mdo, md->constant_encoding());
1831      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1832      __ subptr(counter_addr, DataLayout::counter_increment);
1833      __ jmp(*stub->entry());
1834    }
1835
1836    __ bind(done);
1837  } else
1838    if (code == lir_checkcast) {
1839      Register obj = op->object()->as_register();
1840      Register dst = op->result_opr()->as_register();
1841      Label success;
1842      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1843      __ bind(success);
1844      if (dst != obj) {
1845        __ mov(dst, obj);
1846      }
1847    } else
1848      if (code == lir_instanceof) {
1849        Register obj = op->object()->as_register();
1850        Register dst = op->result_opr()->as_register();
1851        Label success, failure, done;
1852        emit_typecheck_helper(op, &success, &failure, &failure);
1853        __ bind(failure);
1854        __ xorptr(dst, dst);
1855        __ jmpb(done);
1856        __ bind(success);
1857        __ movptr(dst, 1);
1858        __ bind(done);
1859      } else {
1860        ShouldNotReachHere();
1861      }
1862
1863}
1864
1865
1866void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1867  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1868    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1869    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1870    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1871    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1872    Register addr = op->addr()->as_register();
1873    if (os::is_MP()) {
1874      __ lock();
1875    }
1876    NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1877
1878  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1879    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1880    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1881    Register newval = op->new_value()->as_register();
1882    Register cmpval = op->cmp_value()->as_register();
1883    assert(cmpval == rax, "wrong register");
1884    assert(newval != NULL, "new val must be register");
1885    assert(cmpval != newval, "cmp and new values must be in different registers");
1886    assert(cmpval != addr, "cmp and addr must be in different registers");
1887    assert(newval != addr, "new value and addr must be in different registers");
1888
1889    if ( op->code() == lir_cas_obj) {
1890#ifdef _LP64
1891      if (UseCompressedOops) {
1892        __ encode_heap_oop(cmpval);
1893        __ mov(rscratch1, newval);
1894        __ encode_heap_oop(rscratch1);
1895        if (os::is_MP()) {
1896          __ lock();
1897        }
1898        // cmpval (rax) is implicitly used by this instruction
1899        __ cmpxchgl(rscratch1, Address(addr, 0));
1900      } else
1901#endif
1902      {
1903        if (os::is_MP()) {
1904          __ lock();
1905        }
1906        __ cmpxchgptr(newval, Address(addr, 0));
1907      }
1908    } else {
1909      assert(op->code() == lir_cas_int, "lir_cas_int expected");
1910      if (os::is_MP()) {
1911        __ lock();
1912      }
1913      __ cmpxchgl(newval, Address(addr, 0));
1914    }
1915#ifdef _LP64
1916  } else if (op->code() == lir_cas_long) {
1917    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1918    Register newval = op->new_value()->as_register_lo();
1919    Register cmpval = op->cmp_value()->as_register_lo();
1920    assert(cmpval == rax, "wrong register");
1921    assert(newval != NULL, "new val must be register");
1922    assert(cmpval != newval, "cmp and new values must be in different registers");
1923    assert(cmpval != addr, "cmp and addr must be in different registers");
1924    assert(newval != addr, "new value and addr must be in different registers");
1925    if (os::is_MP()) {
1926      __ lock();
1927    }
1928    __ cmpxchgq(newval, Address(addr, 0));
1929#endif // _LP64
1930  } else {
1931    Unimplemented();
1932  }
1933}
1934
1935void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1936  Assembler::Condition acond, ncond;
1937  switch (condition) {
1938    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1939    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1940    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1941    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1942    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1943    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1944    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1945    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1946    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1947                                ShouldNotReachHere();
1948  }
1949
1950  if (opr1->is_cpu_register()) {
1951    reg2reg(opr1, result);
1952  } else if (opr1->is_stack()) {
1953    stack2reg(opr1, result, result->type());
1954  } else if (opr1->is_constant()) {
1955    const2reg(opr1, result, lir_patch_none, NULL);
1956  } else {
1957    ShouldNotReachHere();
1958  }
1959
1960  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
1961    // optimized version that does not require a branch
1962    if (opr2->is_single_cpu()) {
1963      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1964      __ cmov(ncond, result->as_register(), opr2->as_register());
1965    } else if (opr2->is_double_cpu()) {
1966      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1967      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1968      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
1969      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
1970    } else if (opr2->is_single_stack()) {
1971      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
1972    } else if (opr2->is_double_stack()) {
1973      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
1974      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
1975    } else {
1976      ShouldNotReachHere();
1977    }
1978
1979  } else {
1980    Label skip;
1981    __ jcc (acond, skip);
1982    if (opr2->is_cpu_register()) {
1983      reg2reg(opr2, result);
1984    } else if (opr2->is_stack()) {
1985      stack2reg(opr2, result, result->type());
1986    } else if (opr2->is_constant()) {
1987      const2reg(opr2, result, lir_patch_none, NULL);
1988    } else {
1989      ShouldNotReachHere();
1990    }
1991    __ bind(skip);
1992  }
1993}
1994
1995
1996void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1997  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1998
1999  if (left->is_single_cpu()) {
2000    assert(left == dest, "left and dest must be equal");
2001    Register lreg = left->as_register();
2002
2003    if (right->is_single_cpu()) {
2004      // cpu register - cpu register
2005      Register rreg = right->as_register();
2006      switch (code) {
2007        case lir_add: __ addl (lreg, rreg); break;
2008        case lir_sub: __ subl (lreg, rreg); break;
2009        case lir_mul: __ imull(lreg, rreg); break;
2010        default:      ShouldNotReachHere();
2011      }
2012
2013    } else if (right->is_stack()) {
2014      // cpu register - stack
2015      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2016      switch (code) {
2017        case lir_add: __ addl(lreg, raddr); break;
2018        case lir_sub: __ subl(lreg, raddr); break;
2019        default:      ShouldNotReachHere();
2020      }
2021
2022    } else if (right->is_constant()) {
2023      // cpu register - constant
2024      jint c = right->as_constant_ptr()->as_jint();
2025      switch (code) {
2026        case lir_add: {
2027          __ incrementl(lreg, c);
2028          break;
2029        }
2030        case lir_sub: {
2031          __ decrementl(lreg, c);
2032          break;
2033        }
2034        default: ShouldNotReachHere();
2035      }
2036
2037    } else {
2038      ShouldNotReachHere();
2039    }
2040
2041  } else if (left->is_double_cpu()) {
2042    assert(left == dest, "left and dest must be equal");
2043    Register lreg_lo = left->as_register_lo();
2044    Register lreg_hi = left->as_register_hi();
2045
2046    if (right->is_double_cpu()) {
2047      // cpu register - cpu register
2048      Register rreg_lo = right->as_register_lo();
2049      Register rreg_hi = right->as_register_hi();
2050      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2051      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2052      switch (code) {
2053        case lir_add:
2054          __ addptr(lreg_lo, rreg_lo);
2055          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2056          break;
2057        case lir_sub:
2058          __ subptr(lreg_lo, rreg_lo);
2059          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2060          break;
2061        case lir_mul:
2062#ifdef _LP64
2063          __ imulq(lreg_lo, rreg_lo);
2064#else
2065          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2066          __ imull(lreg_hi, rreg_lo);
2067          __ imull(rreg_hi, lreg_lo);
2068          __ addl (rreg_hi, lreg_hi);
2069          __ mull (rreg_lo);
2070          __ addl (lreg_hi, rreg_hi);
2071#endif // _LP64
2072          break;
2073        default:
2074          ShouldNotReachHere();
2075      }
2076
2077    } else if (right->is_constant()) {
2078      // cpu register - constant
2079#ifdef _LP64
2080      jlong c = right->as_constant_ptr()->as_jlong_bits();
2081      __ movptr(r10, (intptr_t) c);
2082      switch (code) {
2083        case lir_add:
2084          __ addptr(lreg_lo, r10);
2085          break;
2086        case lir_sub:
2087          __ subptr(lreg_lo, r10);
2088          break;
2089        default:
2090          ShouldNotReachHere();
2091      }
2092#else
2093      jint c_lo = right->as_constant_ptr()->as_jint_lo();
2094      jint c_hi = right->as_constant_ptr()->as_jint_hi();
2095      switch (code) {
2096        case lir_add:
2097          __ addptr(lreg_lo, c_lo);
2098          __ adcl(lreg_hi, c_hi);
2099          break;
2100        case lir_sub:
2101          __ subptr(lreg_lo, c_lo);
2102          __ sbbl(lreg_hi, c_hi);
2103          break;
2104        default:
2105          ShouldNotReachHere();
2106      }
2107#endif // _LP64
2108
2109    } else {
2110      ShouldNotReachHere();
2111    }
2112
2113  } else if (left->is_single_xmm()) {
2114    assert(left == dest, "left and dest must be equal");
2115    XMMRegister lreg = left->as_xmm_float_reg();
2116
2117    if (right->is_single_xmm()) {
2118      XMMRegister rreg = right->as_xmm_float_reg();
2119      switch (code) {
2120        case lir_add: __ addss(lreg, rreg);  break;
2121        case lir_sub: __ subss(lreg, rreg);  break;
2122        case lir_mul_strictfp: // fall through
2123        case lir_mul: __ mulss(lreg, rreg);  break;
2124        case lir_div_strictfp: // fall through
2125        case lir_div: __ divss(lreg, rreg);  break;
2126        default: ShouldNotReachHere();
2127      }
2128    } else {
2129      Address raddr;
2130      if (right->is_single_stack()) {
2131        raddr = frame_map()->address_for_slot(right->single_stack_ix());
2132      } else if (right->is_constant()) {
2133        // hack for now
2134        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2135      } else {
2136        ShouldNotReachHere();
2137      }
2138      switch (code) {
2139        case lir_add: __ addss(lreg, raddr);  break;
2140        case lir_sub: __ subss(lreg, raddr);  break;
2141        case lir_mul_strictfp: // fall through
2142        case lir_mul: __ mulss(lreg, raddr);  break;
2143        case lir_div_strictfp: // fall through
2144        case lir_div: __ divss(lreg, raddr);  break;
2145        default: ShouldNotReachHere();
2146      }
2147    }
2148
2149  } else if (left->is_double_xmm()) {
2150    assert(left == dest, "left and dest must be equal");
2151
2152    XMMRegister lreg = left->as_xmm_double_reg();
2153    if (right->is_double_xmm()) {
2154      XMMRegister rreg = right->as_xmm_double_reg();
2155      switch (code) {
2156        case lir_add: __ addsd(lreg, rreg);  break;
2157        case lir_sub: __ subsd(lreg, rreg);  break;
2158        case lir_mul_strictfp: // fall through
2159        case lir_mul: __ mulsd(lreg, rreg);  break;
2160        case lir_div_strictfp: // fall through
2161        case lir_div: __ divsd(lreg, rreg);  break;
2162        default: ShouldNotReachHere();
2163      }
2164    } else {
2165      Address raddr;
2166      if (right->is_double_stack()) {
2167        raddr = frame_map()->address_for_slot(right->double_stack_ix());
2168      } else if (right->is_constant()) {
2169        // hack for now
2170        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2171      } else {
2172        ShouldNotReachHere();
2173      }
2174      switch (code) {
2175        case lir_add: __ addsd(lreg, raddr);  break;
2176        case lir_sub: __ subsd(lreg, raddr);  break;
2177        case lir_mul_strictfp: // fall through
2178        case lir_mul: __ mulsd(lreg, raddr);  break;
2179        case lir_div_strictfp: // fall through
2180        case lir_div: __ divsd(lreg, raddr);  break;
2181        default: ShouldNotReachHere();
2182      }
2183    }
2184
2185  } else if (left->is_single_fpu()) {
2186    assert(dest->is_single_fpu(),  "fpu stack allocation required");
2187
2188    if (right->is_single_fpu()) {
2189      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2190
2191    } else {
2192      assert(left->fpu_regnr() == 0, "left must be on TOS");
2193      assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2194
2195      Address raddr;
2196      if (right->is_single_stack()) {
2197        raddr = frame_map()->address_for_slot(right->single_stack_ix());
2198      } else if (right->is_constant()) {
2199        address const_addr = float_constant(right->as_jfloat());
2200        assert(const_addr != NULL, "incorrect float/double constant maintainance");
2201        // hack for now
2202        raddr = __ as_Address(InternalAddress(const_addr));
2203      } else {
2204        ShouldNotReachHere();
2205      }
2206
2207      switch (code) {
2208        case lir_add: __ fadd_s(raddr); break;
2209        case lir_sub: __ fsub_s(raddr); break;
2210        case lir_mul_strictfp: // fall through
2211        case lir_mul: __ fmul_s(raddr); break;
2212        case lir_div_strictfp: // fall through
2213        case lir_div: __ fdiv_s(raddr); break;
2214        default:      ShouldNotReachHere();
2215      }
2216    }
2217
2218  } else if (left->is_double_fpu()) {
2219    assert(dest->is_double_fpu(),  "fpu stack allocation required");
2220
2221    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2222      // Double values require special handling for strictfp mul/div on x86
2223      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
2224      __ fmulp(left->fpu_regnrLo() + 1);
2225    }
2226
2227    if (right->is_double_fpu()) {
2228      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2229
2230    } else {
2231      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2232      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2233
2234      Address raddr;
2235      if (right->is_double_stack()) {
2236        raddr = frame_map()->address_for_slot(right->double_stack_ix());
2237      } else if (right->is_constant()) {
2238        // hack for now
2239        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2240      } else {
2241        ShouldNotReachHere();
2242      }
2243
2244      switch (code) {
2245        case lir_add: __ fadd_d(raddr); break;
2246        case lir_sub: __ fsub_d(raddr); break;
2247        case lir_mul_strictfp: // fall through
2248        case lir_mul: __ fmul_d(raddr); break;
2249        case lir_div_strictfp: // fall through
2250        case lir_div: __ fdiv_d(raddr); break;
2251        default: ShouldNotReachHere();
2252      }
2253    }
2254
2255    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2256      // Double values require special handling for strictfp mul/div on x86
2257      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
2258      __ fmulp(dest->fpu_regnrLo() + 1);
2259    }
2260
2261  } else if (left->is_single_stack() || left->is_address()) {
2262    assert(left == dest, "left and dest must be equal");
2263
2264    Address laddr;
2265    if (left->is_single_stack()) {
2266      laddr = frame_map()->address_for_slot(left->single_stack_ix());
2267    } else if (left->is_address()) {
2268      laddr = as_Address(left->as_address_ptr());
2269    } else {
2270      ShouldNotReachHere();
2271    }
2272
2273    if (right->is_single_cpu()) {
2274      Register rreg = right->as_register();
2275      switch (code) {
2276        case lir_add: __ addl(laddr, rreg); break;
2277        case lir_sub: __ subl(laddr, rreg); break;
2278        default:      ShouldNotReachHere();
2279      }
2280    } else if (right->is_constant()) {
2281      jint c = right->as_constant_ptr()->as_jint();
2282      switch (code) {
2283        case lir_add: {
2284          __ incrementl(laddr, c);
2285          break;
2286        }
2287        case lir_sub: {
2288          __ decrementl(laddr, c);
2289          break;
2290        }
2291        default: ShouldNotReachHere();
2292      }
2293    } else {
2294      ShouldNotReachHere();
2295    }
2296
2297  } else {
2298    ShouldNotReachHere();
2299  }
2300}
2301
2302void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2303  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
2304  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2305  assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2306
2307  bool left_is_tos = (left_index == 0);
2308  bool dest_is_tos = (dest_index == 0);
2309  int non_tos_index = (left_is_tos ? right_index : left_index);
2310
2311  switch (code) {
2312    case lir_add:
2313      if (pop_fpu_stack)       __ faddp(non_tos_index);
2314      else if (dest_is_tos)    __ fadd (non_tos_index);
2315      else                     __ fadda(non_tos_index);
2316      break;
2317
2318    case lir_sub:
2319      if (left_is_tos) {
2320        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
2321        else if (dest_is_tos)  __ fsub  (non_tos_index);
2322        else                   __ fsubra(non_tos_index);
2323      } else {
2324        if (pop_fpu_stack)     __ fsubp (non_tos_index);
2325        else if (dest_is_tos)  __ fsubr (non_tos_index);
2326        else                   __ fsuba (non_tos_index);
2327      }
2328      break;
2329
2330    case lir_mul_strictfp: // fall through
2331    case lir_mul:
2332      if (pop_fpu_stack)       __ fmulp(non_tos_index);
2333      else if (dest_is_tos)    __ fmul (non_tos_index);
2334      else                     __ fmula(non_tos_index);
2335      break;
2336
2337    case lir_div_strictfp: // fall through
2338    case lir_div:
2339      if (left_is_tos) {
2340        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
2341        else if (dest_is_tos)  __ fdiv  (non_tos_index);
2342        else                   __ fdivra(non_tos_index);
2343      } else {
2344        if (pop_fpu_stack)     __ fdivp (non_tos_index);
2345        else if (dest_is_tos)  __ fdivr (non_tos_index);
2346        else                   __ fdiva (non_tos_index);
2347      }
2348      break;
2349
2350    case lir_rem:
2351      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2352      __ fremr(noreg);
2353      break;
2354
2355    default:
2356      ShouldNotReachHere();
2357  }
2358}
2359
2360
2361void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2362  if (value->is_double_xmm()) {
2363    switch(code) {
2364      case lir_abs :
2365        {
2366          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2367            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2368          }
2369          __ andpd(dest->as_xmm_double_reg(),
2370                    ExternalAddress((address)double_signmask_pool));
2371        }
2372        break;
2373
2374      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2375      // all other intrinsics are not available in the SSE instruction set, so FPU is used
2376      default      : ShouldNotReachHere();
2377    }
2378
2379  } else if (value->is_double_fpu()) {
2380    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2381    switch(code) {
2382      case lir_abs   : __ fabs() ; break;
2383      case lir_sqrt  : __ fsqrt(); break;
2384      default      : ShouldNotReachHere();
2385    }
2386  } else {
2387    Unimplemented();
2388  }
2389}
2390
2391void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2392  // assert(left->destroys_register(), "check");
2393  if (left->is_single_cpu()) {
2394    Register reg = left->as_register();
2395    if (right->is_constant()) {
2396      int val = right->as_constant_ptr()->as_jint();
2397      switch (code) {
2398        case lir_logic_and: __ andl (reg, val); break;
2399        case lir_logic_or:  __ orl  (reg, val); break;
2400        case lir_logic_xor: __ xorl (reg, val); break;
2401        default: ShouldNotReachHere();
2402      }
2403    } else if (right->is_stack()) {
2404      // added support for stack operands
2405      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2406      switch (code) {
2407        case lir_logic_and: __ andl (reg, raddr); break;
2408        case lir_logic_or:  __ orl  (reg, raddr); break;
2409        case lir_logic_xor: __ xorl (reg, raddr); break;
2410        default: ShouldNotReachHere();
2411      }
2412    } else {
2413      Register rright = right->as_register();
2414      switch (code) {
2415        case lir_logic_and: __ andptr (reg, rright); break;
2416        case lir_logic_or : __ orptr  (reg, rright); break;
2417        case lir_logic_xor: __ xorptr (reg, rright); break;
2418        default: ShouldNotReachHere();
2419      }
2420    }
2421    move_regs(reg, dst->as_register());
2422  } else {
2423    Register l_lo = left->as_register_lo();
2424    Register l_hi = left->as_register_hi();
2425    if (right->is_constant()) {
2426#ifdef _LP64
2427      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2428      switch (code) {
2429        case lir_logic_and:
2430          __ andq(l_lo, rscratch1);
2431          break;
2432        case lir_logic_or:
2433          __ orq(l_lo, rscratch1);
2434          break;
2435        case lir_logic_xor:
2436          __ xorq(l_lo, rscratch1);
2437          break;
2438        default: ShouldNotReachHere();
2439      }
2440#else
2441      int r_lo = right->as_constant_ptr()->as_jint_lo();
2442      int r_hi = right->as_constant_ptr()->as_jint_hi();
2443      switch (code) {
2444        case lir_logic_and:
2445          __ andl(l_lo, r_lo);
2446          __ andl(l_hi, r_hi);
2447          break;
2448        case lir_logic_or:
2449          __ orl(l_lo, r_lo);
2450          __ orl(l_hi, r_hi);
2451          break;
2452        case lir_logic_xor:
2453          __ xorl(l_lo, r_lo);
2454          __ xorl(l_hi, r_hi);
2455          break;
2456        default: ShouldNotReachHere();
2457      }
2458#endif // _LP64
2459    } else {
2460#ifdef _LP64
2461      Register r_lo;
2462      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2463        r_lo = right->as_register();
2464      } else {
2465        r_lo = right->as_register_lo();
2466      }
2467#else
2468      Register r_lo = right->as_register_lo();
2469      Register r_hi = right->as_register_hi();
2470      assert(l_lo != r_hi, "overwriting registers");
2471#endif
2472      switch (code) {
2473        case lir_logic_and:
2474          __ andptr(l_lo, r_lo);
2475          NOT_LP64(__ andptr(l_hi, r_hi);)
2476          break;
2477        case lir_logic_or:
2478          __ orptr(l_lo, r_lo);
2479          NOT_LP64(__ orptr(l_hi, r_hi);)
2480          break;
2481        case lir_logic_xor:
2482          __ xorptr(l_lo, r_lo);
2483          NOT_LP64(__ xorptr(l_hi, r_hi);)
2484          break;
2485        default: ShouldNotReachHere();
2486      }
2487    }
2488
2489    Register dst_lo = dst->as_register_lo();
2490    Register dst_hi = dst->as_register_hi();
2491
2492#ifdef _LP64
2493    move_regs(l_lo, dst_lo);
2494#else
2495    if (dst_lo == l_hi) {
2496      assert(dst_hi != l_lo, "overwriting registers");
2497      move_regs(l_hi, dst_hi);
2498      move_regs(l_lo, dst_lo);
2499    } else {
2500      assert(dst_lo != l_hi, "overwriting registers");
2501      move_regs(l_lo, dst_lo);
2502      move_regs(l_hi, dst_hi);
2503    }
2504#endif // _LP64
2505  }
2506}
2507
2508
2509// we assume that rax, and rdx can be overwritten
2510void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2511
2512  assert(left->is_single_cpu(),   "left must be register");
2513  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
2514  assert(result->is_single_cpu(), "result must be register");
2515
2516  //  assert(left->destroys_register(), "check");
2517  //  assert(right->destroys_register(), "check");
2518
2519  Register lreg = left->as_register();
2520  Register dreg = result->as_register();
2521
2522  if (right->is_constant()) {
2523    int divisor = right->as_constant_ptr()->as_jint();
2524    assert(divisor > 0 && is_power_of_2(divisor), "must be");
2525    if (code == lir_idiv) {
2526      assert(lreg == rax, "must be rax,");
2527      assert(temp->as_register() == rdx, "tmp register must be rdx");
2528      __ cdql(); // sign extend into rdx:rax
2529      if (divisor == 2) {
2530        __ subl(lreg, rdx);
2531      } else {
2532        __ andl(rdx, divisor - 1);
2533        __ addl(lreg, rdx);
2534      }
2535      __ sarl(lreg, log2_intptr(divisor));
2536      move_regs(lreg, dreg);
2537    } else if (code == lir_irem) {
2538      Label done;
2539      __ mov(dreg, lreg);
2540      __ andl(dreg, 0x80000000 | (divisor - 1));
2541      __ jcc(Assembler::positive, done);
2542      __ decrement(dreg);
2543      __ orl(dreg, ~(divisor - 1));
2544      __ increment(dreg);
2545      __ bind(done);
2546    } else {
2547      ShouldNotReachHere();
2548    }
2549  } else {
2550    Register rreg = right->as_register();
2551    assert(lreg == rax, "left register must be rax,");
2552    assert(rreg != rdx, "right register must not be rdx");
2553    assert(temp->as_register() == rdx, "tmp register must be rdx");
2554
2555    move_regs(lreg, rax);
2556
2557    int idivl_offset = __ corrected_idivl(rreg);
2558    add_debug_info_for_div0(idivl_offset, info);
2559    if (code == lir_irem) {
2560      move_regs(rdx, dreg); // result is in rdx
2561    } else {
2562      move_regs(rax, dreg);
2563    }
2564  }
2565}
2566
2567
2568void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2569  if (opr1->is_single_cpu()) {
2570    Register reg1 = opr1->as_register();
2571    if (opr2->is_single_cpu()) {
2572      // cpu register - cpu register
2573      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2574        __ cmpptr(reg1, opr2->as_register());
2575      } else {
2576        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2577        __ cmpl(reg1, opr2->as_register());
2578      }
2579    } else if (opr2->is_stack()) {
2580      // cpu register - stack
2581      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2582        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2583      } else {
2584        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2585      }
2586    } else if (opr2->is_constant()) {
2587      // cpu register - constant
2588      LIR_Const* c = opr2->as_constant_ptr();
2589      if (c->type() == T_INT) {
2590        __ cmpl(reg1, c->as_jint());
2591      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2592        // In 64bit oops are single register
2593        jobject o = c->as_jobject();
2594        if (o == NULL) {
2595          __ cmpptr(reg1, (int32_t)NULL_WORD);
2596        } else {
2597#ifdef _LP64
2598          __ movoop(rscratch1, o);
2599          __ cmpptr(reg1, rscratch1);
2600#else
2601          __ cmpoop(reg1, c->as_jobject());
2602#endif // _LP64
2603        }
2604      } else {
2605        fatal("unexpected type: %s", basictype_to_str(c->type()));
2606      }
2607      // cpu register - address
2608    } else if (opr2->is_address()) {
2609      if (op->info() != NULL) {
2610        add_debug_info_for_null_check_here(op->info());
2611      }
2612      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2613    } else {
2614      ShouldNotReachHere();
2615    }
2616
2617  } else if(opr1->is_double_cpu()) {
2618    Register xlo = opr1->as_register_lo();
2619    Register xhi = opr1->as_register_hi();
2620    if (opr2->is_double_cpu()) {
2621#ifdef _LP64
2622      __ cmpptr(xlo, opr2->as_register_lo());
2623#else
2624      // cpu register - cpu register
2625      Register ylo = opr2->as_register_lo();
2626      Register yhi = opr2->as_register_hi();
2627      __ subl(xlo, ylo);
2628      __ sbbl(xhi, yhi);
2629      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2630        __ orl(xhi, xlo);
2631      }
2632#endif // _LP64
2633    } else if (opr2->is_constant()) {
2634      // cpu register - constant 0
2635      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2636#ifdef _LP64
2637      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2638#else
2639      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2640      __ orl(xhi, xlo);
2641#endif // _LP64
2642    } else {
2643      ShouldNotReachHere();
2644    }
2645
2646  } else if (opr1->is_single_xmm()) {
2647    XMMRegister reg1 = opr1->as_xmm_float_reg();
2648    if (opr2->is_single_xmm()) {
2649      // xmm register - xmm register
2650      __ ucomiss(reg1, opr2->as_xmm_float_reg());
2651    } else if (opr2->is_stack()) {
2652      // xmm register - stack
2653      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2654    } else if (opr2->is_constant()) {
2655      // xmm register - constant
2656      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2657    } else if (opr2->is_address()) {
2658      // xmm register - address
2659      if (op->info() != NULL) {
2660        add_debug_info_for_null_check_here(op->info());
2661      }
2662      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2663    } else {
2664      ShouldNotReachHere();
2665    }
2666
2667  } else if (opr1->is_double_xmm()) {
2668    XMMRegister reg1 = opr1->as_xmm_double_reg();
2669    if (opr2->is_double_xmm()) {
2670      // xmm register - xmm register
2671      __ ucomisd(reg1, opr2->as_xmm_double_reg());
2672    } else if (opr2->is_stack()) {
2673      // xmm register - stack
2674      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2675    } else if (opr2->is_constant()) {
2676      // xmm register - constant
2677      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2678    } else if (opr2->is_address()) {
2679      // xmm register - address
2680      if (op->info() != NULL) {
2681        add_debug_info_for_null_check_here(op->info());
2682      }
2683      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2684    } else {
2685      ShouldNotReachHere();
2686    }
2687
2688  } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2689    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2690    assert(opr2->is_fpu_register(), "both must be registers");
2691    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2692
2693  } else if (opr1->is_address() && opr2->is_constant()) {
2694    LIR_Const* c = opr2->as_constant_ptr();
2695#ifdef _LP64
2696    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2697      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2698      __ movoop(rscratch1, c->as_jobject());
2699    }
2700#endif // LP64
2701    if (op->info() != NULL) {
2702      add_debug_info_for_null_check_here(op->info());
2703    }
2704    // special case: address - constant
2705    LIR_Address* addr = opr1->as_address_ptr();
2706    if (c->type() == T_INT) {
2707      __ cmpl(as_Address(addr), c->as_jint());
2708    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2709#ifdef _LP64
2710      // %%% Make this explode if addr isn't reachable until we figure out a
2711      // better strategy by giving noreg as the temp for as_Address
2712      __ cmpptr(rscratch1, as_Address(addr, noreg));
2713#else
2714      __ cmpoop(as_Address(addr), c->as_jobject());
2715#endif // _LP64
2716    } else {
2717      ShouldNotReachHere();
2718    }
2719
2720  } else {
2721    ShouldNotReachHere();
2722  }
2723}
2724
2725void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2726  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2727    if (left->is_single_xmm()) {
2728      assert(right->is_single_xmm(), "must match");
2729      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2730    } else if (left->is_double_xmm()) {
2731      assert(right->is_double_xmm(), "must match");
2732      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2733
2734    } else {
2735      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2736      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2737
2738      assert(left->fpu() == 0, "left must be on TOS");
2739      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2740                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2741    }
2742  } else {
2743    assert(code == lir_cmp_l2i, "check");
2744#ifdef _LP64
2745    Label done;
2746    Register dest = dst->as_register();
2747    __ cmpptr(left->as_register_lo(), right->as_register_lo());
2748    __ movl(dest, -1);
2749    __ jccb(Assembler::less, done);
2750    __ set_byte_if_not_zero(dest);
2751    __ movzbl(dest, dest);
2752    __ bind(done);
2753#else
2754    __ lcmp2int(left->as_register_hi(),
2755                left->as_register_lo(),
2756                right->as_register_hi(),
2757                right->as_register_lo());
2758    move_regs(left->as_register_hi(), dst->as_register());
2759#endif // _LP64
2760  }
2761}
2762
2763
2764void LIR_Assembler::align_call(LIR_Code code) {
2765  if (os::is_MP()) {
2766    // make sure that the displacement word of the call ends up word aligned
2767    int offset = __ offset();
2768    switch (code) {
2769      case lir_static_call:
2770      case lir_optvirtual_call:
2771      case lir_dynamic_call:
2772        offset += NativeCall::displacement_offset;
2773        break;
2774      case lir_icvirtual_call:
2775        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2776      break;
2777      case lir_virtual_call:  // currently, sparc-specific for niagara
2778      default: ShouldNotReachHere();
2779    }
2780    __ align(BytesPerWord, offset);
2781  }
2782}
2783
2784
2785void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2786  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2787         "must be aligned");
2788  __ call(AddressLiteral(op->addr(), rtype));
2789  add_call_info(code_offset(), op->info());
2790}
2791
2792
2793void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2794  __ ic_call(op->addr());
2795  add_call_info(code_offset(), op->info());
2796  assert(!os::is_MP() ||
2797         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2798         "must be aligned");
2799}
2800
2801
2802/* Currently, vtable-dispatch is only enabled for sparc platforms */
2803void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2804  ShouldNotReachHere();
2805}
2806
2807
2808void LIR_Assembler::emit_static_call_stub() {
2809  address call_pc = __ pc();
2810  address stub = __ start_a_stub(call_stub_size());
2811  if (stub == NULL) {
2812    bailout("static call stub overflow");
2813    return;
2814  }
2815
2816  int start = __ offset();
2817  if (os::is_MP()) {
2818    // make sure that the displacement word of the call ends up word aligned
2819    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2820  }
2821  __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
2822  __ mov_metadata(rbx, (Metadata*)NULL);
2823  // must be set to -1 at code generation time
2824  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2825  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2826  __ jump(RuntimeAddress(__ pc()));
2827
2828  if (UseAOT) {
2829    // Trampoline to aot code
2830    __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
2831#ifdef _LP64
2832    __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
2833#else
2834    __ movl(rax, 0xdeadffff);  // address is zapped till fixup time.
2835#endif
2836    __ jmp(rax);
2837  }
2838  assert(__ offset() - start <= call_stub_size(), "stub too big");
2839  __ end_a_stub();
2840}
2841
2842
2843void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2844  assert(exceptionOop->as_register() == rax, "must match");
2845  assert(exceptionPC->as_register() == rdx, "must match");
2846
2847  // exception object is not added to oop map by LinearScan
2848  // (LinearScan assumes that no oops are in fixed registers)
2849  info->add_register_oop(exceptionOop);
2850  Runtime1::StubID unwind_id;
2851
2852  // get current pc information
2853  // pc is only needed if the method has an exception handler, the unwind code does not need it.
2854  int pc_for_athrow_offset = __ offset();
2855  InternalAddress pc_for_athrow(__ pc());
2856  __ lea(exceptionPC->as_register(), pc_for_athrow);
2857  add_call_info(pc_for_athrow_offset, info); // for exception handler
2858
2859  __ verify_not_null_oop(rax);
2860  // search an exception handler (rax: exception oop, rdx: throwing pc)
2861  if (compilation()->has_fpu_code()) {
2862    unwind_id = Runtime1::handle_exception_id;
2863  } else {
2864    unwind_id = Runtime1::handle_exception_nofpu_id;
2865  }
2866  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2867
2868  // enough room for two byte trap
2869  __ nop();
2870}
2871
2872
2873void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2874  assert(exceptionOop->as_register() == rax, "must match");
2875
2876  __ jmp(_unwind_handler_entry);
2877}
2878
2879
2880void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2881
2882  // optimized version for linear scan:
2883  // * count must be already in ECX (guaranteed by LinearScan)
2884  // * left and dest must be equal
2885  // * tmp must be unused
2886  assert(count->as_register() == SHIFT_count, "count must be in ECX");
2887  assert(left == dest, "left and dest must be equal");
2888  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2889
2890  if (left->is_single_cpu()) {
2891    Register value = left->as_register();
2892    assert(value != SHIFT_count, "left cannot be ECX");
2893
2894    switch (code) {
2895      case lir_shl:  __ shll(value); break;
2896      case lir_shr:  __ sarl(value); break;
2897      case lir_ushr: __ shrl(value); break;
2898      default: ShouldNotReachHere();
2899    }
2900  } else if (left->is_double_cpu()) {
2901    Register lo = left->as_register_lo();
2902    Register hi = left->as_register_hi();
2903    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2904#ifdef _LP64
2905    switch (code) {
2906      case lir_shl:  __ shlptr(lo);        break;
2907      case lir_shr:  __ sarptr(lo);        break;
2908      case lir_ushr: __ shrptr(lo);        break;
2909      default: ShouldNotReachHere();
2910    }
2911#else
2912
2913    switch (code) {
2914      case lir_shl:  __ lshl(hi, lo);        break;
2915      case lir_shr:  __ lshr(hi, lo, true);  break;
2916      case lir_ushr: __ lshr(hi, lo, false); break;
2917      default: ShouldNotReachHere();
2918    }
2919#endif // LP64
2920  } else {
2921    ShouldNotReachHere();
2922  }
2923}
2924
2925
2926void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2927  if (dest->is_single_cpu()) {
2928    // first move left into dest so that left is not destroyed by the shift
2929    Register value = dest->as_register();
2930    count = count & 0x1F; // Java spec
2931
2932    move_regs(left->as_register(), value);
2933    switch (code) {
2934      case lir_shl:  __ shll(value, count); break;
2935      case lir_shr:  __ sarl(value, count); break;
2936      case lir_ushr: __ shrl(value, count); break;
2937      default: ShouldNotReachHere();
2938    }
2939  } else if (dest->is_double_cpu()) {
2940#ifndef _LP64
2941    Unimplemented();
2942#else
2943    // first move left into dest so that left is not destroyed by the shift
2944    Register value = dest->as_register_lo();
2945    count = count & 0x1F; // Java spec
2946
2947    move_regs(left->as_register_lo(), value);
2948    switch (code) {
2949      case lir_shl:  __ shlptr(value, count); break;
2950      case lir_shr:  __ sarptr(value, count); break;
2951      case lir_ushr: __ shrptr(value, count); break;
2952      default: ShouldNotReachHere();
2953    }
2954#endif // _LP64
2955  } else {
2956    ShouldNotReachHere();
2957  }
2958}
2959
2960
2961void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2962  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2963  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2964  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2965  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2966}
2967
2968
2969void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2970  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2971  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2972  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2973  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2974}
2975
2976
2977void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2978  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2979  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2980  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2981  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
2982}
2983
2984
2985void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
2986  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2987  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2988  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2989  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
2990}
2991
2992
2993// This code replaces a call to arraycopy; no exception may
2994// be thrown in this code, they must be thrown in the System.arraycopy
2995// activation frame; we could save some checks if this would not be the case
2996void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2997  ciArrayKlass* default_type = op->expected_type();
2998  Register src = op->src()->as_register();
2999  Register dst = op->dst()->as_register();
3000  Register src_pos = op->src_pos()->as_register();
3001  Register dst_pos = op->dst_pos()->as_register();
3002  Register length  = op->length()->as_register();
3003  Register tmp = op->tmp()->as_register();
3004
3005  CodeStub* stub = op->stub();
3006  int flags = op->flags();
3007  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3008  if (basic_type == T_ARRAY) basic_type = T_OBJECT;
3009
3010  // if we don't know anything, just go through the generic arraycopy
3011  if (default_type == NULL) {
3012    Label done;
3013    // save outgoing arguments on stack in case call to System.arraycopy is needed
3014    // HACK ALERT. This code used to push the parameters in a hardwired fashion
3015    // for interpreter calling conventions. Now we have to do it in new style conventions.
3016    // For the moment until C1 gets the new register allocator I just force all the
3017    // args to the right place (except the register args) and then on the back side
3018    // reload the register args properly if we go slow path. Yuck
3019
3020    // These are proper for the calling convention
3021    store_parameter(length, 2);
3022    store_parameter(dst_pos, 1);
3023    store_parameter(dst, 0);
3024
3025    // these are just temporary placements until we need to reload
3026    store_parameter(src_pos, 3);
3027    store_parameter(src, 4);
3028    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3029
3030    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
3031
3032    address copyfunc_addr = StubRoutines::generic_arraycopy();
3033
3034    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
3035#ifdef _LP64
3036    // The arguments are in java calling convention so we can trivially shift them to C
3037    // convention
3038    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3039    __ mov(c_rarg0, j_rarg0);
3040    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3041    __ mov(c_rarg1, j_rarg1);
3042    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3043    __ mov(c_rarg2, j_rarg2);
3044    assert_different_registers(c_rarg3, j_rarg4);
3045    __ mov(c_rarg3, j_rarg3);
3046#ifdef _WIN64
3047    // Allocate abi space for args but be sure to keep stack aligned
3048    __ subptr(rsp, 6*wordSize);
3049    store_parameter(j_rarg4, 4);
3050    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3051      __ call(RuntimeAddress(C_entry));
3052    } else {
3053#ifndef PRODUCT
3054      if (PrintC1Statistics) {
3055        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3056      }
3057#endif
3058      __ call(RuntimeAddress(copyfunc_addr));
3059    }
3060    __ addptr(rsp, 6*wordSize);
3061#else
3062    __ mov(c_rarg4, j_rarg4);
3063    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3064      __ call(RuntimeAddress(C_entry));
3065    } else {
3066#ifndef PRODUCT
3067      if (PrintC1Statistics) {
3068        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3069      }
3070#endif
3071      __ call(RuntimeAddress(copyfunc_addr));
3072    }
3073#endif // _WIN64
3074#else
3075    __ push(length);
3076    __ push(dst_pos);
3077    __ push(dst);
3078    __ push(src_pos);
3079    __ push(src);
3080
3081    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3082      __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
3083    } else {
3084#ifndef PRODUCT
3085      if (PrintC1Statistics) {
3086        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3087      }
3088#endif
3089      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
3090    }
3091
3092#endif // _LP64
3093
3094    __ cmpl(rax, 0);
3095    __ jcc(Assembler::equal, *stub->continuation());
3096
3097    if (copyfunc_addr != NULL) {
3098      __ mov(tmp, rax);
3099      __ xorl(tmp, -1);
3100    }
3101
3102    // Reload values from the stack so they are where the stub
3103    // expects them.
3104    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3105    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3106    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3107    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3108    __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3109
3110    if (copyfunc_addr != NULL) {
3111      __ subl(length, tmp);
3112      __ addl(src_pos, tmp);
3113      __ addl(dst_pos, tmp);
3114    }
3115    __ jmp(*stub->entry());
3116
3117    __ bind(*stub->continuation());
3118    return;
3119  }
3120
3121  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3122
3123  int elem_size = type2aelembytes(basic_type);
3124  Address::ScaleFactor scale;
3125
3126  switch (elem_size) {
3127    case 1 :
3128      scale = Address::times_1;
3129      break;
3130    case 2 :
3131      scale = Address::times_2;
3132      break;
3133    case 4 :
3134      scale = Address::times_4;
3135      break;
3136    case 8 :
3137      scale = Address::times_8;
3138      break;
3139    default:
3140      scale = Address::no_scale;
3141      ShouldNotReachHere();
3142  }
3143
3144  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
3145  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
3146  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
3147  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
3148
3149  // length and pos's are all sign extended at this point on 64bit
3150
3151  // test for NULL
3152  if (flags & LIR_OpArrayCopy::src_null_check) {
3153    __ testptr(src, src);
3154    __ jcc(Assembler::zero, *stub->entry());
3155  }
3156  if (flags & LIR_OpArrayCopy::dst_null_check) {
3157    __ testptr(dst, dst);
3158    __ jcc(Assembler::zero, *stub->entry());
3159  }
3160
3161  // If the compiler was not able to prove that exact type of the source or the destination
3162  // of the arraycopy is an array type, check at runtime if the source or the destination is
3163  // an instance type.
3164  if (flags & LIR_OpArrayCopy::type_check) {
3165    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3166      __ load_klass(tmp, dst);
3167      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3168      __ jcc(Assembler::greaterEqual, *stub->entry());
3169    }
3170
3171    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3172      __ load_klass(tmp, src);
3173      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
3174      __ jcc(Assembler::greaterEqual, *stub->entry());
3175    }
3176  }
3177
3178  // check if negative
3179  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
3180    __ testl(src_pos, src_pos);
3181    __ jcc(Assembler::less, *stub->entry());
3182  }
3183  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
3184    __ testl(dst_pos, dst_pos);
3185    __ jcc(Assembler::less, *stub->entry());
3186  }
3187
3188  if (flags & LIR_OpArrayCopy::src_range_check) {
3189    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3190    __ cmpl(tmp, src_length_addr);
3191    __ jcc(Assembler::above, *stub->entry());
3192  }
3193  if (flags & LIR_OpArrayCopy::dst_range_check) {
3194    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3195    __ cmpl(tmp, dst_length_addr);
3196    __ jcc(Assembler::above, *stub->entry());
3197  }
3198
3199  if (flags & LIR_OpArrayCopy::length_positive_check) {
3200    __ testl(length, length);
3201    __ jcc(Assembler::less, *stub->entry());
3202  }
3203
3204#ifdef _LP64
3205  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
3206  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
3207#endif
3208
3209  if (flags & LIR_OpArrayCopy::type_check) {
3210    // We don't know the array types are compatible
3211    if (basic_type != T_OBJECT) {
3212      // Simple test for basic type arrays
3213      if (UseCompressedClassPointers) {
3214        __ movl(tmp, src_klass_addr);
3215        __ cmpl(tmp, dst_klass_addr);
3216      } else {
3217        __ movptr(tmp, src_klass_addr);
3218        __ cmpptr(tmp, dst_klass_addr);
3219      }
3220      __ jcc(Assembler::notEqual, *stub->entry());
3221    } else {
3222      // For object arrays, if src is a sub class of dst then we can
3223      // safely do the copy.
3224      Label cont, slow;
3225
3226      __ push(src);
3227      __ push(dst);
3228
3229      __ load_klass(src, src);
3230      __ load_klass(dst, dst);
3231
3232      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3233
3234      __ push(src);
3235      __ push(dst);
3236      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3237      __ pop(dst);
3238      __ pop(src);
3239
3240      __ cmpl(src, 0);
3241      __ jcc(Assembler::notEqual, cont);
3242
3243      __ bind(slow);
3244      __ pop(dst);
3245      __ pop(src);
3246
3247      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
3248      if (copyfunc_addr != NULL) { // use stub if available
3249        // src is not a sub class of dst so we have to do a
3250        // per-element check.
3251
3252        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
3253        if ((flags & mask) != mask) {
3254          // Check that at least both of them object arrays.
3255          assert(flags & mask, "one of the two should be known to be an object array");
3256
3257          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3258            __ load_klass(tmp, src);
3259          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3260            __ load_klass(tmp, dst);
3261          }
3262          int lh_offset = in_bytes(Klass::layout_helper_offset());
3263          Address klass_lh_addr(tmp, lh_offset);
3264          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3265          __ cmpl(klass_lh_addr, objArray_lh);
3266          __ jcc(Assembler::notEqual, *stub->entry());
3267        }
3268
3269       // Spill because stubs can use any register they like and it's
3270       // easier to restore just those that we care about.
3271       store_parameter(dst, 0);
3272       store_parameter(dst_pos, 1);
3273       store_parameter(length, 2);
3274       store_parameter(src_pos, 3);
3275       store_parameter(src, 4);
3276
3277#ifndef _LP64
3278        __ movptr(tmp, dst_klass_addr);
3279        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
3280        __ push(tmp);
3281        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
3282        __ push(tmp);
3283        __ push(length);
3284        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3285        __ push(tmp);
3286        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3287        __ push(tmp);
3288
3289        __ call_VM_leaf(copyfunc_addr, 5);
3290#else
3291        __ movl2ptr(length, length); //higher 32bits must be null
3292
3293        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3294        assert_different_registers(c_rarg0, dst, dst_pos, length);
3295        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3296        assert_different_registers(c_rarg1, dst, length);
3297
3298        __ mov(c_rarg2, length);
3299        assert_different_registers(c_rarg2, dst);
3300
3301#ifdef _WIN64
3302        // Allocate abi space for args but be sure to keep stack aligned
3303        __ subptr(rsp, 6*wordSize);
3304        __ load_klass(c_rarg3, dst);
3305        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
3306        store_parameter(c_rarg3, 4);
3307        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
3308        __ call(RuntimeAddress(copyfunc_addr));
3309        __ addptr(rsp, 6*wordSize);
3310#else
3311        __ load_klass(c_rarg4, dst);
3312        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
3313        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
3314        __ call(RuntimeAddress(copyfunc_addr));
3315#endif
3316
3317#endif
3318
3319#ifndef PRODUCT
3320        if (PrintC1Statistics) {
3321          Label failed;
3322          __ testl(rax, rax);
3323          __ jcc(Assembler::notZero, failed);
3324          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
3325          __ bind(failed);
3326        }
3327#endif
3328
3329        __ testl(rax, rax);
3330        __ jcc(Assembler::zero, *stub->continuation());
3331
3332#ifndef PRODUCT
3333        if (PrintC1Statistics) {
3334          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
3335        }
3336#endif
3337
3338        __ mov(tmp, rax);
3339
3340        __ xorl(tmp, -1);
3341
3342        // Restore previously spilled arguments
3343        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3344        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3345        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3346        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3347        __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3348
3349
3350        __ subl(length, tmp);
3351        __ addl(src_pos, tmp);
3352        __ addl(dst_pos, tmp);
3353      }
3354
3355      __ jmp(*stub->entry());
3356
3357      __ bind(cont);
3358      __ pop(dst);
3359      __ pop(src);
3360    }
3361  }
3362
3363#ifdef ASSERT
3364  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3365    // Sanity check the known type with the incoming class.  For the
3366    // primitive case the types must match exactly with src.klass and
3367    // dst.klass each exactly matching the default type.  For the
3368    // object array case, if no type check is needed then either the
3369    // dst type is exactly the expected type and the src type is a
3370    // subtype which we can't check or src is the same array as dst
3371    // but not necessarily exactly of type default_type.
3372    Label known_ok, halt;
3373    __ mov_metadata(tmp, default_type->constant_encoding());
3374#ifdef _LP64
3375    if (UseCompressedClassPointers) {
3376      __ encode_klass_not_null(tmp);
3377    }
3378#endif
3379
3380    if (basic_type != T_OBJECT) {
3381
3382      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
3383      else                   __ cmpptr(tmp, dst_klass_addr);
3384      __ jcc(Assembler::notEqual, halt);
3385      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
3386      else                   __ cmpptr(tmp, src_klass_addr);
3387      __ jcc(Assembler::equal, known_ok);
3388    } else {
3389      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
3390      else                   __ cmpptr(tmp, dst_klass_addr);
3391      __ jcc(Assembler::equal, known_ok);
3392      __ cmpptr(src, dst);
3393      __ jcc(Assembler::equal, known_ok);
3394    }
3395    __ bind(halt);
3396    __ stop("incorrect type information in arraycopy");
3397    __ bind(known_ok);
3398  }
3399#endif
3400
3401#ifndef PRODUCT
3402  if (PrintC1Statistics) {
3403    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3404  }
3405#endif
3406
3407#ifdef _LP64
3408  assert_different_registers(c_rarg0, dst, dst_pos, length);
3409  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3410  assert_different_registers(c_rarg1, length);
3411  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3412  __ mov(c_rarg2, length);
3413
3414#else
3415  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3416  store_parameter(tmp, 0);
3417  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3418  store_parameter(tmp, 1);
3419  store_parameter(length, 2);
3420#endif // _LP64
3421
3422  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
3423  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
3424  const char *name;
3425  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
3426  __ call_VM_leaf(entry, 0);
3427
3428  __ bind(*stub->continuation());
3429}
3430
3431void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3432  assert(op->crc()->is_single_cpu(),  "crc must be register");
3433  assert(op->val()->is_single_cpu(),  "byte value must be register");
3434  assert(op->result_opr()->is_single_cpu(), "result must be register");
3435  Register crc = op->crc()->as_register();
3436  Register val = op->val()->as_register();
3437  Register res = op->result_opr()->as_register();
3438
3439  assert_different_registers(val, crc, res);
3440
3441  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3442  __ notl(crc); // ~crc
3443  __ update_byte_crc32(crc, val, res);
3444  __ notl(crc); // ~crc
3445  __ mov(res, crc);
3446}
3447
3448void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3449  Register obj = op->obj_opr()->as_register();  // may not be an oop
3450  Register hdr = op->hdr_opr()->as_register();
3451  Register lock = op->lock_opr()->as_register();
3452  if (!UseFastLocking) {
3453    __ jmp(*op->stub()->entry());
3454  } else if (op->code() == lir_lock) {
3455    Register scratch = noreg;
3456    if (UseBiasedLocking) {
3457      scratch = op->scratch_opr()->as_register();
3458    }
3459    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3460    // add debug info for NullPointerException only if one is possible
3461    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3462    if (op->info() != NULL) {
3463      add_debug_info_for_null_check(null_check_offset, op->info());
3464    }
3465    // done
3466  } else if (op->code() == lir_unlock) {
3467    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3468    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3469  } else {
3470    Unimplemented();
3471  }
3472  __ bind(*op->stub()->continuation());
3473}
3474
3475
3476void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3477  ciMethod* method = op->profiled_method();
3478  int bci          = op->profiled_bci();
3479  ciMethod* callee = op->profiled_callee();
3480
3481  // Update counter for all call types
3482  ciMethodData* md = method->method_data_or_null();
3483  assert(md != NULL, "Sanity");
3484  ciProfileData* data = md->bci_to_data(bci);
3485  assert(data->is_CounterData(), "need CounterData for calls");
3486  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
3487  Register mdo  = op->mdo()->as_register();
3488  __ mov_metadata(mdo, md->constant_encoding());
3489  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3490  Bytecodes::Code bc = method->java_code_at_bci(bci);
3491  const bool callee_is_static = callee->is_loaded() && callee->is_static();
3492  // Perform additional virtual call profiling for invokevirtual and
3493  // invokeinterface bytecodes
3494  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
3495      !callee_is_static &&  // required for optimized MH invokes
3496      C1ProfileVirtualCalls) {
3497    assert(op->recv()->is_single_cpu(), "recv must be allocated");
3498    Register recv = op->recv()->as_register();
3499    assert_different_registers(mdo, recv);
3500    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3501    ciKlass* known_klass = op->known_holder();
3502    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3503      // We know the type that will be seen at this call site; we can
3504      // statically update the MethodData* rather than needing to do
3505      // dynamic tests on the receiver type
3506
3507      // NOTE: we should probably put a lock around this search to
3508      // avoid collisions by concurrent compilations
3509      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3510      uint i;
3511      for (i = 0; i < VirtualCallData::row_limit(); i++) {
3512        ciKlass* receiver = vc_data->receiver(i);
3513        if (known_klass->equals(receiver)) {
3514          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3515          __ addptr(data_addr, DataLayout::counter_increment);
3516          return;
3517        }
3518      }
3519
3520      // Receiver type not found in profile data; select an empty slot
3521
3522      // Note that this is less efficient than it should be because it
3523      // always does a write to the receiver part of the
3524      // VirtualCallData rather than just the first time
3525      for (i = 0; i < VirtualCallData::row_limit(); i++) {
3526        ciKlass* receiver = vc_data->receiver(i);
3527        if (receiver == NULL) {
3528          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3529          __ mov_metadata(recv_addr, known_klass->constant_encoding());
3530          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3531          __ addptr(data_addr, DataLayout::counter_increment);
3532          return;
3533        }
3534      }
3535    } else {
3536      __ load_klass(recv, recv);
3537      Label update_done;
3538      type_profile_helper(mdo, md, data, recv, &update_done);
3539      // Receiver did not match any saved receiver and there is no empty row for it.
3540      // Increment total counter to indicate polymorphic case.
3541      __ addptr(counter_addr, DataLayout::counter_increment);
3542
3543      __ bind(update_done);
3544    }
3545  } else {
3546    // Static call
3547    __ addptr(counter_addr, DataLayout::counter_increment);
3548  }
3549}
3550
3551void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3552  Register obj = op->obj()->as_register();
3553  Register tmp = op->tmp()->as_pointer_register();
3554  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3555  ciKlass* exact_klass = op->exact_klass();
3556  intptr_t current_klass = op->current_klass();
3557  bool not_null = op->not_null();
3558  bool no_conflict = op->no_conflict();
3559
3560  Label update, next, none;
3561
3562  bool do_null = !not_null;
3563  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3564  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3565
3566  assert(do_null || do_update, "why are we here?");
3567  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
3568
3569  __ verify_oop(obj);
3570
3571  if (tmp != obj) {
3572    __ mov(tmp, obj);
3573  }
3574  if (do_null) {
3575    __ testptr(tmp, tmp);
3576    __ jccb(Assembler::notZero, update);
3577    if (!TypeEntries::was_null_seen(current_klass)) {
3578      __ orptr(mdo_addr, TypeEntries::null_seen);
3579    }
3580    if (do_update) {
3581#ifndef ASSERT
3582      __ jmpb(next);
3583    }
3584#else
3585      __ jmp(next);
3586    }
3587  } else {
3588    __ testptr(tmp, tmp);
3589    __ jccb(Assembler::notZero, update);
3590    __ stop("unexpect null obj");
3591#endif
3592  }
3593
3594  __ bind(update);
3595
3596  if (do_update) {
3597#ifdef ASSERT
3598    if (exact_klass != NULL) {
3599      Label ok;
3600      __ load_klass(tmp, tmp);
3601      __ push(tmp);
3602      __ mov_metadata(tmp, exact_klass->constant_encoding());
3603      __ cmpptr(tmp, Address(rsp, 0));
3604      __ jccb(Assembler::equal, ok);
3605      __ stop("exact klass and actual klass differ");
3606      __ bind(ok);
3607      __ pop(tmp);
3608    }
3609#endif
3610    if (!no_conflict) {
3611      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
3612        if (exact_klass != NULL) {
3613          __ mov_metadata(tmp, exact_klass->constant_encoding());
3614        } else {
3615          __ load_klass(tmp, tmp);
3616        }
3617
3618        __ xorptr(tmp, mdo_addr);
3619        __ testptr(tmp, TypeEntries::type_klass_mask);
3620        // klass seen before, nothing to do. The unknown bit may have been
3621        // set already but no need to check.
3622        __ jccb(Assembler::zero, next);
3623
3624        __ testptr(tmp, TypeEntries::type_unknown);
3625        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3626
3627        if (TypeEntries::is_type_none(current_klass)) {
3628          __ cmpptr(mdo_addr, 0);
3629          __ jccb(Assembler::equal, none);
3630          __ cmpptr(mdo_addr, TypeEntries::null_seen);
3631          __ jccb(Assembler::equal, none);
3632          // There is a chance that the checks above (re-reading profiling
3633          // data from memory) fail if another thread has just set the
3634          // profiling to this obj's klass
3635          __ xorptr(tmp, mdo_addr);
3636          __ testptr(tmp, TypeEntries::type_klass_mask);
3637          __ jccb(Assembler::zero, next);
3638        }
3639      } else {
3640        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3641               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
3642
3643        __ movptr(tmp, mdo_addr);
3644        __ testptr(tmp, TypeEntries::type_unknown);
3645        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3646      }
3647
3648      // different than before. Cannot keep accurate profile.
3649      __ orptr(mdo_addr, TypeEntries::type_unknown);
3650
3651      if (TypeEntries::is_type_none(current_klass)) {
3652        __ jmpb(next);
3653
3654        __ bind(none);
3655        // first time here. Set profile type.
3656        __ movptr(mdo_addr, tmp);
3657      }
3658    } else {
3659      // There's a single possible klass at this profile point
3660      assert(exact_klass != NULL, "should be");
3661      if (TypeEntries::is_type_none(current_klass)) {
3662        __ mov_metadata(tmp, exact_klass->constant_encoding());
3663        __ xorptr(tmp, mdo_addr);
3664        __ testptr(tmp, TypeEntries::type_klass_mask);
3665#ifdef ASSERT
3666        __ jcc(Assembler::zero, next);
3667
3668        {
3669          Label ok;
3670          __ push(tmp);
3671          __ cmpptr(mdo_addr, 0);
3672          __ jcc(Assembler::equal, ok);
3673          __ cmpptr(mdo_addr, TypeEntries::null_seen);
3674          __ jcc(Assembler::equal, ok);
3675          // may have been set by another thread
3676          __ mov_metadata(tmp, exact_klass->constant_encoding());
3677          __ xorptr(tmp, mdo_addr);
3678          __ testptr(tmp, TypeEntries::type_mask);
3679          __ jcc(Assembler::zero, ok);
3680
3681          __ stop("unexpected profiling mismatch");
3682          __ bind(ok);
3683          __ pop(tmp);
3684        }
3685#else
3686        __ jccb(Assembler::zero, next);
3687#endif
3688        // first time here. Set profile type.
3689        __ movptr(mdo_addr, tmp);
3690      } else {
3691        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3692               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3693
3694        __ movptr(tmp, mdo_addr);
3695        __ testptr(tmp, TypeEntries::type_unknown);
3696        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3697
3698        __ orptr(mdo_addr, TypeEntries::type_unknown);
3699      }
3700    }
3701
3702    __ bind(next);
3703  }
3704}
3705
3706void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3707  Unimplemented();
3708}
3709
3710
3711void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3712  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3713}
3714
3715
3716void LIR_Assembler::align_backward_branch_target() {
3717  __ align(BytesPerWord);
3718}
3719
3720
3721void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3722  if (left->is_single_cpu()) {
3723    __ negl(left->as_register());
3724    move_regs(left->as_register(), dest->as_register());
3725
3726  } else if (left->is_double_cpu()) {
3727    Register lo = left->as_register_lo();
3728#ifdef _LP64
3729    Register dst = dest->as_register_lo();
3730    __ movptr(dst, lo);
3731    __ negptr(dst);
3732#else
3733    Register hi = left->as_register_hi();
3734    __ lneg(hi, lo);
3735    if (dest->as_register_lo() == hi) {
3736      assert(dest->as_register_hi() != lo, "destroying register");
3737      move_regs(hi, dest->as_register_hi());
3738      move_regs(lo, dest->as_register_lo());
3739    } else {
3740      move_regs(lo, dest->as_register_lo());
3741      move_regs(hi, dest->as_register_hi());
3742    }
3743#endif // _LP64
3744
3745  } else if (dest->is_single_xmm()) {
3746    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
3747      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
3748    }
3749    if (UseAVX > 0) {
3750      __ vnegatess(dest->as_xmm_float_reg(), dest->as_xmm_float_reg(),
3751                   ExternalAddress((address)float_signflip_pool));
3752    } else {
3753      __ xorps(dest->as_xmm_float_reg(),
3754               ExternalAddress((address)float_signflip_pool));
3755    }
3756  } else if (dest->is_double_xmm()) {
3757    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
3758      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
3759    }
3760    if (UseAVX > 0) {
3761      __ vnegatesd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg(),
3762                   ExternalAddress((address)double_signflip_pool));
3763    } else {
3764      __ xorpd(dest->as_xmm_double_reg(),
3765               ExternalAddress((address)double_signflip_pool));
3766    }
3767  } else if (left->is_single_fpu() || left->is_double_fpu()) {
3768    assert(left->fpu() == 0, "arg must be on TOS");
3769    assert(dest->fpu() == 0, "dest must be TOS");
3770    __ fchs();
3771
3772  } else {
3773    ShouldNotReachHere();
3774  }
3775}
3776
3777
3778void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
3779  assert(addr->is_address() && dest->is_register(), "check");
3780  Register reg;
3781  reg = dest->as_pointer_register();
3782  __ lea(reg, as_Address(addr->as_address_ptr()));
3783}
3784
3785
3786
3787void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3788  assert(!tmp->is_valid(), "don't need temporary");
3789  __ call(RuntimeAddress(dest));
3790  if (info != NULL) {
3791    add_call_info_here(info);
3792  }
3793}
3794
3795
3796void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3797  assert(type == T_LONG, "only for volatile long fields");
3798
3799  if (info != NULL) {
3800    add_debug_info_for_null_check_here(info);
3801  }
3802
3803  if (src->is_double_xmm()) {
3804    if (dest->is_double_cpu()) {
3805#ifdef _LP64
3806      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3807#else
3808      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3809      __ psrlq(src->as_xmm_double_reg(), 32);
3810      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3811#endif // _LP64
3812    } else if (dest->is_double_stack()) {
3813      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3814    } else if (dest->is_address()) {
3815      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3816    } else {
3817      ShouldNotReachHere();
3818    }
3819
3820  } else if (dest->is_double_xmm()) {
3821    if (src->is_double_stack()) {
3822      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3823    } else if (src->is_address()) {
3824      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3825    } else {
3826      ShouldNotReachHere();
3827    }
3828
3829  } else if (src->is_double_fpu()) {
3830    assert(src->fpu_regnrLo() == 0, "must be TOS");
3831    if (dest->is_double_stack()) {
3832      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
3833    } else if (dest->is_address()) {
3834      __ fistp_d(as_Address(dest->as_address_ptr()));
3835    } else {
3836      ShouldNotReachHere();
3837    }
3838
3839  } else if (dest->is_double_fpu()) {
3840    assert(dest->fpu_regnrLo() == 0, "must be TOS");
3841    if (src->is_double_stack()) {
3842      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
3843    } else if (src->is_address()) {
3844      __ fild_d(as_Address(src->as_address_ptr()));
3845    } else {
3846      ShouldNotReachHere();
3847    }
3848  } else {
3849    ShouldNotReachHere();
3850  }
3851}
3852
3853#ifdef ASSERT
3854// emit run-time assertion
3855void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3856  assert(op->code() == lir_assert, "must be");
3857
3858  if (op->in_opr1()->is_valid()) {
3859    assert(op->in_opr2()->is_valid(), "both operands must be valid");
3860    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3861  } else {
3862    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3863    assert(op->condition() == lir_cond_always, "no other conditions allowed");
3864  }
3865
3866  Label ok;
3867  if (op->condition() != lir_cond_always) {
3868    Assembler::Condition acond = Assembler::zero;
3869    switch (op->condition()) {
3870      case lir_cond_equal:        acond = Assembler::equal;       break;
3871      case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
3872      case lir_cond_less:         acond = Assembler::less;        break;
3873      case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
3874      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3875      case lir_cond_greater:      acond = Assembler::greater;     break;
3876      case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
3877      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
3878      default:                    ShouldNotReachHere();
3879    }
3880    __ jcc(acond, ok);
3881  }
3882  if (op->halt()) {
3883    const char* str = __ code_string(op->msg());
3884    __ stop(str);
3885  } else {
3886    breakpoint();
3887  }
3888  __ bind(ok);
3889}
3890#endif
3891
3892void LIR_Assembler::membar() {
3893  // QQQ sparc TSO uses this,
3894  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
3895}
3896
3897void LIR_Assembler::membar_acquire() {
3898  // No x86 machines currently require load fences
3899}
3900
3901void LIR_Assembler::membar_release() {
3902  // No x86 machines currently require store fences
3903}
3904
3905void LIR_Assembler::membar_loadload() {
3906  // no-op
3907  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3908}
3909
3910void LIR_Assembler::membar_storestore() {
3911  // no-op
3912  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3913}
3914
3915void LIR_Assembler::membar_loadstore() {
3916  // no-op
3917  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3918}
3919
3920void LIR_Assembler::membar_storeload() {
3921  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3922}
3923
3924void LIR_Assembler::on_spin_wait() {
3925  __ pause ();
3926}
3927
3928void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3929  assert(result_reg->is_register(), "check");
3930#ifdef _LP64
3931  // __ get_thread(result_reg->as_register_lo());
3932  __ mov(result_reg->as_register(), r15_thread);
3933#else
3934  __ get_thread(result_reg->as_register());
3935#endif // _LP64
3936}
3937
3938
3939void LIR_Assembler::peephole(LIR_List*) {
3940  // do nothing for now
3941}
3942
3943void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3944  assert(data == dest, "xchg/xadd uses only 2 operands");
3945
3946  if (data->type() == T_INT) {
3947    if (code == lir_xadd) {
3948      if (os::is_MP()) {
3949        __ lock();
3950      }
3951      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3952    } else {
3953      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3954    }
3955  } else if (data->is_oop()) {
3956    assert (code == lir_xchg, "xadd for oops");
3957    Register obj = data->as_register();
3958#ifdef _LP64
3959    if (UseCompressedOops) {
3960      __ encode_heap_oop(obj);
3961      __ xchgl(obj, as_Address(src->as_address_ptr()));
3962      __ decode_heap_oop(obj);
3963    } else {
3964      __ xchgptr(obj, as_Address(src->as_address_ptr()));
3965    }
3966#else
3967    __ xchgl(obj, as_Address(src->as_address_ptr()));
3968#endif
3969  } else if (data->type() == T_LONG) {
3970#ifdef _LP64
3971    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3972    if (code == lir_xadd) {
3973      if (os::is_MP()) {
3974        __ lock();
3975      }
3976      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3977    } else {
3978      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3979    }
3980#else
3981    ShouldNotReachHere();
3982#endif
3983  } else {
3984    ShouldNotReachHere();
3985  }
3986}
3987
3988#undef __
3989