// c1_LIRAssembler_x86.cpp revision 9450:b57d415bdaa9
/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
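
// Illustrative sketch (not code emitted at this point): with these pools,
// NegF/NegD become a single XOR against the 16-byte-aligned sign-flip mask
// and AbsF/AbsD a single AND against the sign mask, e.g.
//   __ xorps(dst, ExternalAddress((address)float_signflip_pool));
//   __ andpd(dst, ExternalAddress((address)double_signmask_pool));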



NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where the count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

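// Note on the control words used below: the 24-bit setting makes x87
// arithmetic round its significand like IEEE single precision (needed for
// correct jfloat results), while the std setting restores the VM's default
// control word.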
void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}

void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (!__ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}

void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
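    // Worked example (illustrative): with max_locals == 3 and
    // number_of_locks == 2, monitor_offset is 3*BytesPerWord + 2*BytesPerWord,
    // so monitor 0 is read from [OSR_buf + 5*BytesPerWord] (lock word) and
    // [OSR_buf + 6*BytesPerWord] (oop), and monitor 1 from two words lower.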
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
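  // When VerifyOops or compressed class pointers are in use, the emitted
  // check does not have the fixed ic_cmp_size, so instead of pre-padding we
  // align after the check (see below).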
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records the size in slots (32-bit words)

  // subtract two words to account for the return address and link
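  // Worked example (illustrative): on LP64, slots_per_word == 2 and each slot
  // is 4 bytes, so a framesize of 14 slots yields (14 - 4) * 4 = 40 bytes.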
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search for an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception-related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(rsi));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for the deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

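  // Push the address of the handler itself so the deopt blob sees a return
  // address that points back into this method's code.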
  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);

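  // Reading the polling page is the safepoint check: when a safepoint is
  // pending, the VM protects the page, the read faults, and the signal
  // handler uses the poll_return relocation to stop this thread.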
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    __ relocate(relocInfo::poll_return_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    __ testl(rax, polling_page);
  }
  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
  guarantee(info != NULL, "Shouldn't be NULL");
  int offset = __ offset();
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    offset = __ offset();
    add_debug_info_for_branch(info);
    __ relocate(relocInfo::poll_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    add_debug_info_for_branch(info);
    __ testl(rax, polling_page);
  }
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                   InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                            lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
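  // null_check_here records the offset of the instruction that can take the
  // implicit null-check fault; it is refreshed below whenever a scratch move
  // is emitted ahead of the store that actually touches memory.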
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32-bit mode, so this doesn't produce a useless literal move
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me, but we can see a move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
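      // The 64-bit store is split into two 32-bit stores; when patching is
      // required, each half gets its own PatchingStub (lir_patch_high and
      // lir_patch_low below).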
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64 bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
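      // Floating-point compares set ZF/CF as an unsigned compare would and
      // raise PF when an operand is NaN, so the unordered case branches off
      // first and the remaining conditions map to the below/above flavors.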
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                         ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                         ShouldNotReachHere();
      }
    }
    __ jcc(acond, *(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
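      // The conversion instructions return the x86 "integer indefinite" value
      // 0x80000000 in those cases, so comparing against it catches every
      // input that the stub must fix up.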
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      __ movptr(Address(rsp, 0),            src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
                    InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )
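  // The array length arrives as a 32-bit value; sign-extend it on 64-bit so
  // the size computation in allocate_array uses a proper 64-bit quantity.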
1522
1523  if (UseSlowPath ||
1524      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1525      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1526    __ jmp(*op->stub()->entry());
1527  } else {
1528    Register tmp1 = op->tmp1()->as_register();
1529    Register tmp2 = op->tmp2()->as_register();
1530    Register tmp3 = op->tmp3()->as_register();
1531    if (len == tmp1) {
1532      tmp1 = tmp3;
1533    } else if (len == tmp2) {
1534      tmp2 = tmp3;
1535    } else if (len == tmp3) {
1536      // everything is ok
1537    } else {
1538      __ mov(tmp3, len);
1539    }
1540    __ allocate_array(op->obj()->as_register(),
1541                      len,
1542                      tmp1,
1543                      tmp2,
1544                      arrayOopDesc::header_size(op->type()),
1545                      array_element_size(op->type()),
1546                      op->klass()->as_register(),
1547                      *op->stub()->entry());
1548  }
1549  __ bind(*op->stub()->continuation());
1550}
1551
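// Update the ReceiverTypeData rows for 'recv': bump the count of the row
// whose receiver matches, otherwise claim the first empty row. Roughly (a
// sketch of the emitted control flow, assuming row_limit() rows of
// {receiver, count} pairs):
//
//   for (row : rows) if (row.receiver == recv) { row.count += increment; goto *update_done; }
//   for (row : rows) if (row.receiver == NULL) { row.receiver = recv; row.count = increment; goto *update_done; }
//
// If every row is taken by some other receiver, control falls through.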
1552void LIR_Assembler::type_profile_helper(Register mdo,
1553                                        ciMethodData *md, ciProfileData *data,
1554                                        Register recv, Label* update_done) {
1555  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1556    Label next_test;
1557    // See if the receiver is receiver[n].
1558    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1559    __ jccb(Assembler::notEqual, next_test);
1560    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1561    __ addptr(data_addr, DataLayout::counter_increment);
1562    __ jmp(*update_done);
1563    __ bind(next_test);
1564  }
1565
1566  // Didn't find receiver; find next empty slot and fill it in
1567  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1568    Label next_test;
1569    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
1570    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
1571    __ jccb(Assembler::notEqual, next_test);
1572    __ movptr(recv_addr, recv);
1573    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
1574    __ jmp(*update_done);
1575    __ bind(next_test);
1576  }
1577}
1578
1579void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1580  // we always need a stub for the failure case.
1581  CodeStub* stub = op->stub();
1582  Register obj = op->object()->as_register();
1583  Register k_RInfo = op->tmp1()->as_register();
1584  Register klass_RInfo = op->tmp2()->as_register();
1585  Register dst = op->result_opr()->as_register();
1586  ciKlass* k = op->klass();
1587  Register Rtmp1 = noreg;
1588
1589  // check if it needs to be profiled
1590  ciMethodData* md;
1591  ciProfileData* data;
1592
1593  if (op->should_profile()) {
1594    ciMethod* method = op->profiled_method();
1595    assert(method != NULL, "Should have method");
1596    int bci = op->profiled_bci();
1597    md = method->method_data_or_null();
1598    assert(md != NULL, "Sanity");
1599    data = md->bci_to_data(bci);
1600    assert(data != NULL,                "need data for type check");
1601    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1602  }
1603  Label profile_cast_success, profile_cast_failure;
1604  Label *success_target = op->should_profile() ? &profile_cast_success : success;
1605  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1606
1607  if (obj == k_RInfo) {
1608    k_RInfo = dst;
1609  } else if (obj == klass_RInfo) {
1610    klass_RInfo = dst;
1611  }
1612  if (k->is_loaded() && !UseCompressedClassPointers) {
1613    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1614  } else {
1615    Rtmp1 = op->tmp3()->as_register();
1616    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1617  }
1618
1619  assert_different_registers(obj, k_RInfo, klass_RInfo);
1620
1621  __ cmpptr(obj, (int32_t)NULL_WORD);
1622  if (op->should_profile()) {
1623    Label not_null;
1624    __ jccb(Assembler::notEqual, not_null);
1625    // Object is null; update MDO and exit
1626    Register mdo  = klass_RInfo;
1627    __ mov_metadata(mdo, md->constant_encoding());
1628    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1629    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1630    __ orl(data_addr, header_bits);
1631    __ jmp(*obj_is_null);
1632    __ bind(not_null);
1633  } else {
1634    __ jcc(Assembler::equal, *obj_is_null);
1635  }
1636
1637  if (!k->is_loaded()) {
1638    klass2reg_with_patching(k_RInfo, op->info_for_patch());
1639  } else {
1640#ifdef _LP64
1641    __ mov_metadata(k_RInfo, k->constant_encoding());
1642#endif // _LP64
1643  }
1644  __ verify_oop(obj);
1645
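  // fast_check: the expected type can only be matched exactly (e.g. a
  // final class with no subclasses), so a single klass compare decides the
  // whole check.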
1646  if (op->fast_check()) {
1647    // get object class
1648    // not a safepoint as obj null check happens earlier
1649#ifdef _LP64
1650    if (UseCompressedClassPointers) {
1651      __ load_klass(Rtmp1, obj);
1652      __ cmpptr(k_RInfo, Rtmp1);
1653    } else {
1654      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1655    }
1656#else
1657    if (k->is_loaded()) {
1658      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1659    } else {
1660      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1661    }
1662#endif
1663    __ jcc(Assembler::notEqual, *failure_target);
1664    // successful cast, fall through to profile or jump
1665  } else {
1666    // get object class
1667    // not a safepoint as obj null check happens earlier
1668    __ load_klass(klass_RInfo, obj);
1669    if (k->is_loaded()) {
1670      // See if we get an immediate positive hit
1671#ifdef _LP64
1672      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1673#else
1674      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1675#endif // _LP64
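      // Whether a miss above is conclusive depends on where
      // super_check_offset points: into the fixed display of primary
      // supers a miss is final, but if it designates the
      // secondary_super_cache a miss merely means "not cached" and the
      // slow subtype check below has to decide.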
1676      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1677        __ jcc(Assembler::notEqual, *failure_target);
1678        // successful cast, fall through to profile or jump
1679      } else {
1680        // See if we get an immediate positive hit
1681        __ jcc(Assembler::equal, *success_target);
1682        // check for self
1683#ifdef _LP64
1684        __ cmpptr(klass_RInfo, k_RInfo);
1685#else
1686        __ cmpklass(klass_RInfo, k->constant_encoding());
1687#endif // _LP64
1688        __ jcc(Assembler::equal, *success_target);
1689
1690        __ push(klass_RInfo);
1691#ifdef _LP64
1692        __ push(k_RInfo);
1693#else
1694        __ pushklass(k->constant_encoding());
1695#endif // _LP64
1696        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1697        __ pop(klass_RInfo);
1698        __ pop(klass_RInfo);
1699        // result is a boolean
1700        __ cmpl(klass_RInfo, 0);
1701        __ jcc(Assembler::equal, *failure_target);
1702        // successful cast, fall through to profile or jump
1703      }
1704    } else {
1705      // perform the fast part of the checking logic
1706      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1707      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1708      __ push(klass_RInfo);
1709      __ push(k_RInfo);
1710      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1711      __ pop(klass_RInfo);
1712      __ pop(k_RInfo);
1713      // result is a boolean
1714      __ cmpl(k_RInfo, 0);
1715      __ jcc(Assembler::equal, *failure_target);
1716      // successful cast, fall through to profile or jump
1717    }
1718  }
1719  if (op->should_profile()) {
1720    Register mdo  = klass_RInfo, recv = k_RInfo;
1721    __ bind(profile_cast_success);
1722    __ mov_metadata(mdo, md->constant_encoding());
1723    __ load_klass(recv, obj);
1725    type_profile_helper(mdo, md, data, recv, success);
1726    __ jmp(*success);
1727
1728    __ bind(profile_cast_failure);
1729    __ mov_metadata(mdo, md->constant_encoding());
1730    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1731    __ subptr(counter_addr, DataLayout::counter_increment);
1732    __ jmp(*failure);
1733  }
1734  __ jmp(*success);
1735}
1736
1737
1738void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1739  LIR_Code code = op->code();
1740  if (code == lir_store_check) {
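    // aastore type check: the 'value' being stored must be assignable to
    // the element klass of 'array'; on failure the stub throws
    // ArrayStoreException.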
1741    Register value = op->object()->as_register();
1742    Register array = op->array()->as_register();
1743    Register k_RInfo = op->tmp1()->as_register();
1744    Register klass_RInfo = op->tmp2()->as_register();
1745    Register Rtmp1 = op->tmp3()->as_register();
1746
1747    CodeStub* stub = op->stub();
1748
1749    // check if it needs to be profiled
1750    ciMethodData* md;
1751    ciProfileData* data;
1752
1753    if (op->should_profile()) {
1754      ciMethod* method = op->profiled_method();
1755      assert(method != NULL, "Should have method");
1756      int bci = op->profiled_bci();
1757      md = method->method_data_or_null();
1758      assert(md != NULL, "Sanity");
1759      data = md->bci_to_data(bci);
1760      assert(data != NULL,                "need data for type check");
1761      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1762    }
1763    Label profile_cast_success, profile_cast_failure, done;
1764    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1765    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1766
1767    __ cmpptr(value, (int32_t)NULL_WORD);
1768    if (op->should_profile()) {
1769      Label not_null;
1770      __ jccb(Assembler::notEqual, not_null);
1771      // Object is null; update MDO and exit
1772      Register mdo  = klass_RInfo;
1773      __ mov_metadata(mdo, md->constant_encoding());
1774      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1775      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1776      __ orl(data_addr, header_bits);
1777      __ jmp(done);
1778      __ bind(not_null);
1779    } else {
1780      __ jcc(Assembler::equal, done);
1781    }
1782
1783    add_debug_info_for_null_check_here(op->info_for_exception());
1784    __ load_klass(k_RInfo, array);
1785    __ load_klass(klass_RInfo, value);
1786
1787    // get instance klass (it's already uncompressed)
1788    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1789    // perform the fast part of the checking logic
1790    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1791    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1792    __ push(klass_RInfo);
1793    __ push(k_RInfo);
1794    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1795    __ pop(klass_RInfo);
1796    __ pop(k_RInfo);
1797    // result is a boolean
1798    __ cmpl(k_RInfo, 0);
1799    __ jcc(Assembler::equal, *failure_target);
1800    // fall through to the success case
1801
1802    if (op->should_profile()) {
1803      Register mdo  = klass_RInfo, recv = k_RInfo;
1804      __ bind(profile_cast_success);
1805      __ mov_metadata(mdo, md->constant_encoding());
1806      __ load_klass(recv, value);
1808      type_profile_helper(mdo, md, data, recv, &done);
1809      __ jmpb(done);
1810
1811      __ bind(profile_cast_failure);
1812      __ mov_metadata(mdo, md->constant_encoding());
1813      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1814      __ subptr(counter_addr, DataLayout::counter_increment);
1815      __ jmp(*stub->entry());
1816    }
1817
1818    __ bind(done);
1819  } else if (code == lir_checkcast) {
1820    Register obj = op->object()->as_register();
1821    Register dst = op->result_opr()->as_register();
1822    Label success;
1823    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1824    __ bind(success);
1825    if (dst != obj) {
1826      __ mov(dst, obj);
1827    }
1828  } else if (code == lir_instanceof) {
1829    Register obj = op->object()->as_register();
1830    Register dst = op->result_opr()->as_register();
1831    Label success, failure, done;
1832    emit_typecheck_helper(op, &success, &failure, &failure);
1833    __ bind(failure);
1834    __ xorptr(dst, dst);
1835    __ jmpb(done);
1836    __ bind(success);
1837    __ movptr(dst, 1);
1838    __ bind(done);
1839  } else {
1840    ShouldNotReachHere();
1841  }
1842}
1846
1847
1848void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1849  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
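    // 32-bit path only: cmpxchg8b has fixed operands (it compares EDX:EAX
    // against the 64-bit memory operand and, on a match, stores ECX:EBX
    // into it), hence the register asserts below.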
1850    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1851    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1852    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1853    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1854    Register addr = op->addr()->as_register();
1855    if (os::is_MP()) {
1856      __ lock();
1857    }
1858    NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1859
1860  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1861    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1862    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1863    Register newval = op->new_value()->as_register();
1864    Register cmpval = op->cmp_value()->as_register();
1865    assert(cmpval == rax, "wrong register");
1866    assert(newval != NULL, "new val must be register");
1867    assert(cmpval != newval, "cmp and new values must be in different registers");
1868    assert(cmpval != addr, "cmp and addr must be in different registers");
1869    assert(newval != addr, "new value and addr must be in different registers");
1870
1871    if (op->code() == lir_cas_obj) {
1872#ifdef _LP64
1873      if (UseCompressedOops) {
1874        __ encode_heap_oop(cmpval);
1875        __ mov(rscratch1, newval);
1876        __ encode_heap_oop(rscratch1);
1877        if (os::is_MP()) {
1878          __ lock();
1879        }
1880        // cmpval (rax) is implicitly used by this instruction
1881        __ cmpxchgl(rscratch1, Address(addr, 0));
1882      } else
1883#endif
1884      {
1885        if (os::is_MP()) {
1886          __ lock();
1887        }
1888        __ cmpxchgptr(newval, Address(addr, 0));
1889      }
1890    } else {
1891      assert(op->code() == lir_cas_int, "lir_cas_int expected");
1892      if (os::is_MP()) {
1893        __ lock();
1894      }
1895      __ cmpxchgl(newval, Address(addr, 0));
1896    }
1897#ifdef _LP64
1898  } else if (op->code() == lir_cas_long) {
1899    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1900    Register newval = op->new_value()->as_register_lo();
1901    Register cmpval = op->cmp_value()->as_register_lo();
1902    assert(cmpval == rax, "wrong register");
1903    assert(newval != NULL, "new val must be register");
1904    assert(cmpval != newval, "cmp and new values must be in different registers");
1905    assert(cmpval != addr, "cmp and addr must be in different registers");
1906    assert(newval != addr, "new value and addr must be in different registers");
1907    if (os::is_MP()) {
1908      __ lock();
1909    }
1910    __ cmpxchgq(newval, Address(addr, 0));
1911#endif // _LP64
1912  } else {
1913    Unimplemented();
1914  }
1915}
1916
1917void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1918  Assembler::Condition acond, ncond;
1919  switch (condition) {
1920    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1921    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1922    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1923    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1924    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1925    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1926    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1927    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1928    default:                    ShouldNotReachHere();
1929  }
1930
1931  if (opr1->is_cpu_register()) {
1932    reg2reg(opr1, result);
1933  } else if (opr1->is_stack()) {
1934    stack2reg(opr1, result, result->type());
1935  } else if (opr1->is_constant()) {
1936    const2reg(opr1, result, lir_patch_none, NULL);
1937  } else {
1938    ShouldNotReachHere();
1939  }
1940
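  // Branch-free pattern: opr1 has already been moved into result above, so
  // we only need to conditionally overwrite it with opr2 under the negated
  // condition. E.g. for lir_cond_less:
  //   mov    result, opr1
  //   cmovge result, opr2    ; ncond == greaterEqual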
1941  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
1942    // optimized version that does not require a branch
1943    if (opr2->is_single_cpu()) {
1944      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1945      __ cmov(ncond, result->as_register(), opr2->as_register());
1946    } else if (opr2->is_double_cpu()) {
1947      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1948      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1949      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
1950      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
1951    } else if (opr2->is_single_stack()) {
1952      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
1953    } else if (opr2->is_double_stack()) {
1954      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
1955      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
1956    } else {
1957      ShouldNotReachHere();
1958    }
1959
1960  } else {
1961    Label skip;
1962    __ jcc (acond, skip);
1963    if (opr2->is_cpu_register()) {
1964      reg2reg(opr2, result);
1965    } else if (opr2->is_stack()) {
1966      stack2reg(opr2, result, result->type());
1967    } else if (opr2->is_constant()) {
1968      const2reg(opr2, result, lir_patch_none, NULL);
1969    } else {
1970      ShouldNotReachHere();
1971    }
1972    __ bind(skip);
1973  }
1974}
1975
1976
1977void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1978  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1979
1980  if (left->is_single_cpu()) {
1981    assert(left == dest, "left and dest must be equal");
1982    Register lreg = left->as_register();
1983
1984    if (right->is_single_cpu()) {
1985      // cpu register - cpu register
1986      Register rreg = right->as_register();
1987      switch (code) {
1988        case lir_add: __ addl (lreg, rreg); break;
1989        case lir_sub: __ subl (lreg, rreg); break;
1990        case lir_mul: __ imull(lreg, rreg); break;
1991        default:      ShouldNotReachHere();
1992      }
1993
1994    } else if (right->is_stack()) {
1995      // cpu register - stack
1996      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1997      switch (code) {
1998        case lir_add: __ addl(lreg, raddr); break;
1999        case lir_sub: __ subl(lreg, raddr); break;
2000        default:      ShouldNotReachHere();
2001      }
2002
2003    } else if (right->is_constant()) {
2004      // cpu register - constant
2005      jint c = right->as_constant_ptr()->as_jint();
2006      switch (code) {
2007        case lir_add: {
2008          __ incrementl(lreg, c);
2009          break;
2010        }
2011        case lir_sub: {
2012          __ decrementl(lreg, c);
2013          break;
2014        }
2015        default: ShouldNotReachHere();
2016      }
2017
2018    } else {
2019      ShouldNotReachHere();
2020    }
2021
2022  } else if (left->is_double_cpu()) {
2023    assert(left == dest, "left and dest must be equal");
2024    Register lreg_lo = left->as_register_lo();
2025    Register lreg_hi = left->as_register_hi();
2026
2027    if (right->is_double_cpu()) {
2028      // cpu register - cpu register
2029      Register rreg_lo = right->as_register_lo();
2030      Register rreg_hi = right->as_register_hi();
2031      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2032      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2033      switch (code) {
2034        case lir_add:
2035          __ addptr(lreg_lo, rreg_lo);
2036          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2037          break;
2038        case lir_sub:
2039          __ subptr(lreg_lo, rreg_lo);
2040          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2041          break;
2042        case lir_mul:
2043#ifdef _LP64
2044          __ imulq(lreg_lo, rreg_lo);
2045#else
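          // 32-bit long multiply built from 32-bit halves, using
          //   lo64(a*b) = lo32(aL*bL)
          //   hi64(a*b) = hi32(aL*bL) + lo32(aL*bH) + lo32(aH*bL)
          // with aL in rax and aH in rdx (mull leaves aL*bL in rdx:rax).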
2046          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2047          __ imull(lreg_hi, rreg_lo);
2048          __ imull(rreg_hi, lreg_lo);
2049          __ addl (rreg_hi, lreg_hi);
2050          __ mull (rreg_lo);
2051          __ addl (lreg_hi, rreg_hi);
2052#endif // _LP64
2053          break;
2054        default:
2055          ShouldNotReachHere();
2056      }
2057
2058    } else if (right->is_constant()) {
2059      // cpu register - constant
2060#ifdef _LP64
2061      jlong c = right->as_constant_ptr()->as_jlong_bits();
2062      __ movptr(r10, (intptr_t) c);
2063      switch (code) {
2064        case lir_add:
2065          __ addptr(lreg_lo, r10);
2066          break;
2067        case lir_sub:
2068          __ subptr(lreg_lo, r10);
2069          break;
2070        default:
2071          ShouldNotReachHere();
2072      }
2073#else
2074      jint c_lo = right->as_constant_ptr()->as_jint_lo();
2075      jint c_hi = right->as_constant_ptr()->as_jint_hi();
2076      switch (code) {
2077        case lir_add:
2078          __ addptr(lreg_lo, c_lo);
2079          __ adcl(lreg_hi, c_hi);
2080          break;
2081        case lir_sub:
2082          __ subptr(lreg_lo, c_lo);
2083          __ sbbl(lreg_hi, c_hi);
2084          break;
2085        default:
2086          ShouldNotReachHere();
2087      }
2088#endif // _LP64
2089
2090    } else {
2091      ShouldNotReachHere();
2092    }
2093
2094  } else if (left->is_single_xmm()) {
2095    assert(left == dest, "left and dest must be equal");
2096    XMMRegister lreg = left->as_xmm_float_reg();
2097
2098    if (right->is_single_xmm()) {
2099      XMMRegister rreg = right->as_xmm_float_reg();
2100      switch (code) {
2101        case lir_add: __ addss(lreg, rreg);  break;
2102        case lir_sub: __ subss(lreg, rreg);  break;
2103        case lir_mul_strictfp: // fall through
2104        case lir_mul: __ mulss(lreg, rreg);  break;
2105        case lir_div_strictfp: // fall through
2106        case lir_div: __ divss(lreg, rreg);  break;
2107        default: ShouldNotReachHere();
2108      }
2109    } else {
2110      Address raddr;
2111      if (right->is_single_stack()) {
2112        raddr = frame_map()->address_for_slot(right->single_stack_ix());
2113      } else if (right->is_constant()) {
2114        // hack for now
2115        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2116      } else {
2117        ShouldNotReachHere();
2118      }
2119      switch (code) {
2120        case lir_add: __ addss(lreg, raddr);  break;
2121        case lir_sub: __ subss(lreg, raddr);  break;
2122        case lir_mul_strictfp: // fall through
2123        case lir_mul: __ mulss(lreg, raddr);  break;
2124        case lir_div_strictfp: // fall through
2125        case lir_div: __ divss(lreg, raddr);  break;
2126        default: ShouldNotReachHere();
2127      }
2128    }
2129
2130  } else if (left->is_double_xmm()) {
2131    assert(left == dest, "left and dest must be equal");
2132
2133    XMMRegister lreg = left->as_xmm_double_reg();
2134    if (right->is_double_xmm()) {
2135      XMMRegister rreg = right->as_xmm_double_reg();
2136      switch (code) {
2137        case lir_add: __ addsd(lreg, rreg);  break;
2138        case lir_sub: __ subsd(lreg, rreg);  break;
2139        case lir_mul_strictfp: // fall through
2140        case lir_mul: __ mulsd(lreg, rreg);  break;
2141        case lir_div_strictfp: // fall through
2142        case lir_div: __ divsd(lreg, rreg);  break;
2143        default: ShouldNotReachHere();
2144      }
2145    } else {
2146      Address raddr;
2147      if (right->is_double_stack()) {
2148        raddr = frame_map()->address_for_slot(right->double_stack_ix());
2149      } else if (right->is_constant()) {
2150        // hack for now
2151        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2152      } else {
2153        ShouldNotReachHere();
2154      }
2155      switch (code) {
2156        case lir_add: __ addsd(lreg, raddr);  break;
2157        case lir_sub: __ subsd(lreg, raddr);  break;
2158        case lir_mul_strictfp: // fall through
2159        case lir_mul: __ mulsd(lreg, raddr);  break;
2160        case lir_div_strictfp: // fall through
2161        case lir_div: __ divsd(lreg, raddr);  break;
2162        default: ShouldNotReachHere();
2163      }
2164    }
2165
2166  } else if (left->is_single_fpu()) {
2167    assert(dest->is_single_fpu(),  "fpu stack allocation required");
2168
2169    if (right->is_single_fpu()) {
2170      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2171
2172    } else {
2173      assert(left->fpu_regnr() == 0, "left must be on TOS");
2174      assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2175
2176      Address raddr;
2177      if (right->is_single_stack()) {
2178        raddr = frame_map()->address_for_slot(right->single_stack_ix());
2179      } else if (right->is_constant()) {
2180        address const_addr = float_constant(right->as_jfloat());
2181        assert(const_addr != NULL, "incorrect float/double constant maintenance");
2182        // hack for now
2183        raddr = __ as_Address(InternalAddress(const_addr));
2184      } else {
2185        ShouldNotReachHere();
2186      }
2187
2188      switch (code) {
2189        case lir_add: __ fadd_s(raddr); break;
2190        case lir_sub: __ fsub_s(raddr); break;
2191        case lir_mul_strictfp: // fall through
2192        case lir_mul: __ fmul_s(raddr); break;
2193        case lir_div_strictfp: // fall through
2194        case lir_div: __ fdiv_s(raddr); break;
2195        default:      ShouldNotReachHere();
2196      }
2197    }
2198
2199  } else if (left->is_double_fpu()) {
2200    assert(dest->is_double_fpu(),  "fpu stack allocation required");
2201
2202    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2203      // Double values require special handling for strictfp mul/div on x86
2204      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
2205      __ fmulp(left->fpu_regnrLo() + 1);
2206    }
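    // strictfp bias trick: pre-scale the operand by one power of two here
    // and undo it with the matching inverse power below, so the 80-bit x87
    // intermediate underflows and rounds the way a strict IEEE double
    // would (bias1/bias2 hold the two scale factors).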
2207
2208    if (right->is_double_fpu()) {
2209      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2210
2211    } else {
2212      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2213      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2214
2215      Address raddr;
2216      if (right->is_double_stack()) {
2217        raddr = frame_map()->address_for_slot(right->double_stack_ix());
2218      } else if (right->is_constant()) {
2219        // hack for now
2220        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2221      } else {
2222        ShouldNotReachHere();
2223      }
2224
2225      switch (code) {
2226        case lir_add: __ fadd_d(raddr); break;
2227        case lir_sub: __ fsub_d(raddr); break;
2228        case lir_mul_strictfp: // fall through
2229        case lir_mul: __ fmul_d(raddr); break;
2230        case lir_div_strictfp: // fall through
2231        case lir_div: __ fdiv_d(raddr); break;
2232        default: ShouldNotReachHere();
2233      }
2234    }
2235
2236    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2237      // Double values require special handling for strictfp mul/div on x86
2238      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
2239      __ fmulp(dest->fpu_regnrLo() + 1);
2240    }
2241
2242  } else if (left->is_single_stack() || left->is_address()) {
2243    assert(left == dest, "left and dest must be equal");
2244
2245    Address laddr;
2246    if (left->is_single_stack()) {
2247      laddr = frame_map()->address_for_slot(left->single_stack_ix());
2248    } else if (left->is_address()) {
2249      laddr = as_Address(left->as_address_ptr());
2250    } else {
2251      ShouldNotReachHere();
2252    }
2253
2254    if (right->is_single_cpu()) {
2255      Register rreg = right->as_register();
2256      switch (code) {
2257        case lir_add: __ addl(laddr, rreg); break;
2258        case lir_sub: __ subl(laddr, rreg); break;
2259        default:      ShouldNotReachHere();
2260      }
2261    } else if (right->is_constant()) {
2262      jint c = right->as_constant_ptr()->as_jint();
2263      switch (code) {
2264        case lir_add: {
2265          __ incrementl(laddr, c);
2266          break;
2267        }
2268        case lir_sub: {
2269          __ decrementl(laddr, c);
2270          break;
2271        }
2272        default: ShouldNotReachHere();
2273      }
2274    } else {
2275      ShouldNotReachHere();
2276    }
2277
2278  } else {
2279    ShouldNotReachHere();
2280  }
2281}
2282
2283void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2284  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
2285  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2286  assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2287
2288  bool left_is_tos = (left_index == 0);
2289  bool dest_is_tos = (dest_index == 0);
2290  int non_tos_index = (left_is_tos ? right_index : left_index);
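  // Each operation comes in the three x87 shapes, e.g. for lir_add:
  //   fadd (i):  st(0) = st(0) + st(i)         (dest on TOS)
  //   fadda(i):  st(i) = st(i) + st(0)         (dest not on TOS)
  //   faddp(i):  st(i) = st(i) + st(0), pop    (consumes TOS)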
2291
2292  switch (code) {
2293    case lir_add:
2294      if (pop_fpu_stack)       __ faddp(non_tos_index);
2295      else if (dest_is_tos)    __ fadd (non_tos_index);
2296      else                     __ fadda(non_tos_index);
2297      break;
2298
2299    case lir_sub:
2300      if (left_is_tos) {
2301        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
2302        else if (dest_is_tos)  __ fsub  (non_tos_index);
2303        else                   __ fsubra(non_tos_index);
2304      } else {
2305        if (pop_fpu_stack)     __ fsubp (non_tos_index);
2306        else if (dest_is_tos)  __ fsubr (non_tos_index);
2307        else                   __ fsuba (non_tos_index);
2308      }
2309      break;
2310
2311    case lir_mul_strictfp: // fall through
2312    case lir_mul:
2313      if (pop_fpu_stack)       __ fmulp(non_tos_index);
2314      else if (dest_is_tos)    __ fmul (non_tos_index);
2315      else                     __ fmula(non_tos_index);
2316      break;
2317
2318    case lir_div_strictfp: // fall through
2319    case lir_div:
2320      if (left_is_tos) {
2321        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
2322        else if (dest_is_tos)  __ fdiv  (non_tos_index);
2323        else                   __ fdivra(non_tos_index);
2324      } else {
2325        if (pop_fpu_stack)     __ fdivp (non_tos_index);
2326        else if (dest_is_tos)  __ fdivr (non_tos_index);
2327        else                   __ fdiva (non_tos_index);
2328      }
2329      break;
2330
2331    case lir_rem:
2332      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2333      __ fremr(noreg);
2334      break;
2335
2336    default:
2337      ShouldNotReachHere();
2338  }
2339}
2340
2341
2342void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2343  if (value->is_double_xmm()) {
2344    switch(code) {
2345      case lir_abs :
2346        {
2347          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2348            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2349          }
2350          __ andpd(dest->as_xmm_double_reg(),
2351                    ExternalAddress((address)double_signmask_pool));
2352        }
2353        break;
2354
2355      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2356      // the remaining intrinsics have no SSE implementation, so they are handled on the FPU
2357      default      : ShouldNotReachHere();
2358    }
2359
2360  } else if (value->is_double_fpu()) {
2361    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2362    switch(code) {
2363      case lir_log10 : __ flog10() ; break;
2364      case lir_abs   : __ fabs() ; break;
2365      case lir_sqrt  : __ fsqrt(); break;
2366      case lir_sin   :
2367        // Should consider not saving rbx if it is not necessary
2368        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
2369        break;
2370      case lir_cos :
2371        // Should consider not saving rbx, if not necessary
2372        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
2373        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
2374        break;
2375      case lir_tan :
2376        // Should consider not saving rbx if it is not necessary
2377        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
2378        break;
2379      case lir_pow :
2380        __ pow_with_fallback(op->as_Op2()->fpu_stack_size());
2381        break;
2382      default      : ShouldNotReachHere();
2383    }
2384  } else {
2385    Unimplemented();
2386  }
2387}
2388
2389void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2390  // assert(left->destroys_register(), "check");
2391  if (left->is_single_cpu()) {
2392    Register reg = left->as_register();
2393    if (right->is_constant()) {
2394      int val = right->as_constant_ptr()->as_jint();
2395      switch (code) {
2396        case lir_logic_and: __ andl (reg, val); break;
2397        case lir_logic_or:  __ orl  (reg, val); break;
2398        case lir_logic_xor: __ xorl (reg, val); break;
2399        default: ShouldNotReachHere();
2400      }
2401    } else if (right->is_stack()) {
2402      // added support for stack operands
2403      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2404      switch (code) {
2405        case lir_logic_and: __ andl (reg, raddr); break;
2406        case lir_logic_or:  __ orl  (reg, raddr); break;
2407        case lir_logic_xor: __ xorl (reg, raddr); break;
2408        default: ShouldNotReachHere();
2409      }
2410    } else {
2411      Register rright = right->as_register();
2412      switch (code) {
2413        case lir_logic_and: __ andptr (reg, rright); break;
2414        case lir_logic_or : __ orptr  (reg, rright); break;
2415        case lir_logic_xor: __ xorptr (reg, rright); break;
2416        default: ShouldNotReachHere();
2417      }
2418    }
2419    move_regs(reg, dst->as_register());
2420  } else {
2421    Register l_lo = left->as_register_lo();
2422    Register l_hi = left->as_register_hi();
2423    if (right->is_constant()) {
2424#ifdef _LP64
2425      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2426      switch (code) {
2427        case lir_logic_and:
2428          __ andq(l_lo, rscratch1);
2429          break;
2430        case lir_logic_or:
2431          __ orq(l_lo, rscratch1);
2432          break;
2433        case lir_logic_xor:
2434          __ xorq(l_lo, rscratch1);
2435          break;
2436        default: ShouldNotReachHere();
2437      }
2438#else
2439      int r_lo = right->as_constant_ptr()->as_jint_lo();
2440      int r_hi = right->as_constant_ptr()->as_jint_hi();
2441      switch (code) {
2442        case lir_logic_and:
2443          __ andl(l_lo, r_lo);
2444          __ andl(l_hi, r_hi);
2445          break;
2446        case lir_logic_or:
2447          __ orl(l_lo, r_lo);
2448          __ orl(l_hi, r_hi);
2449          break;
2450        case lir_logic_xor:
2451          __ xorl(l_lo, r_lo);
2452          __ xorl(l_hi, r_hi);
2453          break;
2454        default: ShouldNotReachHere();
2455      }
2456#endif // _LP64
2457    } else {
2458#ifdef _LP64
2459      Register r_lo;
2460      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2461        r_lo = right->as_register();
2462      } else {
2463        r_lo = right->as_register_lo();
2464      }
2465#else
2466      Register r_lo = right->as_register_lo();
2467      Register r_hi = right->as_register_hi();
2468      assert(l_lo != r_hi, "overwriting registers");
2469#endif
2470      switch (code) {
2471        case lir_logic_and:
2472          __ andptr(l_lo, r_lo);
2473          NOT_LP64(__ andptr(l_hi, r_hi);)
2474          break;
2475        case lir_logic_or:
2476          __ orptr(l_lo, r_lo);
2477          NOT_LP64(__ orptr(l_hi, r_hi);)
2478          break;
2479        case lir_logic_xor:
2480          __ xorptr(l_lo, r_lo);
2481          NOT_LP64(__ xorptr(l_hi, r_hi);)
2482          break;
2483        default: ShouldNotReachHere();
2484      }
2485    }
2486
2487    Register dst_lo = dst->as_register_lo();
2488    Register dst_hi = dst->as_register_hi();
2489
2490#ifdef _LP64
2491    move_regs(l_lo, dst_lo);
2492#else
2493    if (dst_lo == l_hi) {
2494      assert(dst_hi != l_lo, "overwriting registers");
2495      move_regs(l_hi, dst_hi);
2496      move_regs(l_lo, dst_lo);
2497    } else {
2498      assert(dst_lo != l_hi, "overwriting registers");
2499      move_regs(l_lo, dst_lo);
2500      move_regs(l_hi, dst_hi);
2501    }
2502#endif // _LP64
2503  }
2504}
2505
2506
2507// we assume that rax and rdx can be overwritten
2508void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2509
2510  assert(left->is_single_cpu(),   "left must be register");
2511  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
2512  assert(result->is_single_cpu(), "result must be register");
2513
2514  //  assert(left->destroys_register(), "check");
2515  //  assert(right->destroys_register(), "check");
2516
2517  Register lreg = left->as_register();
2518  Register dreg = result->as_register();
2519
2520  if (right->is_constant()) {
2521    int divisor = right->as_constant_ptr()->as_jint();
2522    assert(divisor > 0 && is_power_of_2(divisor), "must be");
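    // Signed division by 2^k must round toward zero, so a negative
    // dividend is biased first:  x/2^k == (x + ((x >> 31) & (2^k - 1))) >> k.
    // cdql replicates the sign bit of rax into rdx to build that mask.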
2523    if (code == lir_idiv) {
2524      assert(lreg == rax, "must be rax");
2525      assert(temp->as_register() == rdx, "tmp register must be rdx");
2526      __ cdql(); // sign extend into rdx:rax
2527      if (divisor == 2) {
2528        __ subl(lreg, rdx);
2529      } else {
2530        __ andl(rdx, divisor - 1);
2531        __ addl(lreg, rdx);
2532      }
2533      __ sarl(lreg, log2_intptr(divisor));
2534      move_regs(lreg, dreg);
2535    } else if (code == lir_irem) {
2536      Label done;
2537      __ mov(dreg, lreg);
2538      __ andl(dreg, 0x80000000 | (divisor - 1));
2539      __ jcc(Assembler::positive, done);
2540      __ decrement(dreg);
2541      __ orl(dreg, ~(divisor - 1));
2542      __ increment(dreg);
2543      __ bind(done);
2544    } else {
2545      ShouldNotReachHere();
2546    }
2547  } else {
2548    Register rreg = right->as_register();
2549    assert(lreg == rax, "left register must be rax");
2550    assert(rreg != rdx, "right register must not be rdx");
2551    assert(temp->as_register() == rdx, "tmp register must be rdx");
2552
2553    move_regs(lreg, rax);
2554
2555    int idivl_offset = __ corrected_idivl(rreg);
2556    add_debug_info_for_div0(idivl_offset, info);
2557    if (code == lir_irem) {
2558      move_regs(rdx, dreg); // result is in rdx
2559    } else {
2560      move_regs(rax, dreg);
2561    }
2562  }
2563}
2564
2565
2566void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2567  if (opr1->is_single_cpu()) {
2568    Register reg1 = opr1->as_register();
2569    if (opr2->is_single_cpu()) {
2570      // cpu register - cpu register
2571      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2572        __ cmpptr(reg1, opr2->as_register());
2573      } else {
2574        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2575        __ cmpl(reg1, opr2->as_register());
2576      }
2577    } else if (opr2->is_stack()) {
2578      // cpu register - stack
2579      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2580        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2581      } else {
2582        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2583      }
2584    } else if (opr2->is_constant()) {
2585      // cpu register - constant
2586      LIR_Const* c = opr2->as_constant_ptr();
2587      if (c->type() == T_INT) {
2588        __ cmpl(reg1, c->as_jint());
2589      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2590        // In 64-bit mode oops fit in a single register
2591        jobject o = c->as_jobject();
2592        if (o == NULL) {
2593          __ cmpptr(reg1, (int32_t)NULL_WORD);
2594        } else {
2595#ifdef _LP64
2596          __ movoop(rscratch1, o);
2597          __ cmpptr(reg1, rscratch1);
2598#else
2599          __ cmpoop(reg1, c->as_jobject());
2600#endif // _LP64
2601        }
2602      } else {
2603        fatal("unexpected type: %s", basictype_to_str(c->type()));
2604      }
2605      // cpu register - address
2606    } else if (opr2->is_address()) {
2607      if (op->info() != NULL) {
2608        add_debug_info_for_null_check_here(op->info());
2609      }
2610      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2611    } else {
2612      ShouldNotReachHere();
2613    }
2614
2615  } else if (opr1->is_double_cpu()) {
2616    Register xlo = opr1->as_register_lo();
2617    Register xhi = opr1->as_register_hi();
2618    if (opr2->is_double_cpu()) {
2619#ifdef _LP64
2620      __ cmpptr(xlo, opr2->as_register_lo());
2621#else
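      // 64-bit compare on 32-bit: subl/sbbl computes left - right across
      // both halves (destroying left) so the signed flags describe the
      // full 64-bit difference; for (not)equal the halves are additionally
      // OR'ed so ZF is set exactly when left == right.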
2622      // cpu register - cpu register
2623      Register ylo = opr2->as_register_lo();
2624      Register yhi = opr2->as_register_hi();
2625      __ subl(xlo, ylo);
2626      __ sbbl(xhi, yhi);
2627      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2628        __ orl(xhi, xlo);
2629      }
2630#endif // _LP64
2631    } else if (opr2->is_constant()) {
2632      // cpu register - constant 0
2633      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2634#ifdef _LP64
2635      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
2636#else
2637      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2638      __ orl(xhi, xlo);
2639#endif // _LP64
2640    } else {
2641      ShouldNotReachHere();
2642    }
2643
2644  } else if (opr1->is_single_xmm()) {
2645    XMMRegister reg1 = opr1->as_xmm_float_reg();
2646    if (opr2->is_single_xmm()) {
2647      // xmm register - xmm register
2648      __ ucomiss(reg1, opr2->as_xmm_float_reg());
2649    } else if (opr2->is_stack()) {
2650      // xmm register - stack
2651      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2652    } else if (opr2->is_constant()) {
2653      // xmm register - constant
2654      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2655    } else if (opr2->is_address()) {
2656      // xmm register - address
2657      if (op->info() != NULL) {
2658        add_debug_info_for_null_check_here(op->info());
2659      }
2660      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2661    } else {
2662      ShouldNotReachHere();
2663    }
2664
2665  } else if (opr1->is_double_xmm()) {
2666    XMMRegister reg1 = opr1->as_xmm_double_reg();
2667    if (opr2->is_double_xmm()) {
2668      // xmm register - xmm register
2669      __ ucomisd(reg1, opr2->as_xmm_double_reg());
2670    } else if (opr2->is_stack()) {
2671      // xmm register - stack
2672      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2673    } else if (opr2->is_constant()) {
2674      // xmm register - constant
2675      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2676    } else if (opr2->is_address()) {
2677      // xmm register - address
2678      if (op->info() != NULL) {
2679        add_debug_info_for_null_check_here(op->info());
2680      }
2681      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2682    } else {
2683      ShouldNotReachHere();
2684    }
2685
2686  } else if (opr1->is_single_fpu() || opr1->is_double_fpu()) {
2687    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2688    assert(opr2->is_fpu_register(), "both must be registers");
2689    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2690
2691  } else if (opr1->is_address() && opr2->is_constant()) {
2692    LIR_Const* c = opr2->as_constant_ptr();
2693#ifdef _LP64
2694    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2695      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2696      __ movoop(rscratch1, c->as_jobject());
2697    }
2698#endif // LP64
2699    if (op->info() != NULL) {
2700      add_debug_info_for_null_check_here(op->info());
2701    }
2702    // special case: address - constant
2703    LIR_Address* addr = opr1->as_address_ptr();
2704    if (c->type() == T_INT) {
2705      __ cmpl(as_Address(addr), c->as_jint());
2706    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2707#ifdef _LP64
2708      // %%% Make this explode if addr isn't reachable until we figure out a
2709      // better strategy by giving noreg as the temp for as_Address
2710      __ cmpptr(rscratch1, as_Address(addr, noreg));
2711#else
2712      __ cmpoop(as_Address(addr), c->as_jobject());
2713#endif // _LP64
2714    } else {
2715      ShouldNotReachHere();
2716    }
2717
2718  } else {
2719    ShouldNotReachHere();
2720  }
2721}
2722
2723void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2724  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2725    if (left->is_single_xmm()) {
2726      assert(right->is_single_xmm(), "must match");
2727      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2728    } else if (left->is_double_xmm()) {
2729      assert(right->is_double_xmm(), "must match");
2730      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2731
2732    } else {
2733      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2734      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2735
2736      assert(left->fpu() == 0, "left must be on TOS");
2737      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2738                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2739    }
2740  } else {
2741    assert(code == lir_cmp_l2i, "check");
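    // lcmp semantics: dest = -1 if left < right, 0 if equal, +1 if greater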
2742#ifdef _LP64
2743    Label done;
2744    Register dest = dst->as_register();
2745    __ cmpptr(left->as_register_lo(), right->as_register_lo());
2746    __ movl(dest, -1);
2747    __ jccb(Assembler::less, done);
2748    __ set_byte_if_not_zero(dest);
2749    __ movzbl(dest, dest);
2750    __ bind(done);
2751#else
2752    __ lcmp2int(left->as_register_hi(),
2753                left->as_register_lo(),
2754                right->as_register_hi(),
2755                right->as_register_lo());
2756    move_regs(left->as_register_hi(), dst->as_register());
2757#endif // _LP64
2758  }
2759}
2760
2761
2762void LIR_Assembler::align_call(LIR_Code code) {
2763  if (os::is_MP()) {
2764    // make sure that the displacement word of the call ends up word aligned
2765    int offset = __ offset();
2766    switch (code) {
2767      case lir_static_call:
2768      case lir_optvirtual_call:
2769      case lir_dynamic_call:
2770        offset += NativeCall::displacement_offset;
2771        break;
2772      case lir_icvirtual_call:
2773        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2774        break;
2775      case lir_virtual_call:  // currently, sparc-specific for niagara
2776      default: ShouldNotReachHere();
2777    }
2778    __ align(BytesPerWord, offset);
2779  }
2780}
2781
2782
2783void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2784  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2785         "must be aligned");
2786  __ call(AddressLiteral(op->addr(), rtype));
2787  add_call_info(code_offset(), op->info());
2788}
2789
2790
2791void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2792  __ ic_call(op->addr());
2793  add_call_info(code_offset(), op->info());
2794  assert(!os::is_MP() ||
2795         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2796         "must be aligned");
2797}
2798
2799
2800/* Currently, vtable-dispatch is only enabled for sparc platforms */
2801void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2802  ShouldNotReachHere();
2803}
2804
2805
2806void LIR_Assembler::emit_static_call_stub() {
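  // Stub layout once patched by the resolver (a sketch):
  //   mov  rbx, <Method*>     ; NULL until the call site is resolved
  //   jmp  <verified entry>   ; as emitted, the jump targets its own address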
2807  address call_pc = __ pc();
2808  address stub = __ start_a_stub(call_stub_size);
2809  if (stub == NULL) {
2810    bailout("static call stub overflow");
2811    return;
2812  }
2813
2814  int start = __ offset();
2815  if (os::is_MP()) {
2816    // make sure that the displacement word of the call ends up word aligned
2817    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2818  }
2819  __ relocate(static_stub_Relocation::spec(call_pc));
2820  __ mov_metadata(rbx, (Metadata*)NULL);
2821  // must be set to -1 at code generation time
2822  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2823  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2824  __ jump(RuntimeAddress(__ pc()));
2825
2826  assert(__ offset() - start <= call_stub_size, "stub too big");
2827  __ end_a_stub();
2828}
2829
2830
2831void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2832  assert(exceptionOop->as_register() == rax, "must match");
2833  assert(exceptionPC->as_register() == rdx, "must match");
2834
2835  // exception object is not added to oop map by LinearScan
2836  // (LinearScan assumes that no oops are in fixed registers)
2837  info->add_register_oop(exceptionOop);
2838  Runtime1::StubID unwind_id;
2839
2840  // get current pc information
2841  // pc is only needed if the method has an exception handler, the unwind code does not need it.
2842  int pc_for_athrow_offset = __ offset();
2843  InternalAddress pc_for_athrow(__ pc());
2844  __ lea(exceptionPC->as_register(), pc_for_athrow);
2845  add_call_info(pc_for_athrow_offset, info); // for exception handler
2846
2847  __ verify_not_null_oop(rax);
2848  // search an exception handler (rax: exception oop, rdx: throwing pc)
2849  if (compilation()->has_fpu_code()) {
2850    unwind_id = Runtime1::handle_exception_id;
2851  } else {
2852    unwind_id = Runtime1::handle_exception_nofpu_id;
2853  }
2854  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2855
2856  // enough room for two byte trap
2857  __ nop();
2858}
2859
2860
2861void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2862  assert(exceptionOop->as_register() == rax, "must match");
2863
2864  __ jmp(_unwind_handler_entry);
2865}
2866
2867
2868void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2869
2870  // optimized version for linear scan:
2871  // * count must be already in ECX (guaranteed by LinearScan)
2872  // * left and dest must be equal
2873  // * tmp must be unused
2874  assert(count->as_register() == SHIFT_count, "count must be in ECX");
2875  assert(left == dest, "left and dest must be equal");
2876  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
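  // No explicit masking of the count is needed: x86 variable shifts
  // already use only the low 5 (32-bit) or 6 (64-bit) bits of CL, matching
  // the Java rule that only the low bits of the shift distance count; for
  // the long-on-32-bit case, lshl/lshr handle counts above 31 themselves.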
2877
2878  if (left->is_single_cpu()) {
2879    Register value = left->as_register();
2880    assert(value != SHIFT_count, "left cannot be ECX");
2881
2882    switch (code) {
2883      case lir_shl:  __ shll(value); break;
2884      case lir_shr:  __ sarl(value); break;
2885      case lir_ushr: __ shrl(value); break;
2886      default: ShouldNotReachHere();
2887    }
2888  } else if (left->is_double_cpu()) {
2889    Register lo = left->as_register_lo();
2890    Register hi = left->as_register_hi();
2891    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2892#ifdef _LP64
2893    switch (code) {
2894      case lir_shl:  __ shlptr(lo);        break;
2895      case lir_shr:  __ sarptr(lo);        break;
2896      case lir_ushr: __ shrptr(lo);        break;
2897      default: ShouldNotReachHere();
2898    }
2899#else
2900
2901    switch (code) {
2902      case lir_shl:  __ lshl(hi, lo);        break;
2903      case lir_shr:  __ lshr(hi, lo, true);  break;
2904      case lir_ushr: __ lshr(hi, lo, false); break;
2905      default: ShouldNotReachHere();
2906    }
2907#endif // LP64
2908  } else {
2909    ShouldNotReachHere();
2910  }
2911}
2912
2913
2914void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2915  if (dest->is_single_cpu()) {
2916    // first move left into dest so that left is not destroyed by the shift
2917    Register value = dest->as_register();
2918    count = count & 0x1F; // Java spec
2919
2920    move_regs(left->as_register(), value);
2921    switch (code) {
2922      case lir_shl:  __ shll(value, count); break;
2923      case lir_shr:  __ sarl(value, count); break;
2924      case lir_ushr: __ shrl(value, count); break;
2925      default: ShouldNotReachHere();
2926    }
2927  } else if (dest->is_double_cpu()) {
2928#ifndef _LP64
2929    Unimplemented();
2930#else
2931    // first move left into dest so that left is not destroyed by the shift
2932    Register value = dest->as_register_lo();
2933    count = count & 0x1F; // Java spec
2934
2935    move_regs(left->as_register_lo(), value);
2936    switch (code) {
2937      case lir_shl:  __ shlptr(value, count); break;
2938      case lir_shr:  __ sarptr(value, count); break;
2939      case lir_ushr: __ shrptr(value, count); break;
2940      default: ShouldNotReachHere();
2941    }
2942#endif // _LP64
2943  } else {
2944    ShouldNotReachHere();
2945  }
2946}
2947
2948
2949void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2950  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2951  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2952  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2953  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
2954}
2955
2956
2957void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2958  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2959  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2960  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2961  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
2962}
2963
2964
2965void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2966  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2967  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2968  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2969  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
2970}
2971
2972
2973// This code replaces a call to arraycopy; no exceptions may be thrown
2974// in this code: they must be thrown in the System.arraycopy activation
2975// frame. We could save some checks if this were not the case.
2976void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2977  ciArrayKlass* default_type = op->expected_type();
2978  Register src = op->src()->as_register();
2979  Register dst = op->dst()->as_register();
2980  Register src_pos = op->src_pos()->as_register();
2981  Register dst_pos = op->dst_pos()->as_register();
2982  Register length  = op->length()->as_register();
2983  Register tmp = op->tmp()->as_register();
2984
2985  CodeStub* stub = op->stub();
2986  int flags = op->flags();
2987  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2988  if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2989
2990  // if we don't know anything, just go through the generic arraycopy
2991  if (default_type == NULL) {
2992    Label done;
2993    // save outgoing arguments on stack in case call to System.arraycopy is needed
2994    // HACK ALERT. This code used to push the parameters in a hardwired fashion
2995    // for interpreter calling conventions. Now we have to do it in new style conventions.
2996    // For the moment until C1 gets the new register allocator I just force all the
2997    // args to the right place (except the register args) and then on the back side
2998    // reload the register args properly if we go slow path. Yuck
2999
3000    // These are proper for the calling convention
3001    store_parameter(length, 2);
3002    store_parameter(dst_pos, 1);
3003    store_parameter(dst, 0);
3004
3005    // these are just temporary placements until we need to reload
3006    store_parameter(src_pos, 3);
3007    store_parameter(src, 4);
3008    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3009
3010    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
3011
3012    address copyfunc_addr = StubRoutines::generic_arraycopy();
3013
3014    // pass arguments: we may push here since this is not a safepoint; SP must be fixed at each safepoint
3015#ifdef _LP64
3016    // The arguments are in java calling convention so we can trivially shift them to C
3017    // convention
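        // Each assert checks that the following move does not clobber a
        // j_rarg that has not been read yet.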
3018    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3019    __ mov(c_rarg0, j_rarg0);
3020    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3021    __ mov(c_rarg1, j_rarg1);
3022    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3023    __ mov(c_rarg2, j_rarg2);
3024    assert_different_registers(c_rarg3, j_rarg4);
3025    __ mov(c_rarg3, j_rarg3);
3026#ifdef _WIN64
3027    // Allocate Win64 ABI shadow space for the args while keeping the stack aligned
3028    __ subptr(rsp, 6*wordSize);
3029    store_parameter(j_rarg4, 4);
3030    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3031      __ call(RuntimeAddress(C_entry));
3032    } else {
3033#ifndef PRODUCT
3034      if (PrintC1Statistics) {
3035        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3036      }
3037#endif
3038      __ call(RuntimeAddress(copyfunc_addr));
3039    }
3040    __ addptr(rsp, 6*wordSize);
3041#else
3042    __ mov(c_rarg4, j_rarg4);
3043    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3044      __ call(RuntimeAddress(C_entry));
3045    } else {
3046#ifndef PRODUCT
3047      if (PrintC1Statistics) {
3048        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3049      }
3050#endif
3051      __ call(RuntimeAddress(copyfunc_addr));
3052    }
3053#endif // _WIN64
3054#else
3055    __ push(length);
3056    __ push(dst_pos);
3057    __ push(dst);
3058    __ push(src_pos);
3059    __ push(src);
3060
3061    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3062      __ call_VM_leaf(C_entry, 5); // removes pushed parameters from the stack
3063    } else {
3064#ifndef PRODUCT
3065      if (PrintC1Statistics) {
3066        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3067      }
3068#endif
3069      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameters from the stack
3070    }
3071
3072#endif // _LP64
3073
3074    __ cmpl(rax, 0);
3075    __ jcc(Assembler::equal, *stub->continuation());
3076
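        // rax == 0 means the stub copied everything.  Otherwise the stubs
        // return ~(number of elements already copied); xor with -1 recovers
        // that count so the arguments can be advanced past the copied
        // prefix before jumping to the slow-path stub.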
3077    if (copyfunc_addr != NULL) {
3078      __ mov(tmp, rax);
3079      __ xorl(tmp, -1);
3080    }
3081
3082    // Reload values from the stack so they are where the stub
3083    // expects them.
3084    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3085    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3086    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3087    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3088    __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3089
3090    if (copyfunc_addr != NULL) {
3091      __ subl(length, tmp);
3092      __ addl(src_pos, tmp);
3093      __ addl(dst_pos, tmp);
3094    }
3095    __ jmp(*stub->entry());
3096
3097    __ bind(*stub->continuation());
3098    return;
3099  }
3100
3101  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3102
3103  int elem_size = type2aelembytes(basic_type);
3104  int shift_amount;
3105  Address::ScaleFactor scale;
3106
3107  switch (elem_size) {
3108    case 1 :
3109      shift_amount = 0;
3110      scale = Address::times_1;
3111      break;
3112    case 2 :
3113      shift_amount = 1;
3114      scale = Address::times_2;
3115      break;
3116    case 4 :
3117      shift_amount = 2;
3118      scale = Address::times_4;
3119      break;
3120    case 8 :
3121      shift_amount = 3;
3122      scale = Address::times_8;
3123      break;
3124    default:
3125      ShouldNotReachHere();
3126  }
3127
3128  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
3129  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
3130  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
3131  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
3132
3133  // length and positions are all sign-extended at this point on 64-bit
3134
3135  // test for NULL
3136  if (flags & LIR_OpArrayCopy::src_null_check) {
3137    __ testptr(src, src);
3138    __ jcc(Assembler::zero, *stub->entry());
3139  }
3140  if (flags & LIR_OpArrayCopy::dst_null_check) {
3141    __ testptr(dst, dst);
3142    __ jcc(Assembler::zero, *stub->entry());
3143  }
3144
3145  // check if negative
3146  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
3147    __ testl(src_pos, src_pos);
3148    __ jcc(Assembler::less, *stub->entry());
3149  }
3150  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
3151    __ testl(dst_pos, dst_pos);
3152    __ jcc(Assembler::less, *stub->entry());
3153  }
3154
3155  if (flags & LIR_OpArrayCopy::src_range_check) {
3156    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3157    __ cmpl(tmp, src_length_addr);
3158    __ jcc(Assembler::above, *stub->entry());
3159  }
3160  if (flags & LIR_OpArrayCopy::dst_range_check) {
3161    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3162    __ cmpl(tmp, dst_length_addr);
3163    __ jcc(Assembler::above, *stub->entry());
3164  }
3165
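      // A negative length must raise the exception in the slow path;
      // a zero length makes the whole copy a no-op.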
3166  if (flags & LIR_OpArrayCopy::length_positive_check) {
3167    __ testl(length, length);
3168    __ jcc(Assembler::less, *stub->entry());
3169    __ jcc(Assembler::zero, *stub->continuation());
3170  }
3171
3172#ifdef _LP64
3173  __ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero
3174  __ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero
3175#endif
3176
3177  if (flags & LIR_OpArrayCopy::type_check) {
3178    // We don't know whether the array types are compatible
3179    if (basic_type != T_OBJECT) {
3180      // Simple test for basic type arrays
3181      if (UseCompressedClassPointers) {
3182        __ movl(tmp, src_klass_addr);
3183        __ cmpl(tmp, dst_klass_addr);
3184      } else {
3185        __ movptr(tmp, src_klass_addr);
3186        __ cmpptr(tmp, dst_klass_addr);
3187      }
3188      __ jcc(Assembler::notEqual, *stub->entry());
3189    } else {
3190      // For object arrays, if src is a sub class of dst then we can
3191      // safely do the copy.
3192      Label cont, slow;
3193
3194      __ push(src);
3195      __ push(dst);
3196
3197      __ load_klass(src, src);
3198      __ load_klass(dst, dst);
3199
3200      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3201
3202      __ push(src);
3203      __ push(dst);
3204      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3205      __ pop(dst);
3206      __ pop(src);
3207
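          // The stub leaves its result in the stack slot popped into src:
          // nonzero means src's klass is a subtype of dst's, so the arrays
          // are compatible and the copy can proceed.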
3208      __ cmpl(src, 0);
3209      __ jcc(Assembler::notEqual, cont);
3210
3211      __ bind(slow);
3212      __ pop(dst);
3213      __ pop(src);
3214
3215      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
3216      if (copyfunc_addr != NULL) { // use stub if available
3217        // src is not a sub class of dst so we have to do a
3218        // per-element check.
3219
3220        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
3221        if ((flags & mask) != mask) {
3222          // Verify at runtime that the statically unknown operand is also an object array.
3223          assert(flags & mask, "one of the two should be known to be an object array");
3224
3225          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
3226            __ load_klass(tmp, src);
3227          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
3228            __ load_klass(tmp, dst);
3229          }
3230          int lh_offset = in_bytes(Klass::layout_helper_offset());
3231          Address klass_lh_addr(tmp, lh_offset);
3232          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3233          __ cmpl(klass_lh_addr, objArray_lh);
3234          __ jcc(Assembler::notEqual, *stub->entry());
3235        }
3236
3237        // Spill because stubs can use any register they like and it's
3238        // easier to restore just those that we care about.
3239        store_parameter(dst, 0);
3240        store_parameter(dst_pos, 1);
3241        store_parameter(length, 2);
3242        store_parameter(src_pos, 3);
3243        store_parameter(src, 4);
3244
3245#ifndef _LP64
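            // Push the five C arguments right to left, as expected by the
            // checkcast stub: (src_addr, dst_addr, length, ckoff, ckval).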
3246        __ movptr(tmp, dst_klass_addr);
3247        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
3248        __ push(tmp);
3249        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
3250        __ push(tmp);
3251        __ push(length);
3252        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3253        __ push(tmp);
3254        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3255        __ push(tmp);
3256
3257        __ call_VM_leaf(copyfunc_addr, 5);
3258#else
3259        __ movl2ptr(length, length); // upper 32 bits must be zero
3260
3261        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3262        assert_different_registers(c_rarg0, dst, dst_pos, length);
3263        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3264        assert_different_registers(c_rarg1, dst, length);
3265
3266        __ mov(c_rarg2, length);
3267        assert_different_registers(c_rarg2, dst);
3268
3269#ifdef _WIN64
3270        // Allocate Win64 ABI shadow space for the args while keeping the stack aligned
3271        __ subptr(rsp, 6*wordSize);
3272        __ load_klass(c_rarg3, dst);
3273        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
3274        store_parameter(c_rarg3, 4);
3275        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
3276        __ call(RuntimeAddress(copyfunc_addr));
3277        __ addptr(rsp, 6*wordSize);
3278#else
3279        __ load_klass(c_rarg4, dst);
3280        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
3281        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
3282        __ call(RuntimeAddress(copyfunc_addr));
3283#endif
3284
3285#endif
3286
3287#ifndef PRODUCT
3288        if (PrintC1Statistics) {
3289          Label failed;
3290          __ testl(rax, rax);
3291          __ jcc(Assembler::notZero, failed);
3292          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
3293          __ bind(failed);
3294        }
3295#endif
3296
3297        __ testl(rax, rax);
3298        __ jcc(Assembler::zero, *stub->continuation());
3299
3300#ifndef PRODUCT
3301        if (PrintC1Statistics) {
3302          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
3303        }
3304#endif
3305
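            // As with the generic stub above, rax holds ~(elements copied)
            // on a partial copy; recover the count and advance the arguments.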
3306        __ mov(tmp, rax);
3307
3308        __ xorl(tmp, -1);
3309
3310        // Restore previously spilled arguments
3311        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3312        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3313        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3314        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3315        __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3316
3317
3318        __ subl(length, tmp);
3319        __ addl(src_pos, tmp);
3320        __ addl(dst_pos, tmp);
3321      }
3322
3323      __ jmp(*stub->entry());
3324
3325      __ bind(cont);
3326      __ pop(dst);
3327      __ pop(src);
3328    }
3329  }
3330
3331#ifdef ASSERT
3332  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3333    // Sanity check the known type with the incoming class.  For the
3334    // primitive case the types must match exactly with src.klass and
3335    // dst.klass each exactly matching the default type.  For the
3336    // object array case, if no type check is needed then either the
3337    // dst type is exactly the expected type and the src type is a
3338    // subtype which we can't check or src is the same array as dst
3339    // but not necessarily exactly of type default_type.
3340    Label known_ok, halt;
3341    __ mov_metadata(tmp, default_type->constant_encoding());
3342#ifdef _LP64
3343    if (UseCompressedClassPointers) {
3344      __ encode_klass_not_null(tmp);
3345    }
3346#endif
3347
3348    if (basic_type != T_OBJECT) {
3349
3350      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
3351      else                            __ cmpptr(tmp, dst_klass_addr);
3352      __ jcc(Assembler::notEqual, halt);
3353      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
3354      else                            __ cmpptr(tmp, src_klass_addr);
3355      __ jcc(Assembler::equal, known_ok);
3356    } else {
3357      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
3358      else                            __ cmpptr(tmp, dst_klass_addr);
3359      __ jcc(Assembler::equal, known_ok);
3360      __ cmpptr(src, dst);
3361      __ jcc(Assembler::equal, known_ok);
3362    }
3363    __ bind(halt);
3364    __ stop("incorrect type information in arraycopy");
3365    __ bind(known_ok);
3366  }
3367#endif
3368
3369#ifndef PRODUCT
3370  if (PrintC1Statistics) {
3371    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3372  }
3373#endif
3374
3375#ifdef _LP64
3376  assert_different_registers(c_rarg0, dst, dst_pos, length);
3377  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3378  assert_different_registers(c_rarg1, length);
3379  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3380  __ mov(c_rarg2, length);
3381
3382#else
3383  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3384  store_parameter(tmp, 0);
3385  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3386  store_parameter(tmp, 1);
3387  store_parameter(length, 2);
3388#endif // _LP64
3389
3390  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
3391  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
3392  const char *name;
3393  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
3394  __ call_VM_leaf(entry, 0);
3395
3396  __ bind(*stub->continuation());
3397}
3398
3399void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3400  assert(op->crc()->is_single_cpu(),  "crc must be register");
3401  assert(op->val()->is_single_cpu(),  "byte value must be register");
3402  assert(op->result_opr()->is_single_cpu(), "result must be register");
3403  Register crc = op->crc()->as_register();
3404  Register val = op->val()->as_register();
3405  Register res = op->result_opr()->as_register();
3406
3407  assert_different_registers(val, crc, res);
3408
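      // CRC-32 is defined over the bit-inverted crc value, so invert it
      // around the table-driven single-byte update.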
3409  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3410  __ notl(crc); // ~crc
3411  __ update_byte_crc32(crc, val, res);
3412  __ notl(crc); // ~crc
3413  __ mov(res, crc);
3414}
3415
3416void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3417  Register obj = op->obj_opr()->as_register();  // may not be an oop
3418  Register hdr = op->hdr_opr()->as_register();
3419  Register lock = op->lock_opr()->as_register();
3420  if (!UseFastLocking) {
3421    __ jmp(*op->stub()->entry());
3422  } else if (op->code() == lir_lock) {
3423    Register scratch = noreg;
3424    if (UseBiasedLocking) {
3425      scratch = op->scratch_opr()->as_register();
3426    }
3427    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3428    // add debug info for NullPointerException only if one is possible
3429    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3430    if (op->info() != NULL) {
3431      add_debug_info_for_null_check(null_check_offset, op->info());
3432    }
3433    // done
3434  } else if (op->code() == lir_unlock) {
3435    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3436    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3437  } else {
3438    Unimplemented();
3439  }
3440  __ bind(*op->stub()->continuation());
3441}
3442
3443
3444void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3445  ciMethod* method = op->profiled_method();
3446  int bci          = op->profiled_bci();
3447  ciMethod* callee = op->profiled_callee();
3448
3449  // Update counter for all call types
3450  ciMethodData* md = method->method_data_or_null();
3451  assert(md != NULL, "Sanity");
3452  ciProfileData* data = md->bci_to_data(bci);
3453  assert(data->is_CounterData(), "need CounterData for calls");
3454  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
3455  Register mdo  = op->mdo()->as_register();
3456  __ mov_metadata(mdo, md->constant_encoding());
3457  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3458  Bytecodes::Code bc = method->java_code_at_bci(bci);
3459  const bool callee_is_static = callee->is_loaded() && callee->is_static();
3460  // Perform additional virtual call profiling for invokevirtual and
3461  // invokeinterface bytecodes
3462  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
3463      !callee_is_static &&  // required for optimized MH invokes
3464      C1ProfileVirtualCalls) {
3465    assert(op->recv()->is_single_cpu(), "recv must be allocated");
3466    Register recv = op->recv()->as_register();
3467    assert_different_registers(mdo, recv);
3468    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3469    ciKlass* known_klass = op->known_holder();
3470    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3471      // We know the type that will be seen at this call site; we can
3472      // statically update the MethodData* rather than needing to do
3473      // dynamic tests on the receiver type
3474
3475      // NOTE: we should probably put a lock around this search to
3476      // avoid collisions between concurrent compilations
3477      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
3478      uint i;
3479      for (i = 0; i < VirtualCallData::row_limit(); i++) {
3480        ciKlass* receiver = vc_data->receiver(i);
3481        if (known_klass->equals(receiver)) {
3482          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3483          __ addptr(data_addr, DataLayout::counter_increment);
3484          return;
3485        }
3486      }
3487
3488      // Receiver type not found in profile data; select an empty slot
3489
3490      // Note that this is less efficient than it should be because it
3491      // always does a write to the receiver part of the
3492      // VirtualCallData rather than just the first time
3493      for (i = 0; i < VirtualCallData::row_limit(); i++) {
3494        ciKlass* receiver = vc_data->receiver(i);
3495        if (receiver == NULL) {
3496          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3497          __ mov_metadata(recv_addr, known_klass->constant_encoding());
3498          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3499          __ addptr(data_addr, DataLayout::counter_increment);
3500          return;
3501        }
3502      }
3503    } else {
3504      __ load_klass(recv, recv);
3505      Label update_done;
3506      type_profile_helper(mdo, md, data, recv, &update_done);
3507      // Receiver did not match any saved receiver and there is no empty row for it.
3508      // Increment total counter to indicate polymorphic case.
3509      __ addptr(counter_addr, DataLayout::counter_increment);
3510
3511      __ bind(update_done);
3512    }
3513  } else {
3514    // Static call
3515    __ addptr(counter_addr, DataLayout::counter_increment);
3516  }
3517}
3518
3519void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3520  Register obj = op->obj()->as_register();
3521  Register tmp = op->tmp()->as_pointer_register();
3522  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3523  ciKlass* exact_klass = op->exact_klass();
3524  intptr_t current_klass = op->current_klass();
3525  bool not_null = op->not_null();
3526  bool no_conflict = op->no_conflict();
3527
3528  Label update, next, none;
3529
3530  bool do_null = !not_null;
3531  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3532  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3533
3534  assert(do_null || do_update, "why are we here?");
3535  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
3536
3537  __ verify_oop(obj);
3538
3539  if (tmp != obj) {
3540    __ mov(tmp, obj);
3541  }
3542  if (do_null) {
3543    __ testptr(tmp, tmp);
3544    __ jccb(Assembler::notZero, update);
3545    if (!TypeEntries::was_null_seen(current_klass)) {
3546      __ orptr(mdo_addr, TypeEntries::null_seen);
3547    }
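        // The #ifdef below interleaves with the braces: product builds can
        // reach 'next' with a short jmpb, while ASSERT builds emit extra
        // checking code that pushes 'next' out of rel8 range and
        // additionally verifies that a not_null obj is really non-NULL.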
3548    if (do_update) {
3549#ifndef ASSERT
3550      __ jmpb(next);
3551    }
3552#else
3553      __ jmp(next);
3554    }
3555  } else {
3556    __ testptr(tmp, tmp);
3557    __ jccb(Assembler::notZero, update);
3558    __ stop("unexpect null obj");
3559#endif
3560  }
3561
3562  __ bind(update);
3563
3564  if (do_update) {
3565#ifdef ASSERT
3566    if (exact_klass != NULL) {
3567      Label ok;
3568      __ load_klass(tmp, tmp);
3569      __ push(tmp);
3570      __ mov_metadata(tmp, exact_klass->constant_encoding());
3571      __ cmpptr(tmp, Address(rsp, 0));
3572      __ jccb(Assembler::equal, ok);
3573      __ stop("exact klass and actual klass differ");
3574      __ bind(ok);
3575      __ pop(tmp);
3576    }
3577#endif
3578    if (!no_conflict) {
3579      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
3580        if (exact_klass != NULL) {
3581          __ mov_metadata(tmp, exact_klass->constant_encoding());
3582        } else {
3583          __ load_klass(tmp, tmp);
3584        }
3585
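            // XOR the actual klass with the recorded entry: if the klass
            // bits match, only flag bits (null_seen/type_unknown) can
            // remain set in tmp.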
3586        __ xorptr(tmp, mdo_addr);
3587        __ testptr(tmp, TypeEntries::type_klass_mask);
3588        // klass seen before, nothing to do. The unknown bit may have been
3589        // set already but no need to check.
3590        __ jccb(Assembler::zero, next);
3591
3592        __ testptr(tmp, TypeEntries::type_unknown);
3593        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3594
3595        if (TypeEntries::is_type_none(current_klass)) {
3596          __ cmpptr(mdo_addr, 0);
3597          __ jccb(Assembler::equal, none);
3598          __ cmpptr(mdo_addr, TypeEntries::null_seen);
3599          __ jccb(Assembler::equal, none);
3600          // There is a chance that the checks above fail (they re-read
3601          // the profiling data from memory) if another thread has just
3602          // stored this obj's klass into the profile entry.
3603          __ xorptr(tmp, mdo_addr);
3604          __ testptr(tmp, TypeEntries::type_klass_mask);
3605          __ jccb(Assembler::zero, next);
3606        }
3607      } else {
3608        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3609               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
3610
3611        __ movptr(tmp, mdo_addr);
3612        __ testptr(tmp, TypeEntries::type_unknown);
3613        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3614      }
3615
3616      // Different from the recorded type: we cannot keep an accurate profile.
3617      __ orptr(mdo_addr, TypeEntries::type_unknown);
3618
3619      if (TypeEntries::is_type_none(current_klass)) {
3620        __ jmpb(next);
3621
3622        __ bind(none);
3623        // first time here. Set profile type.
3624        __ movptr(mdo_addr, tmp);
3625      }
3626    } else {
3627      // There's a single possible klass at this profile point
3628      assert(exact_klass != NULL, "should be");
3629      if (TypeEntries::is_type_none(current_klass)) {
3630        __ mov_metadata(tmp, exact_klass->constant_encoding());
3631        __ xorptr(tmp, mdo_addr);
3632        __ testptr(tmp, TypeEntries::type_klass_mask);
3633#ifdef ASSERT
3634        __ jcc(Assembler::zero, next);
3635
3636        {
3637          Label ok;
3638          __ push(tmp);
3639          __ cmpptr(mdo_addr, 0);
3640          __ jcc(Assembler::equal, ok);
3641          __ cmpptr(mdo_addr, TypeEntries::null_seen);
3642          __ jcc(Assembler::equal, ok);
3643          // may have been set by another thread
3644          __ mov_metadata(tmp, exact_klass->constant_encoding());
3645          __ xorptr(tmp, mdo_addr);
3646          __ testptr(tmp, TypeEntries::type_mask);
3647          __ jcc(Assembler::zero, ok);
3648
3649          __ stop("unexpected profiling mismatch");
3650          __ bind(ok);
3651          __ pop(tmp);
3652        }
3653#else
3654        __ jccb(Assembler::zero, next);
3655#endif
3656        // first time here. Set profile type.
3657        __ movptr(mdo_addr, tmp);
3658      } else {
3659        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3660               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3661
3662        __ movptr(tmp, mdo_addr);
3663        __ testptr(tmp, TypeEntries::type_unknown);
3664        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3665
3666        __ orptr(mdo_addr, TypeEntries::type_unknown);
3667      }
3668    }
3669
3670    __ bind(next);
3671  }
3672}
3673
3674void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3675  Unimplemented();
3676}
3677
3678
3679void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3680  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3681}
3682
3683
3684void LIR_Assembler::align_backward_branch_target() {
3685  __ align(BytesPerWord);
3686}
3687
3688
3689void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3690  if (left->is_single_cpu()) {
3691    __ negl(left->as_register());
3692    move_regs(left->as_register(), dest->as_register());
3693
3694  } else if (left->is_double_cpu()) {
3695    Register lo = left->as_register_lo();
3696#ifdef _LP64
3697    Register dst = dest->as_register_lo();
3698    __ movptr(dst, lo);
3699    __ negptr(dst);
3700#else
3701    Register hi = left->as_register_hi();
3702    __ lneg(hi, lo);
3703    if (dest->as_register_lo() == hi) {
3704      assert(dest->as_register_hi() != lo, "destroying register");
3705      move_regs(hi, dest->as_register_hi());
3706      move_regs(lo, dest->as_register_lo());
3707    } else {
3708      move_regs(lo, dest->as_register_lo());
3709      move_regs(hi, dest->as_register_hi());
3710    }
3711#endif // _LP64
3712
3713  } else if (dest->is_single_xmm()) {
3714    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
3715      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
3716    }
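        // Flip the sign bit by XORing against the 128-bit sign-flip mask;
        // with AVX the non-destructive three-operand form can be used.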
3717    if (UseAVX > 0) {
3718      __ vnegatess(dest->as_xmm_float_reg(), dest->as_xmm_float_reg(),
3719                   ExternalAddress((address)float_signflip_pool));
3720    } else {
3721      __ xorps(dest->as_xmm_float_reg(),
3722               ExternalAddress((address)float_signflip_pool));
3723    }
3724  } else if (dest->is_double_xmm()) {
3725    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
3726      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
3727    }
3728    if (UseAVX > 0) {
3729      __ vnegatesd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg(),
3730                   ExternalAddress((address)double_signflip_pool));
3731    } else {
3732      __ xorpd(dest->as_xmm_double_reg(),
3733               ExternalAddress((address)double_signflip_pool));
3734    }
3735  } else if (left->is_single_fpu() || left->is_double_fpu()) {
3736    assert(left->fpu() == 0, "arg must be on TOS");
3737    assert(dest->fpu() == 0, "dest must be TOS");
3738    __ fchs();
3739
3740  } else {
3741    ShouldNotReachHere();
3742  }
3743}
3744
3745
3746void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
3747  assert(addr->is_address() && dest->is_register(), "check");
3748  Register reg;
3749  reg = dest->as_pointer_register();
3750  __ lea(reg, as_Address(addr->as_address_ptr()));
3751}
3752
3753
3754
3755void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3756  assert(!tmp->is_valid(), "don't need temporary");
3757  __ call(RuntimeAddress(dest));
3758  if (info != NULL) {
3759    add_call_info_here(info);
3760  }
3761}
3762
3763
3764void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3765  assert(type == T_LONG, "only for volatile long fields");
3766
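      // A volatile long must be accessed with a single 64-bit move; on
      // 32-bit x86 this is done with an XMM or x87 register.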
3767  if (info != NULL) {
3768    add_debug_info_for_null_check_here(info);
3769  }
3770
3771  if (src->is_double_xmm()) {
3772    if (dest->is_double_cpu()) {
3773#ifdef _LP64
3774      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3775#else
3776      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3777      __ psrlq(src->as_xmm_double_reg(), 32);
3778      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3779#endif // _LP64
3780    } else if (dest->is_double_stack()) {
3781      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
3782    } else if (dest->is_address()) {
3783      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
3784    } else {
3785      ShouldNotReachHere();
3786    }
3787
3788  } else if (dest->is_double_xmm()) {
3789    if (src->is_double_stack()) {
3790      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
3791    } else if (src->is_address()) {
3792      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
3793    } else {
3794      ShouldNotReachHere();
3795    }
3796
3797  } else if (src->is_double_fpu()) {
3798    assert(src->fpu_regnrLo() == 0, "must be TOS");
3799    if (dest->is_double_stack()) {
3800      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
3801    } else if (dest->is_address()) {
3802      __ fistp_d(as_Address(dest->as_address_ptr()));
3803    } else {
3804      ShouldNotReachHere();
3805    }
3806
3807  } else if (dest->is_double_fpu()) {
3808    assert(dest->fpu_regnrLo() == 0, "must be TOS");
3809    if (src->is_double_stack()) {
3810      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
3811    } else if (src->is_address()) {
3812      __ fild_d(as_Address(src->as_address_ptr()));
3813    } else {
3814      ShouldNotReachHere();
3815    }
3816  } else {
3817    ShouldNotReachHere();
3818  }
3819}
3820
3821#ifdef ASSERT
3822// emit run-time assertion
3823void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3824  assert(op->code() == lir_assert, "must be");
3825
3826  if (op->in_opr1()->is_valid()) {
3827    assert(op->in_opr2()->is_valid(), "both operands must be valid");
3828    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3829  } else {
3830    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3831    assert(op->condition() == lir_cond_always, "no other conditions allowed");
3832  }
3833
3834  Label ok;
3835  if (op->condition() != lir_cond_always) {
3836    Assembler::Condition acond = Assembler::zero;
3837    switch (op->condition()) {
3838      case lir_cond_equal:        acond = Assembler::equal;       break;
3839      case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
3840      case lir_cond_less:         acond = Assembler::less;        break;
3841      case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
3842      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3843      case lir_cond_greater:      acond = Assembler::greater;     break;
3844      case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
3845      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
3846      default:                    ShouldNotReachHere();
3847    }
3848    __ jcc(acond, ok);
3849  }
3850  if (op->halt()) {
3851    const char* str = __ code_string(op->msg());
3852    __ stop(str);
3853  } else {
3854    breakpoint();
3855  }
3856  __ bind(ok);
3857}
3858#endif
3859
3860void LIR_Assembler::membar() {
3861  // QQQ sparc TSO uses this.
3862  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
3863}
3864
3865void LIR_Assembler::membar_acquire() {
3866  // No x86 machines currently require load fences
3867}
3868
3869void LIR_Assembler::membar_release() {
3870  // No x86 machines currently require store fences
3871}
3872
3873void LIR_Assembler::membar_loadload() {
3874  // no-op
3875  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
3876}
3877
3878void LIR_Assembler::membar_storestore() {
3879  // no-op
3880  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
3881}
3882
3883void LIR_Assembler::membar_loadstore() {
3884  // no-op
3885  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
3886}
3887
3888void LIR_Assembler::membar_storeload() {
3889  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
3890}
3891
3892void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3893  assert(result_reg->is_register(), "check");
3894#ifdef _LP64
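      // r15 is reserved for the current JavaThread in 64-bit compiled code.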
3895  // __ get_thread(result_reg->as_register_lo());
3896  __ mov(result_reg->as_register(), r15_thread);
3897#else
3898  __ get_thread(result_reg->as_register());
3899#endif // _LP64
3900}
3901
3902
3903void LIR_Assembler::peephole(LIR_List*) {
3904  // do nothing for now
3905}
3906
3907void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3908  assert(data == dest, "xchg/xadd use only 2 operands");
3909
3910  if (data->type() == T_INT) {
3911    if (code == lir_xadd) {
3912      if (os::is_MP()) {
3913        __ lock();
3914      }
3915      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3916    } else {
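          // xchg with a memory operand is implicitly locked; only xadd
          // needs an explicit lock prefix on MP systems.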
3917      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3918    }
3919  } else if (data->is_oop()) {
3920    assert(code == lir_xchg, "xadd not supported for oops");
3921    Register obj = data->as_register();
3922#ifdef _LP64
3923    if (UseCompressedOops) {
3924      __ encode_heap_oop(obj);
3925      __ xchgl(obj, as_Address(src->as_address_ptr()));
3926      __ decode_heap_oop(obj);
3927    } else {
3928      __ xchgptr(obj, as_Address(src->as_address_ptr()));
3929    }
3930#else
3931    __ xchgl(obj, as_Address(src->as_address_ptr()));
3932#endif
3933  } else if (data->type() == T_LONG) {
3934#ifdef _LP64
3935    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3936    if (code == lir_xadd) {
3937      if (os::is_MP()) {
3938        __ lock();
3939      }
3940      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3941    } else {
3942      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3943    }
3944#else
3945    ShouldNotReachHere();
3946#endif
3947  } else {
3948    ShouldNotReachHere();
3949  }
3950}
3951
3952#undef __
3953