/*
 * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler_x86.cpp.incl"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // of a 128-bit operand for SSE instructions.
  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
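
// A sketch of how these pools are typically consumed (illustrative only; the
// arithmetic emitters that actually use them are elsewhere in this port):
//   __ andps(dst, ExternalAddress((address)float_signmask_pool));  // AbsF: clear the sign bit
//   __ xorps(dst, ExternalAddress((address)float_signflip_pool));  // NegF: flip the sign bit
// The 16-byte alignment established by double_quadword() is what makes these
// memory-operand SSE forms legal.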



NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
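
// Illustrative use (hypothetical call site): if the caller was handed
// tmp1 == preserve, the helper silently retargets tmp1 to 'extra', e.g.
//   Register tmp1 = rbx, tmp2 = rcx;
//   select_different_registers(/*preserve*/ rbx, /*extra*/ rdx, tmp1, tmp2);
//   // now tmp1 == rdx, and {rbx, rdx, rcx} are pairwise distinct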



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::rcx_oop_opr;
}

LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return receiverOpr();
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::rcx_opr;
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}

void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    __ push_reg(opr->as_register_hi());
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    //return Address(addr->disp(), relocInfo::none);
    // hack for now since this should really return an AddressLiteral
    // which will have to await 64bit c1 changes.
    return Address(noreg, addr->disp());
  }

  Register base = addr->base()->as_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_single_cpu()) {
    Register index = addr->index()->as_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    int addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
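
// Worked example (illustrative numbers): a LIR address with base = rsi,
// index = rcx, scale = times_2, disp = 8 maps to the x86 operand
// [rsi + rcx*2 + 8]; a constant index, say 3 with scale times_4 and disp 16,
// is folded into a plain displacement: [rsi + (3 << 2) + 16] = [rsi + 28].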


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock, if a
  // synchronized method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpl(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
      __ movl(frame_map()->address_for_monitor_lock(i), rbx);
      __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
      __ movl(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
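
// Worked example for the monitor-copy arithmetic above (illustrative numbers):
// with max_locals = 3 and number_of_locks = 2 on 32-bit (BytesPerWord = 4,
// BasicObjectLock::size() = 2 words), monitor_offset = 4*3 + 8*1 = 20, so
// monitor 0 is read from [OSR_buf + 20] and monitor 1 from [OSR_buf + 12],
// i.e. the buffer is walked from the highest-addressed lock slot downwards.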


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;

  if (!VerifyOops) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + 9) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
  if (VerifyOops) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}
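
// A note on the magic constant 9 above (an inference from the surrounding
// asserts, not stated in the source): the inline cache check presumably emits
// a 3-byte cmp of the receiver's klass against IC_Klass plus a 6-byte jcc to
// the miss handler, 9 bytes total, so padding (offset + 9) up to
// CodeEntryAlignment aligns the verified entry point that follows the check.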


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
  if (exception->is_valid()) {
    // preserve exception
    // note: the monitor_exit runtime call is a leaf routine
    //       and cannot block => no GC can happen
    // The slow case (MonitorAccessStub) uses the first two stack slots
    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
    __ movl (Address(rsp, 2*wordSize), exception);
  }

  Register obj_reg  = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  // setup registers (lock_reg must be rax for lock_object)
  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax must be available here");
  Register hdr = lock_reg;
  assert(new_hdr == SYNC_header, "wrong register");
  lock_reg = new_hdr;
  // compute pointer to BasicLock
  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ leal(lock_reg, lock_addr);
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ jmp(*slow_case->entry());
  }
  // done
  __ bind(*slow_case->continuation());

  if (exception->is_valid()) {
    // restore exception
    __ movl (exception, Address(rsp, 2 * wordSize));
  }
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
  // if rounding, must let FrameMap know!
  return (frame_map()->framesize() - 2)  * BytesPerWord; // subtract two words to account for return address and link
}


void LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)

  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return;
  }
#ifdef ASSERT
  int offset = code_offset();
#endif // ASSERT

  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());

  // if the method does not have an exception handler, then there is
  // no reason to search for one
  if (compilation()->has_exception_handlers() || JvmtiExport::can_post_exceptions()) {
    // the exception oop and pc are in rax and rdx
    // no other registers need to be preserved, so invalidate them
    __ invalidate_registers(false, true, true, false, true, true);

    // check that there is really an exception
    __ verify_not_null_oop(rax);

    // search an exception handler (rax: exception oop, rdx: throwing pc)
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));

    // if the call returns here, then the exception handler for the particular
    // exception doesn't exist -> unwind activation and forward exception to caller
  }

  // the exception oop is in rax
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, true, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // unlock the receiver/klass if necessary
  // rax: exception
  ciMethod* method = compilation()->method();
  if (method->is_synchronized() && GenerateSynchronizationCode) {
    monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
  }

  // unwind activation and forward exception to caller
  // rax: exception
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  assert(code_offset() - offset <= exception_handler_size, "overflow");

  __ end_a_stub();
}

void LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)

  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return;
  }
#ifdef ASSERT
  int offset = code_offset();
#endif // ASSERT

  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  InternalAddress here(__ pc());
  __ pushptr(here.addr());

  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(code_offset() - offset <= deopt_handler_size, "overflow");

  __ end_a_stub();

}
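
// Why push 'here' before jumping (an inference, not stated above): the deopt
// blob's unpack entry expects a return-address-shaped word on top of the
// stack; pushing the current pc makes the jump look like a call from this
// nmethod, letting the deopt machinery identify the deoptimization point.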


// This is the fast version of java.lang.String.compare; it has no
// OSR entry, and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movl (rbx, rcx); // receiver is in rcx
  __ movl (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ movl (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  __ movl (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
  __ leal (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));


  // rbx may be NULL
  add_debug_info_for_null_check_here(info);
  __ movl (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  __ movl (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
  __ leal (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  if (VM_Version::supports_cmov()) {
    __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ movl (rcx, rbx);
    __ subl (rbx, rax); // subtract lengths
    __ pushl(rbx);      // result
    __ cmovl(Assembler::lessEqual, rax, rcx);
  } else {
    Label L;
    __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ movl (rax, rbx);
    __ subl (rbx, rcx);
    __ pushl(rbx);
    __ jcc  (Assembler::lessEqual, L);
    __ movl (rax, rcx);
    __ bind (L);
  }
  // is minimum length 0?
  Label noLoop, haveResult;
  __ testl (rax, rax);
  __ jcc (Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_word(rcx, Address(rdi, 0));
  __ load_unsigned_word(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi/rdi to the end of the arrays (arrays have same length)
  // negate the index

  __ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negl(rax);

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_word(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_word(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  __ popl(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // leave instruction is going to discard the TOS value
  __ movl (rax, rcx); // result of call is in rax
}
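
// For reference, the assembly above implements roughly this Java logic
// (a sketch of String.compareTo's contract, not code taken from this source):
//   int min = Math.min(this.count, that.count);
//   for (int i = 0; i < min; i++) {
//     int d = this.charAt(i) - that.charAt(i);
//     if (d != 0) return d;
//   }
//   return this.count - that.count;
// The length difference is parked on the stack up front and popped as the
// result only when the loop runs off the end (the noLoop path).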


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ leave();

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);
  __ test32(rax, polling_page);

  __ ret(0);
}
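
// How the poll works (paraphrasing VM-wide behavior, not spelled out here):
// test32 reads a word from the polling page; when the VM wants a safepoint it
// protects that page, so the read faults and the signal handler, guided by
// the poll_return_type relocation recorded above, stops this thread at the
// return. A read is used because it clobbers only the condition codes.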


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);

  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    ShouldNotReachHere();
  }

  int offset = __ offset();
  __ test32(rax, polling_page);
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ movl(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgl(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register_lo(), c->as_jint_lo());
      __ movl(dest->as_register_hi(), c->as_jint_hi());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                   InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      __ movl(frame_map()->address_for_slot(dest->double_stack_ix(),
                                            lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movl(frame_map()->address_for_slot(dest->double_stack_ix(),
                                            hi_word_offset_in_bytes), c->as_jint_hi_bits());
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  if (info != NULL) add_debug_info_for_null_check_here(info);
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        __ movl(as_Address(addr), NULL_WORD);
      } else {
        __ movoop(as_Address(addr), c->as_jobject());
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
      __ movl(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movl(as_Address_lo(addr), c->as_jint_lo_bits());
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");

    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}
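
// The xmm <-> x87 transfers above bounce through the scratch word(s) at
// [rsp + 0] because IA-32 has no direct move between XMM registers and the
// x87 stack; a store of one kind followed by a load of the other is the
// conventional sequence. This presumes scratch space at the top of the
// compiled frame, which the frame setup is assumed to reserve.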

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
    }
    __ movl (dst, src->as_register());

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movl (dstLO, src->as_register_lo());
    __ movl (dstHI, src->as_register_hi());

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
  }
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ADDRESS: // fall through
    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movl(dest->as_register_hi(), src_addr_HI);
    __ movl(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
    __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));

  } else if (src->is_double_stack()) {
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + 4, adding 4 for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 4 + 4));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 4 + 4));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));

  } else {
    ShouldNotReachHere();
  }
}
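
// Why the "4 + 4" above (an inference from the addressing scheme): the frame
// map apparently hands out rsp-relative addresses, and the first pushl moved
// rsp down by one word, so reaching the second 32-bit half of the source slot
// (src + 4) needs an extra +4 to compensate. The pops mirror this in reverse.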


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorl(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_ADDRESS: // fall through
    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ leal(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsxb(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzxw(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsxw(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(dest->as_register());
  }
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (ReadPrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow()) {
    __ prefetchr(from_addr);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (AllocatePrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      case 3:
        __ prefetchw(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow()) {
    __ prefetchw(from_addr);
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    default:      ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                         ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                         ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
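
// Note on the float-branch lowering above (standard IA-32 practice, inferred):
// ucomiss/ucomisd set ZF/PF/CF like an unsigned compare, with PF signaling an
// unordered result (NaN). Hence the explicit jcc(parity, ...) to the
// unordered successor first, and the use of below/aboveEqual rather than
// less/greaterEqual for the ordered cases.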
1302
1303void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1304  LIR_Opr src  = op->in_opr();
1305  LIR_Opr dest = op->result_opr();
1306
1307  switch (op->bytecode()) {
1308    case Bytecodes::_i2l:
1309      move_regs(src->as_register(), dest->as_register_lo());
1310      move_regs(src->as_register(), dest->as_register_hi());
1311      __ sarl(dest->as_register_hi(), 31);
1312      break;
1313
1314    case Bytecodes::_l2i:
1315      move_regs(src->as_register_lo(), dest->as_register());
1316      break;
1317
1318    case Bytecodes::_i2b:
1319      move_regs(src->as_register(), dest->as_register());
1320      __ sign_extend_byte(dest->as_register());
1321      break;
1322
1323    case Bytecodes::_i2c:
1324      move_regs(src->as_register(), dest->as_register());
1325      __ andl(dest->as_register(), 0xFFFF);
1326      break;
1327
1328    case Bytecodes::_i2s:
1329      move_regs(src->as_register(), dest->as_register());
1330      __ sign_extend_short(dest->as_register());
1331      break;
1332
1333
1334    case Bytecodes::_f2d:
1335    case Bytecodes::_d2f:
1336      if (dest->is_single_xmm()) {
1337        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1338      } else if (dest->is_double_xmm()) {
1339        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1340      } else {
1341        assert(src->fpu() == dest->fpu(), "registers must be equal");
1342        // do nothing (float result is rounded later through spilling)
1343      }
1344      break;
1345
1346    case Bytecodes::_i2f:
1347    case Bytecodes::_i2d:
1348      if (dest->is_single_xmm()) {
1349        __ cvtsi2ss(dest->as_xmm_float_reg(), src->as_register());
1350      } else if (dest->is_double_xmm()) {
1351        __ cvtsi2sd(dest->as_xmm_double_reg(), src->as_register());
1352      } else {
1353        assert(dest->fpu() == 0, "result must be on TOS");
1354        __ movl(Address(rsp, 0), src->as_register());
1355        __ fild_s(Address(rsp, 0));
1356      }
1357      break;
1358
1359    case Bytecodes::_f2i:
1360    case Bytecodes::_d2i:
1361      if (src->is_single_xmm()) {
1362        __ cvttss2si(dest->as_register(), src->as_xmm_float_reg());
1363      } else if (src->is_double_xmm()) {
1364        __ cvttsd2si(dest->as_register(), src->as_xmm_double_reg());
1365      } else {
1366        assert(src->fpu() == 0, "input must be on TOS");
1367        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
1368        __ fist_s(Address(rsp, 0));
1369        __ movl(dest->as_register(), Address(rsp, 0));
1370        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1371      }
1372
1373      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
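          // cvttss2si/cvttsd2si and the truncating fist produce the "integer
          // indefinite" value 0x80000000 on overflow or NaN. Since that bit
          // pattern is also a valid result (Integer.MIN_VALUE), the stub
          // re-examines the input and materializes the JLS answer: 0 for NaN,
          // MIN_VALUE/MAX_VALUE for out-of-range inputs.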
1374      assert(op->stub() != NULL, "stub required");
1375      __ cmpl(dest->as_register(), 0x80000000);
1376      __ jcc(Assembler::equal, *op->stub()->entry());
1377      __ bind(*op->stub()->continuation());
1378      break;
1379
1380    case Bytecodes::_l2f:
1381    case Bytecodes::_l2d:
1382      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
1383      assert(dest->fpu() == 0, "result must be on TOS");
1384
1385      __ movl(Address(rsp, 0),            src->as_register_lo());
1386      __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
1387      __ fild_d(Address(rsp, 0));
1388      // float result is rounded later through spilling
1389      break;
1390
1391    case Bytecodes::_f2l:
1392    case Bytecodes::_d2l:
1393      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
1394      assert(src->fpu() == 0, "input must be on TOS");
1395      assert(dest == FrameMap::rax_rdx_long_opr, "runtime stub places result in these registers");
1396
1397      // instruction sequence too long to inline it here
1398      {
1399        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
1400      }
1401      break;
1402
1403    default: ShouldNotReachHere();
1404  }
1405}
1406
1407void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1408  if (op->init_check()) {
1409    __ cmpl(Address(op->klass()->as_register(),
1410                    instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)),
1411            instanceKlass::fully_initialized);
1412    add_debug_info_for_null_check_here(op->stub()->info());
1413    __ jcc(Assembler::notEqual, *op->stub()->entry());
1414  }
1415  __ allocate_object(op->obj()->as_register(),
1416                     op->tmp1()->as_register(),
1417                     op->tmp2()->as_register(),
1418                     op->header_size(),
1419                     op->object_size(),
1420                     op->klass()->as_register(),
1421                     *op->stub()->entry());
1422  __ bind(*op->stub()->continuation());
1423}
1424
1425void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1426  if (UseSlowPath ||
1427      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1428      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1429    __ jmp(*op->stub()->entry());
1430  } else {
1431    Register len =  op->len()->as_register();
1432    Register tmp1 = op->tmp1()->as_register();
1433    Register tmp2 = op->tmp2()->as_register();
1434    Register tmp3 = op->tmp3()->as_register();
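        // len must not alias one of the scratch registers handed to
        // allocate_array: if it does, substitute tmp3 for the aliased temp;
        // otherwise park a copy of the length in tmp3.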
1435    if (len == tmp1) {
1436      tmp1 = tmp3;
1437    } else if (len == tmp2) {
1438      tmp2 = tmp3;
1439    } else if (len == tmp3) {
1440      // everything is ok
1441    } else {
1442      __ movl(tmp3, len);
1443    }
1444    __ allocate_array(op->obj()->as_register(),
1445                      len,
1446                      tmp1,
1447                      tmp2,
1448                      arrayOopDesc::header_size(op->type()),
1449                      array_element_size(op->type()),
1450                      op->klass()->as_register(),
1451                      *op->stub()->entry());
1452  }
1453  __ bind(*op->stub()->continuation());
1454}
1455
1456
1457
1458void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1459  LIR_Code code = op->code();
1460  if (code == lir_store_check) {
1461    Register value = op->object()->as_register();
1462    Register array = op->array()->as_register();
1463    Register k_RInfo = op->tmp1()->as_register();
1464    Register klass_RInfo = op->tmp2()->as_register();
1465    Register Rtmp1 = op->tmp3()->as_register();
1466
1467    CodeStub* stub = op->stub();
1468    Label done;
1469    __ cmpl(value, 0);
1470    __ jcc(Assembler::equal, done);
1471    add_debug_info_for_null_check_here(op->info_for_exception());
1472    __ movl(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
1473    __ movl(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
1474
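        // Fast subtype check, mirroring Klass::is_subtype_of(): each klass
        // publishes a super_check_offset; for a primary supertype, the word at
        // [subklass + offset] equals the supertype exactly. If the offset
        // points at the secondary_super_cache instead, a miss is not
        // conclusive and the slow_subtype_check stub has to decide.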
1475    // get instance klass
1476    __ movl(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
1477    // get super_check_offset
1478    __ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
1479    // See if we get an immediate positive hit
1480    __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
1481    __ jcc(Assembler::equal, done);
1482    // check for immediate negative hit
1483    __ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
1484    __ jcc(Assembler::notEqual, *stub->entry());
1485    // check for self
1486    __ cmpl(klass_RInfo, k_RInfo);
1487    __ jcc(Assembler::equal, done);
1488
1489    __ pushl(klass_RInfo);
1490    __ pushl(k_RInfo);
1491    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1492    __ popl(klass_RInfo);
1493    __ popl(k_RInfo);
1494    __ cmpl(k_RInfo, 0);
1495    __ jcc(Assembler::equal, *stub->entry());
1496    __ bind(done);
1497  } else if (op->code() == lir_checkcast) {
1498    // we always need a stub for the failure case.
1499    CodeStub* stub = op->stub();
1500    Register obj = op->object()->as_register();
1501    Register k_RInfo = op->tmp1()->as_register();
1502    Register klass_RInfo = op->tmp2()->as_register();
1503    Register dst = op->result_opr()->as_register();
1504    ciKlass* k = op->klass();
1505    Register Rtmp1 = noreg;
1506
1507    Label done;
1508    if (obj == k_RInfo) {
1509      k_RInfo = dst;
1510    } else if (obj == klass_RInfo) {
1511      klass_RInfo = dst;
1512    }
1513    if (k->is_loaded()) {
1514      select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1515    } else {
1516      Rtmp1 = op->tmp3()->as_register();
1517      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1518    }
1519
1520    assert_different_registers(obj, k_RInfo, klass_RInfo);
1521    if (!k->is_loaded()) {
1522      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
1523    } else {
1524      k_RInfo = noreg;
1525    }
1526    assert(obj != k_RInfo, "must be different");
1527    __ cmpl(obj, 0);
1528    if (op->profiled_method() != NULL) {
1529      ciMethod* method = op->profiled_method();
1530      int bci          = op->profiled_bci();
1531
1532      Label profile_done;
1533      __ jcc(Assembler::notEqual, profile_done);
1534      // Object is null; update methodDataOop
1535      ciMethodData* md = method->method_data();
1536      if (md == NULL) {
1537        bailout("out of memory building methodDataOop");
1538        return;
1539      }
1540      ciProfileData* data = md->bci_to_data(bci);
1541      assert(data != NULL,       "need data for checkcast");
1542      assert(data->is_BitData(), "need BitData for checkcast");
1543      Register mdo  = klass_RInfo;
1544      __ movoop(mdo, md->encoding());
1545      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1546      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1547      __ orl(data_addr, header_bits);
1548      __ jmp(done);
1549      __ bind(profile_done);
1550    } else {
1551      __ jcc(Assembler::equal, done);
1552    }
1553    __ verify_oop(obj);
1554
1555    if (op->fast_check()) {
1556      // get object class
1557      // not a safepoint as obj null check happens earlier
1558      if (k->is_loaded()) {
1559        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
1560      } else {
1561        __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1563      }
1564      __ jcc(Assembler::notEqual, *stub->entry());
1565      __ bind(done);
1566    } else {
1567      // get object class
1568      // not a safepoint as obj null check happens earlier
1569      __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1570      if (k->is_loaded()) {
1571        // See if we get an immediate positive hit
1572        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
1573        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
1574          __ jcc(Assembler::notEqual, *stub->entry());
1575        } else {
1576          // See if we get an immediate positive hit
1577          __ jcc(Assembler::equal, done);
1578          // check for self
1579          __ cmpoop(klass_RInfo, k->encoding());
1580          __ jcc(Assembler::equal, done);
1581
1582          __ pushl(klass_RInfo);
1583          __ pushoop(k->encoding());
1584          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1585          __ popl(klass_RInfo);
1586          __ popl(klass_RInfo);
1587          __ cmpl(klass_RInfo, 0);
1588          __ jcc(Assembler::equal, *stub->entry());
1589        }
1590        __ bind(done);
1591      } else {
1592        __ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
1593        // See if we get an immediate positive hit
1594        __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
1595        __ jcc(Assembler::equal, done);
1596        // check for immediate negative hit
1597        __ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
1598        __ jcc(Assembler::notEqual, *stub->entry());
1599        // check for self
1600        __ cmpl(klass_RInfo, k_RInfo);
1601        __ jcc(Assembler::equal, done);
1602
1603        __ pushl(klass_RInfo);
1604        __ pushl(k_RInfo);
1605        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1606        __ popl(klass_RInfo);
1607        __ popl(k_RInfo);
1608        __ cmpl(k_RInfo, 0);
1609        __ jcc(Assembler::equal, *stub->entry());
1610        __ bind(done);
1611      }
1612
1613    }
1614    if (dst != obj) {
1615      __ movl(dst, obj);
1616    }
1617  } else if (code == lir_instanceof) {
1618    Register obj = op->object()->as_register();
1619    Register k_RInfo = op->tmp1()->as_register();
1620    Register klass_RInfo = op->tmp2()->as_register();
1621    Register dst = op->result_opr()->as_register();
1622    ciKlass* k = op->klass();
1623
1624    Label done;
1625    Label zero;
1626    Label one;
1627    if (obj == k_RInfo) {
1628      k_RInfo = klass_RInfo;
1629      klass_RInfo = obj;
1630    }
1631    // patching may screw with our temporaries on sparc,
1632    // so let's do it before loading the class
1633    if (!k->is_loaded()) {
1634      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
1635    }
1636    assert(obj != k_RInfo, "must be different");
1637
1638    __ verify_oop(obj);
1639    if (op->fast_check()) {
1640      __ cmpl(obj, 0);
1641      __ jcc(Assembler::equal, zero);
1642      // get object class
1643      // not a safepoint as obj null check happens earlier
1644      if (k->is_loaded()) {
1645        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
1646        k_RInfo = noreg;
1647      } else {
1648        __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1650      }
1651      __ jcc(Assembler::equal, one);
1652    } else {
1653      // get object class
1654      // not a safepoint as obj null check happens earlier
1655      __ cmpl(obj, 0);
1656      __ jcc(Assembler::equal, zero);
1657      __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1658      if (k->is_loaded()) {
1659        // See if we get an immediate positive hit
1660        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
1661        __ jcc(Assembler::equal, one);
1662        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
1663          // check for self
1664          __ cmpoop(klass_RInfo, k->encoding());
1665          __ jcc(Assembler::equal, one);
1666          __ pushl(klass_RInfo);
1667          __ pushoop(k->encoding());
1668          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1669          __ popl(klass_RInfo);
1670          __ popl(dst);
1671          __ jmp(done);
1672        }
1673      } else {
1674        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
1675
1676        __ movl(dst, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
1677        // See if we get an immediate positive hit
1678        __ cmpl(k_RInfo, Address(klass_RInfo, dst, Address::times_1));
1679        __ jcc(Assembler::equal, one);
1680        // check for immediate negative hit
1681        __ cmpl(dst, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
1682        __ jcc(Assembler::notEqual, zero);
1683        // check for self
1684        __ cmpl(klass_RInfo, k_RInfo);
1685        __ jcc(Assembler::equal, one);
1686
1687        __ pushl(klass_RInfo);
1688        __ pushl(k_RInfo);
1689        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1690        __ popl(klass_RInfo);
1691        __ popl(dst);
1692        __ jmp(done);
1693      }
1694    }
1695    __ bind(zero);
1696    __ xorl(dst, dst);
1697    __ jmp(done);
1698    __ bind(one);
1699    __ movl(dst, 1);
1700    __ bind(done);
1701  } else {
1702    ShouldNotReachHere();
1703  }
1705}
1706
1707
1708void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1709  if (op->code() == lir_cas_long) {
1710    assert(VM_Version::supports_cx8(), "wrong machine");
1711    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1712    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1713    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1714    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
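        // cmpxchg8b has fixed operands: it compares rdx:rax with the memory
        // operand and, on a match, stores rcx:rbx into it; on a mismatch it
        // loads the memory value into rdx:rax. Hence the register pinning
        // asserted above.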
1715    Register addr = op->addr()->as_register();
1716    if (os::is_MP()) {
1717      __ lock();
1718    }
1719    __ cmpxchg8(Address(addr, 0));
1720
1721  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1722    Register addr = op->addr()->as_register();
1723    Register newval = op->new_value()->as_register();
1724    Register cmpval = op->cmp_value()->as_register();
1725    assert(cmpval == rax, "wrong register");
1726    assert(newval != NULL, "new val must be register");
1727    assert(cmpval != newval, "cmp and new values must be in different registers");
1728    assert(cmpval != addr, "cmp and addr must be in different registers");
1729    assert(newval != addr, "new value and addr must be in different registers");
1730    if (os::is_MP()) {
1731      __ lock();
1732    }
1733    __ cmpxchg(newval, Address(addr, 0));
1734  } else {
1735    Unimplemented();
1736  }
1737}
1738
1739
1740void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
1741  Assembler::Condition acond, ncond;
1742  switch (condition) {
1743    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1744    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1745    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1746    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1747    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1748    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1749    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1750    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1751    default:                    ShouldNotReachHere();
1752  }
1753
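      // Materialize opr1 into result unconditionally, then conditionally
      // overwrite it with opr2, i.e. result = condition ? opr1 : opr2.
      // The branch-free path below therefore moves opr2 under the negated
      // condition ncond.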
1754  if (opr1->is_cpu_register()) {
1755    reg2reg(opr1, result);
1756  } else if (opr1->is_stack()) {
1757    stack2reg(opr1, result, result->type());
1758  } else if (opr1->is_constant()) {
1759    const2reg(opr1, result, lir_patch_none, NULL);
1760  } else {
1761    ShouldNotReachHere();
1762  }
1763
1764  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
1765    // optimized version that does not require a branch
1766    if (opr2->is_single_cpu()) {
1767      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1768      __ cmovl(ncond, result->as_register(), opr2->as_register());
1769    } else if (opr2->is_double_cpu()) {
1770      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1771      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1772      __ cmovl(ncond, result->as_register_lo(), opr2->as_register_lo());
1773      __ cmovl(ncond, result->as_register_hi(), opr2->as_register_hi());
1774    } else if (opr2->is_single_stack()) {
1775      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
1776    } else if (opr2->is_double_stack()) {
1777      __ cmovl(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
1778      __ cmovl(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));
1779    } else {
1780      ShouldNotReachHere();
1781    }
1782
1783  } else {
1784    Label skip;
1785    __ jcc (acond, skip);
1786    if (opr2->is_cpu_register()) {
1787      reg2reg(opr2, result);
1788    } else if (opr2->is_stack()) {
1789      stack2reg(opr2, result, result->type());
1790    } else if (opr2->is_constant()) {
1791      const2reg(opr2, result, lir_patch_none, NULL);
1792    } else {
1793      ShouldNotReachHere();
1794    }
1795    __ bind(skip);
1796  }
1797}
1798
1799
1800void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1801  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1802
1803  if (left->is_single_cpu()) {
1804    assert(left == dest, "left and dest must be equal");
1805    Register lreg = left->as_register();
1806
1807    if (right->is_single_cpu()) {
1808      // cpu register - cpu register
1809      Register rreg = right->as_register();
1810      switch (code) {
1811        case lir_add: __ addl (lreg, rreg); break;
1812        case lir_sub: __ subl (lreg, rreg); break;
1813        case lir_mul: __ imull(lreg, rreg); break;
1814        default:      ShouldNotReachHere();
1815      }
1816
1817    } else if (right->is_stack()) {
1818      // cpu register - stack
1819      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1820      switch (code) {
1821        case lir_add: __ addl(lreg, raddr); break;
1822        case lir_sub: __ subl(lreg, raddr); break;
1823        default:      ShouldNotReachHere();
1824      }
1825
1826    } else if (right->is_constant()) {
1827      // cpu register - constant
1828      jint c = right->as_constant_ptr()->as_jint();
1829      switch (code) {
1830        case lir_add: {
1831          __ increment(lreg, c);
1832          break;
1833        }
1834        case lir_sub: {
1835          __ decrement(lreg, c);
1836          break;
1837        }
1838        default: ShouldNotReachHere();
1839      }
1840
1841    } else {
1842      ShouldNotReachHere();
1843    }
1844
1845  } else if (left->is_double_cpu()) {
1846    assert(left == dest, "left and dest must be equal");
1847    Register lreg_lo = left->as_register_lo();
1848    Register lreg_hi = left->as_register_hi();
1849
1850    if (right->is_double_cpu()) {
1851      // cpu register - cpu register
1852      Register rreg_lo = right->as_register_lo();
1853      Register rreg_hi = right->as_register_hi();
1854      assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi);
1855      switch (code) {
1856        case lir_add:
1857          __ addl(lreg_lo, rreg_lo);
1858          __ adcl(lreg_hi, rreg_hi);
1859          break;
1860        case lir_sub:
1861          __ subl(lreg_lo, rreg_lo);
1862          __ sbbl(lreg_hi, rreg_hi);
1863          break;
1864        case lir_mul:
1865          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
1866          __ imull(lreg_hi, rreg_lo);
1867          __ imull(rreg_hi, lreg_lo);
1868          __ addl (rreg_hi, lreg_hi);
1869          __ mull (rreg_lo);
1870          __ addl (lreg_hi, rreg_hi);
1871          break;
1872        default:
1873          ShouldNotReachHere();
1874      }
1875
1876    } else if (right->is_constant()) {
1877      // cpu register - constant
1878      jint c_lo = right->as_constant_ptr()->as_jint_lo();
1879      jint c_hi = right->as_constant_ptr()->as_jint_hi();
1880      switch (code) {
1881        case lir_add:
1882          __ addl(lreg_lo, c_lo);
1883          __ adcl(lreg_hi, c_hi);
1884          break;
1885        case lir_sub:
1886          __ subl(lreg_lo, c_lo);
1887          __ sbbl(lreg_hi, c_hi);
1888          break;
1889        default:
1890          ShouldNotReachHere();
1891      }
1892
1893    } else {
1894      ShouldNotReachHere();
1895    }
1896
1897  } else if (left->is_single_xmm()) {
1898    assert(left == dest, "left and dest must be equal");
1899    XMMRegister lreg = left->as_xmm_float_reg();
1900
1901    if (right->is_single_xmm()) {
1902      XMMRegister rreg = right->as_xmm_float_reg();
1903      switch (code) {
1904        case lir_add: __ addss(lreg, rreg);  break;
1905        case lir_sub: __ subss(lreg, rreg);  break;
1906        case lir_mul_strictfp: // fall through
1907        case lir_mul: __ mulss(lreg, rreg);  break;
1908        case lir_div_strictfp: // fall through
1909        case lir_div: __ divss(lreg, rreg);  break;
1910        default: ShouldNotReachHere();
1911      }
1912    } else {
1913      Address raddr;
1914      if (right->is_single_stack()) {
1915        raddr = frame_map()->address_for_slot(right->single_stack_ix());
1916      } else if (right->is_constant()) {
1917        // hack for now
1918        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
1919      } else {
1920        ShouldNotReachHere();
1921      }
1922      switch (code) {
1923        case lir_add: __ addss(lreg, raddr);  break;
1924        case lir_sub: __ subss(lreg, raddr);  break;
1925        case lir_mul_strictfp: // fall through
1926        case lir_mul: __ mulss(lreg, raddr);  break;
1927        case lir_div_strictfp: // fall through
1928        case lir_div: __ divss(lreg, raddr);  break;
1929        default: ShouldNotReachHere();
1930      }
1931    }
1932
1933  } else if (left->is_double_xmm()) {
1934    assert(left == dest, "left and dest must be equal");
1935
1936    XMMRegister lreg = left->as_xmm_double_reg();
1937    if (right->is_double_xmm()) {
1938      XMMRegister rreg = right->as_xmm_double_reg();
1939      switch (code) {
1940        case lir_add: __ addsd(lreg, rreg);  break;
1941        case lir_sub: __ subsd(lreg, rreg);  break;
1942        case lir_mul_strictfp: // fall through
1943        case lir_mul: __ mulsd(lreg, rreg);  break;
1944        case lir_div_strictfp: // fall through
1945        case lir_div: __ divsd(lreg, rreg);  break;
1946        default: ShouldNotReachHere();
1947      }
1948    } else {
1949      Address raddr;
1950      if (right->is_double_stack()) {
1951        raddr = frame_map()->address_for_slot(right->double_stack_ix());
1952      } else if (right->is_constant()) {
1953        // hack for now
1954        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
1955      } else {
1956        ShouldNotReachHere();
1957      }
1958      switch (code) {
1959        case lir_add: __ addsd(lreg, raddr);  break;
1960        case lir_sub: __ subsd(lreg, raddr);  break;
1961        case lir_mul_strictfp: // fall through
1962        case lir_mul: __ mulsd(lreg, raddr);  break;
1963        case lir_div_strictfp: // fall through
1964        case lir_div: __ divsd(lreg, raddr);  break;
1965        default: ShouldNotReachHere();
1966      }
1967    }
1968
1969  } else if (left->is_single_fpu()) {
1970    assert(dest->is_single_fpu(),  "fpu stack allocation required");
1971
1972    if (right->is_single_fpu()) {
1973      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
1974
1975    } else {
1976      assert(left->fpu_regnr() == 0, "left must be on TOS");
1977      assert(dest->fpu_regnr() == 0, "dest must be on TOS");
1978
1979      Address raddr;
1980      if (right->is_single_stack()) {
1981        raddr = frame_map()->address_for_slot(right->single_stack_ix());
1982      } else if (right->is_constant()) {
1983        address const_addr = float_constant(right->as_jfloat());
1984        assert(const_addr != NULL, "incorrect float/double constant maintenance");
1985        // hack for now
1986        raddr = __ as_Address(InternalAddress(const_addr));
1987      } else {
1988        ShouldNotReachHere();
1989      }
1990
1991      switch (code) {
1992        case lir_add: __ fadd_s(raddr); break;
1993        case lir_sub: __ fsub_s(raddr); break;
1994        case lir_mul_strictfp: // fall through
1995        case lir_mul: __ fmul_s(raddr); break;
1996        case lir_div_strictfp: // fall through
1997        case lir_div: __ fdiv_s(raddr); break;
1998        default:      ShouldNotReachHere();
1999      }
2000    }
2001
2002  } else if (left->is_double_fpu()) {
2003    assert(dest->is_double_fpu(),  "fpu stack allocation required");
2004
2005    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2006      // Double values require special handling for strictfp mul/div on x86
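          // The x87 stack computes with an extended exponent range, so a
          // product or quotient that ought to overflow or underflow as a
          // 64-bit double can survive in a register. Pre-scaling by bias1 here
          // and re-scaling by bias2 afterwards pushes the exponent back into
          // the IEEE double range, as strictfp requires.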
2007      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
2008      __ fmulp(left->fpu_regnrLo() + 1);
2009    }
2010
2011    if (right->is_double_fpu()) {
2012      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2013
2014    } else {
2015      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2016      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2017
2018      Address raddr;
2019      if (right->is_double_stack()) {
2020        raddr = frame_map()->address_for_slot(right->double_stack_ix());
2021      } else if (right->is_constant()) {
2022        // hack for now
2023        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2024      } else {
2025        ShouldNotReachHere();
2026      }
2027
2028      switch (code) {
2029        case lir_add: __ fadd_d(raddr); break;
2030        case lir_sub: __ fsub_d(raddr); break;
2031        case lir_mul_strictfp: // fall through
2032        case lir_mul: __ fmul_d(raddr); break;
2033        case lir_div_strictfp: // fall through
2034        case lir_div: __ fdiv_d(raddr); break;
2035        default: ShouldNotReachHere();
2036      }
2037    }
2038
2039    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2040      // Double values require special handling for strictfp mul/div on x86
2041      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
2042      __ fmulp(dest->fpu_regnrLo() + 1);
2043    }
2044
2045  } else if (left->is_single_stack() || left->is_address()) {
2046    assert(left == dest, "left and dest must be equal");
2047
2048    Address laddr;
2049    if (left->is_single_stack()) {
2050      laddr = frame_map()->address_for_slot(left->single_stack_ix());
2051    } else if (left->is_address()) {
2052      laddr = as_Address(left->as_address_ptr());
2053    } else {
2054      ShouldNotReachHere();
2055    }
2056
2057    if (right->is_single_cpu()) {
2058      Register rreg = right->as_register();
2059      switch (code) {
2060        case lir_add: __ addl(laddr, rreg); break;
2061        case lir_sub: __ subl(laddr, rreg); break;
2062        default:      ShouldNotReachHere();
2063      }
2064    } else if (right->is_constant()) {
2065      jint c = right->as_constant_ptr()->as_jint();
2066      switch (code) {
2067        case lir_add: {
2068          __ increment(laddr, c);
2069          break;
2070        }
2071        case lir_sub: {
2072          __ decrement(laddr, c);
2073          break;
2074        }
2075        default: ShouldNotReachHere();
2076      }
2077    } else {
2078      ShouldNotReachHere();
2079    }
2080
2081  } else {
2082    ShouldNotReachHere();
2083  }
2084}
2085
2086void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2087  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
2088  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2089  assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2090
2091  bool left_is_tos = (left_index == 0);
2092  bool dest_is_tos = (dest_index == 0);
2093  int non_tos_index = (left_is_tos ? right_index : left_index);
2094
2095  switch (code) {
2096    case lir_add:
2097      if (pop_fpu_stack)       __ faddp(non_tos_index);
2098      else if (dest_is_tos)    __ fadd (non_tos_index);
2099      else                     __ fadda(non_tos_index);
2100      break;
2101
2102    case lir_sub:
2103      if (left_is_tos) {
2104        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
2105        else if (dest_is_tos)  __ fsub  (non_tos_index);
2106        else                   __ fsubra(non_tos_index);
2107      } else {
2108        if (pop_fpu_stack)     __ fsubp (non_tos_index);
2109        else if (dest_is_tos)  __ fsubr (non_tos_index);
2110        else                   __ fsuba (non_tos_index);
2111      }
2112      break;
2113
2114    case lir_mul_strictfp: // fall through
2115    case lir_mul:
2116      if (pop_fpu_stack)       __ fmulp(non_tos_index);
2117      else if (dest_is_tos)    __ fmul (non_tos_index);
2118      else                     __ fmula(non_tos_index);
2119      break;
2120
2121    case lir_div_strictfp: // fall through
2122    case lir_div:
2123      if (left_is_tos) {
2124        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
2125        else if (dest_is_tos)  __ fdiv  (non_tos_index);
2126        else                   __ fdivra(non_tos_index);
2127      } else {
2128        if (pop_fpu_stack)     __ fdivp (non_tos_index);
2129        else if (dest_is_tos)  __ fdivr (non_tos_index);
2130        else                   __ fdiva (non_tos_index);
2131      }
2132      break;
2133
2134    case lir_rem:
2135      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2136      __ fremr(noreg);
2137      break;
2138
2139    default:
2140      ShouldNotReachHere();
2141  }
2142}
2143
2144
2145void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2146  if (value->is_double_xmm()) {
2147    switch(code) {
2148      case lir_abs :
2149        {
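              // abs() by bit-twiddling: ANDing with the sign mask clears the
              // IEEE sign bit and leaves exponent and mantissa untouched; the
              // mask lives in the 16-byte-aligned double_signmask_pool.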
2150          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2151            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2152          }
2153          __ andpd(dest->as_xmm_double_reg(),
2154                    ExternalAddress((address)double_signmask_pool));
2155        }
2156        break;
2157
2158      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2159      // all other intrinsics are not available in the SSE instruction set, so FPU is used
2160      default      : ShouldNotReachHere();
2161    }
2162
2163  } else if (value->is_double_fpu()) {
2164    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2165    switch(code) {
2166      case lir_log   : __ flog() ; break;
2167      case lir_log10 : __ flog10() ; break;
2168      case lir_abs   : __ fabs() ; break;
2169      case lir_sqrt  : __ fsqrt(); break;
2170      case lir_sin   :
2171        // Should consider not saving rbx if not necessary
2172        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
2173        break;
2174      case lir_cos :
2175        // Should consider not saving rbx if not necessary
2176        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
2177        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
2178        break;
2179      case lir_tan :
2180        // Should consider not saving rbx if not necessary
2181        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
2182        break;
2183      default      : ShouldNotReachHere();
2184    }
2185  } else {
2186    Unimplemented();
2187  }
2188}
2189
2190void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2191  // assert(left->destroys_register(), "check");
2192  if (left->is_single_cpu()) {
2193    Register reg = left->as_register();
2194    if (right->is_constant()) {
2195      int val = right->as_constant_ptr()->as_jint();
2196      switch (code) {
2197        case lir_logic_and: __ andl (reg, val); break;
2198        case lir_logic_or:  __ orl  (reg, val); break;
2199        case lir_logic_xor: __ xorl (reg, val); break;
2200        default: ShouldNotReachHere();
2201      }
2202    } else if (right->is_stack()) {
2203      // added support for stack operands
2204      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2205      switch (code) {
2206        case lir_logic_and: __ andl (reg, raddr); break;
2207        case lir_logic_or:  __ orl  (reg, raddr); break;
2208        case lir_logic_xor: __ xorl (reg, raddr); break;
2209        default: ShouldNotReachHere();
2210      }
2211    } else {
2212      Register rright = right->as_register();
2213      switch (code) {
2214        case lir_logic_and: __ andl (reg, rright); break;
2215        case lir_logic_or : __ orl  (reg, rright); break;
2216        case lir_logic_xor: __ xorl (reg, rright); break;
2217        default: ShouldNotReachHere();
2218      }
2219    }
2220    move_regs(reg, dst->as_register());
2221  } else {
2222    Register l_lo = left->as_register_lo();
2223    Register l_hi = left->as_register_hi();
2224    if (right->is_constant()) {
2225      int r_lo = right->as_constant_ptr()->as_jint_lo();
2226      int r_hi = right->as_constant_ptr()->as_jint_hi();
2227      switch (code) {
2228        case lir_logic_and:
2229          __ andl(l_lo, r_lo);
2230          __ andl(l_hi, r_hi);
2231          break;
2232        case lir_logic_or:
2233          __ orl(l_lo, r_lo);
2234          __ orl(l_hi, r_hi);
2235          break;
2236        case lir_logic_xor:
2237          __ xorl(l_lo, r_lo);
2238          __ xorl(l_hi, r_hi);
2239          break;
2240        default: ShouldNotReachHere();
2241      }
2242    } else {
2243      Register r_lo = right->as_register_lo();
2244      Register r_hi = right->as_register_hi();
2245      assert(l_lo != r_hi, "overwriting registers");
2246      switch (code) {
2247        case lir_logic_and:
2248          __ andl(l_lo, r_lo);
2249          __ andl(l_hi, r_hi);
2250          break;
2251        case lir_logic_or:
2252          __ orl(l_lo, r_lo);
2253          __ orl(l_hi, r_hi);
2254          break;
2255        case lir_logic_xor:
2256          __ xorl(l_lo, r_lo);
2257          __ xorl(l_hi, r_hi);
2258          break;
2259        default: ShouldNotReachHere();
2260      }
2261    }
2262
2263    Register dst_lo = dst->as_register_lo();
2264    Register dst_hi = dst->as_register_hi();
2265
2266    if (dst_lo == l_hi) {
2267      assert(dst_hi != l_lo, "overwriting registers");
2268      move_regs(l_hi, dst_hi);
2269      move_regs(l_lo, dst_lo);
2270    } else {
2271      assert(dst_lo != l_hi, "overwriting registers");
2272      move_regs(l_lo, dst_lo);
2273      move_regs(l_hi, dst_hi);
2274    }
2275  }
2276}
2277
2278
2279// we assume that rax and rdx can be overwritten
2280void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2281
2282  assert(left->is_single_cpu(),   "left must be register");
2283  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
2284  assert(result->is_single_cpu(), "result must be register");
2285
2286  //  assert(left->destroys_register(), "check");
2287  //  assert(right->destroys_register(), "check");
2288
2289  Register lreg = left->as_register();
2290  Register dreg = result->as_register();
2291
2292  if (right->is_constant()) {
2293    int divisor = right->as_constant_ptr()->as_jint();
2294    assert(divisor > 0 && is_power_of_2(divisor), "must be");
2295    if (code == lir_idiv) {
2296      assert(lreg == rax, "must be rax,");
2297      assert(temp->as_register() == rdx, "tmp register must be rdx");
2298      __ cdql(); // sign extend into rdx:rax
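          // Truncating signed division by 2^k: after cdq, rdx is 0 for a
          // non-negative dividend and -1 for a negative one, so andl/addl add
          // (divisor - 1) only to negative inputs before the arithmetic shift.
          // E.g. -13/8: rdx = -1, -1 & 7 = 7, -13 + 7 = -6, -6 >> 3 = -1, the
          // round-toward-zero quotient. (For divisor == 2, subtracting rdx,
          // i.e. subtracting -1, is the same +1 adjustment.)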
2299      if (divisor == 2) {
2300        __ subl(lreg, rdx);
2301      } else {
2302        __ andl(rdx, divisor - 1);
2303        __ addl(lreg, rdx);
2304      }
2305      __ sarl(lreg, log2_intptr(divisor));
2306      move_regs(lreg, dreg);
2307    } else if (code == lir_irem) {
2308      Label done;
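          // Remainder for a power-of-two divisor: the mask keeps the sign bit
          // plus the low bits. A non-negative masked value already is the
          // remainder; for a negative dividend the dec/or/inc sequence
          // sign-extends the low bits (mapping 0x80000000 back to 0).
          // E.g. -13 % 8: -13 & 0x80000007 = 0x80000003; dec -> 0x80000002;
          // or 0xFFFFFFF8 -> 0xFFFFFFFA; inc -> 0xFFFFFFFB = -5.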
2309      __ movl(dreg, lreg);
2310      __ andl(dreg, 0x80000000 | (divisor - 1));
2311      __ jcc(Assembler::positive, done);
2312      __ decrement(dreg);
2313      __ orl(dreg, ~(divisor - 1));
2314      __ increment(dreg);
2315      __ bind(done);
2316    } else {
2317      ShouldNotReachHere();
2318    }
2319  } else {
2320    Register rreg = right->as_register();
2321    assert(lreg == rax, "left register must be rax,");
2322    assert(rreg != rdx, "right register must not be rdx");
2323    assert(temp->as_register() == rdx, "tmp register must be rdx");
2324
2325    move_regs(lreg, rax);
2326
2327    int idivl_offset = __ corrected_idivl(rreg);
2328    add_debug_info_for_div0(idivl_offset, info);
2329    if (code == lir_irem) {
2330      move_regs(rdx, dreg); // result is in rdx
2331    } else {
2332      move_regs(rax, dreg);
2333    }
2334  }
2335}
2336
2337
2338void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2339  if (opr1->is_single_cpu()) {
2340    Register reg1 = opr1->as_register();
2341    if (opr2->is_single_cpu()) {
2342      // cpu register - cpu register
2343      __ cmpl(reg1, opr2->as_register());
2344    } else if (opr2->is_stack()) {
2345      // cpu register - stack
2346      __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2347    } else if (opr2->is_constant()) {
2348      // cpu register - constant
2349      LIR_Const* c = opr2->as_constant_ptr();
2350      if (c->type() == T_INT) {
2351        __ cmpl(reg1, c->as_jint());
2352      } else if (c->type() == T_OBJECT) {
2353        jobject o = c->as_jobject();
2354        if (o == NULL) {
2355          __ cmpl(reg1, NULL_WORD);
2356        } else {
2357          __ cmpoop(reg1, c->as_jobject());
2358        }
2359      } else {
2360        ShouldNotReachHere();
2361      }
2362      // cpu register - address
2363    } else if (opr2->is_address()) {
2364      if (op->info() != NULL) {
2365        add_debug_info_for_null_check_here(op->info());
2366      }
2367      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2368    } else {
2369      ShouldNotReachHere();
2370    }
2371
2372  } else if(opr1->is_double_cpu()) {
2373    Register xlo = opr1->as_register_lo();
2374    Register xhi = opr1->as_register_hi();
2375    if (opr2->is_double_cpu()) {
2376      // cpu register - cpu register
2377      Register ylo = opr2->as_register_lo();
2378      Register yhi = opr2->as_register_hi();
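          // 64-bit compare via subtract-with-borrow: after subl/sbbl the sign
          // and overflow flags describe the full 64-bit difference, which
          // covers the signed conditions. ZF only reflects the high word, so
          // for equal/notEqual the two halves are or-ed together below.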
2379      __ subl(xlo, ylo);
2380      __ sbbl(xhi, yhi);
2381      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2382        __ orl(xhi, xlo);
2383      }
2384    } else if (opr2->is_constant()) {
2385      // cpu register - constant 0
2386      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2387      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2388      __ orl(xhi, xlo);
2389    } else {
2390      ShouldNotReachHere();
2391    }
2392
2393  } else if (opr1->is_single_xmm()) {
2394    XMMRegister reg1 = opr1->as_xmm_float_reg();
2395    if (opr2->is_single_xmm()) {
2396      // xmm register - xmm register
2397      __ ucomiss(reg1, opr2->as_xmm_float_reg());
2398    } else if (opr2->is_stack()) {
2399      // xmm register - stack
2400      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2401    } else if (opr2->is_constant()) {
2402      // xmm register - constant
2403      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2404    } else if (opr2->is_address()) {
2405      // xmm register - address
2406      if (op->info() != NULL) {
2407        add_debug_info_for_null_check_here(op->info());
2408      }
2409      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2410    } else {
2411      ShouldNotReachHere();
2412    }
2413
2414  } else if (opr1->is_double_xmm()) {
2415    XMMRegister reg1 = opr1->as_xmm_double_reg();
2416    if (opr2->is_double_xmm()) {
2417      // xmm register - xmm register
2418      __ ucomisd(reg1, opr2->as_xmm_double_reg());
2419    } else if (opr2->is_stack()) {
2420      // xmm register - stack
2421      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2422    } else if (opr2->is_constant()) {
2423      // xmm register - constant
2424      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2425    } else if (opr2->is_address()) {
2426      // xmm register - address
2427      if (op->info() != NULL) {
2428        add_debug_info_for_null_check_here(op->info());
2429      }
2430      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2431    } else {
2432      ShouldNotReachHere();
2433    }
2434
2435  } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2436    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2437    assert(opr2->is_fpu_register(), "both must be registers");
2438    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2439
2440  } else if (opr1->is_address() && opr2->is_constant()) {
2441    if (op->info() != NULL) {
2442      add_debug_info_for_null_check_here(op->info());
2443    }
2444    // special case: address - constant
2445    LIR_Address* addr = opr1->as_address_ptr();
2446    LIR_Const* c = opr2->as_constant_ptr();
2447    if (c->type() == T_INT) {
2448      __ cmpl(as_Address(addr), c->as_jint());
2449    } else if (c->type() == T_OBJECT) {
2450      __ cmpoop(as_Address(addr), c->as_jobject());
2451    } else {
2452      ShouldNotReachHere();
2453    }
2454
2455  } else {
2456    ShouldNotReachHere();
2457  }
2458}
2459
2460void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2461  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2462    if (left->is_single_xmm()) {
2463      assert(right->is_single_xmm(), "must match");
2464      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2465    } else if (left->is_double_xmm()) {
2466      assert(right->is_double_xmm(), "must match");
2467      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2468
2469    } else {
2470      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2471      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2472
2473      assert(left->fpu() == 0, "left must be on TOS");
2474      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2475                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2476    }
2477  } else {
2478    assert(code == lir_cmp_l2i, "check");
2479    __ lcmp2int(left->as_register_hi(),
2480                left->as_register_lo(),
2481                right->as_register_hi(),
2482                right->as_register_lo());
2483    move_regs(left->as_register_hi(), dst->as_register());
2484  }
2485}
2486
2487
2488void LIR_Assembler::align_call(LIR_Code code) {
2489  if (os::is_MP()) {
2490    // make sure that the displacement word of the call ends up word aligned
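        // The 4-byte displacement gets patched while other CPUs may be
        // executing the call; keeping it inside one word lets the patch
        // happen as a single atomic store.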
2491    int offset = __ offset();
2492    switch (code) {
2493      case lir_static_call:
2494      case lir_optvirtual_call:
2495        offset += NativeCall::displacement_offset;
2496        break;
2497      case lir_icvirtual_call:
2498        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2499        break;
2500      case lir_virtual_call:  // currently, sparc-specific for niagara
2501      default: ShouldNotReachHere();
2502    }
2503    while (offset++ % BytesPerWord != 0) {
2504      __ nop();
2505    }
2506  }
2507}
2508
2509
2510void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
2511  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2512         "must be aligned");
2513  __ call(AddressLiteral(entry, rtype));
2514  add_call_info(code_offset(), info);
2515}
2516
2517
2518void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
2519  RelocationHolder rh = virtual_call_Relocation::spec(pc());
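      // An inline-cache call site starts out with a sentinel (non_oop_word)
      // in the IC register; inline-cache resolution later patches in the
      // receiver klass the cache is monomorphic for.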
2520  __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
2521  assert(!os::is_MP() ||
2522         (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2523         "must be aligned");
2524  __ call(AddressLiteral(entry, rh));
2525  add_call_info(code_offset(), info);
2526}
2527
2528
2529/* Currently, vtable-dispatch is only enabled for sparc platforms */
2530void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
2531  ShouldNotReachHere();
2532}
2533
2534void LIR_Assembler::emit_static_call_stub() {
2535  address call_pc = __ pc();
2536  address stub = __ start_a_stub(call_stub_size);
2537  if (stub == NULL) {
2538    bailout("static call stub overflow");
2539    return;
2540  }
2541
2542  int start = __ offset();
2543  if (os::is_MP()) {
2544    // make sure that the displacement word of the call ends up word aligned
2545    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
2546    while (offset++ % BytesPerWord != 0) {
2547      __ nop();
2548    }
2549  }
2550  __ relocate(static_stub_Relocation::spec(call_pc));
2551  __ movoop(rbx, (jobject)NULL);
2552  // must be set to -1 at code generation time
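      // Both the NULL oop above and the -1 jump target are placeholders:
      // when the static call is resolved, the stub is patched so that rbx
      // holds the callee methodOop and the jump goes to its entry point.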
2553  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2554  __ jump(RuntimeAddress((address)-1));
2555
2556  assert(__ offset() - start <= call_stub_size, "stub too big");
2557  __ end_a_stub();
2558}
2559
2560
2561void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
2562  assert(exceptionOop->as_register() == rax, "must match");
2563  assert(unwind || exceptionPC->as_register() == rdx, "must match");
2564
2565  // exception object is not added to oop map by LinearScan
2566  // (LinearScan assumes that no oops are in fixed registers)
2567  info->add_register_oop(exceptionOop);
2568  Runtime1::StubID unwind_id;
2569
2570  if (!unwind) {
2571    // get current pc information
2572    // pc is only needed if the method has an exception handler, the unwind code does not need it.
2573    int pc_for_athrow_offset = __ offset();
2574    InternalAddress pc_for_athrow(__ pc());
2575    __ lea(exceptionPC->as_register(), pc_for_athrow);
2576    add_call_info(pc_for_athrow_offset, info); // for exception handler
2577
2578    __ verify_not_null_oop(rax);
2579    // search an exception handler (rax: exception oop, rdx: throwing pc)
2580    if (compilation()->has_fpu_code()) {
2581      unwind_id = Runtime1::handle_exception_id;
2582    } else {
2583      unwind_id = Runtime1::handle_exception_nofpu_id;
2584    }
2585  } else {
2586    unwind_id = Runtime1::unwind_exception_id;
2587  }
2588  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2589
2590  // enough room for two byte trap
2591  __ nop();
2592}
2593
2594
2595void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2596
2597  // optimized version for linear scan:
2598  // * count must be already in ECX (guaranteed by LinearScan)
2599  // * left and dest must be equal
2600  // * tmp must be unused
2601  assert(count->as_register() == SHIFT_count, "count must be in ECX");
2602  assert(left == dest, "left and dest must be equal");
2603  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2604
2605  if (left->is_single_cpu()) {
2606    Register value = left->as_register();
2607    assert(value != SHIFT_count, "left cannot be ECX");
2608
2609    switch (code) {
2610      case lir_shl:  __ shll(value); break;
2611      case lir_shr:  __ sarl(value); break;
2612      case lir_ushr: __ shrl(value); break;
2613      default: ShouldNotReachHere();
2614    }
2615  } else if (left->is_double_cpu()) {
2616    Register lo = left->as_register_lo();
2617    Register hi = left->as_register_hi();
2618    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
2619
2620    switch (code) {
2621      case lir_shl:  __ lshl(hi, lo);        break;
2622      case lir_shr:  __ lshr(hi, lo, true);  break;
2623      case lir_ushr: __ lshr(hi, lo, false); break;
2624      default: ShouldNotReachHere();
2625    }
2626  } else {
2627    ShouldNotReachHere();
2628  }
2629}
2630
2631
2632void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2633  if (dest->is_single_cpu()) {
2634    // first move left into dest so that left is not destroyed by the shift
2635    Register value = dest->as_register();
2636    count = count & 0x1F; // Java spec: int shifts use only the low five bits of the count (JLS 15.19)
2637
2638    move_regs(left->as_register(), value);
2639    switch (code) {
2640      case lir_shl:  __ shll(value, count); break;
2641      case lir_shr:  __ sarl(value, count); break;
2642      case lir_ushr: __ shrl(value, count); break;
2643      default: ShouldNotReachHere();
2644    }
2645  } else if (dest->is_double_cpu()) {
2646    Unimplemented();
2647  } else {
2648    ShouldNotReachHere();
2649  }
2650}
2651
2652
2653void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2654  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2655  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2656  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2657  __ movl (Address(rsp, offset_from_rsp_in_bytes), r);
2658}
2659
2660
2661void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2662  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2663  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2664  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2665  __ movl (Address(rsp, offset_from_rsp_in_bytes), c);
2666}
2667
2668
2669void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2670  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2671  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2672  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2673  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
2674}
2675
2676
2677// This code replaces a call to arraycopy; no exceptions may
2678// be thrown in this code; they must be thrown in the System.arraycopy
2679// activation frame. We could save some checks if this were not the case.
2680void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2681  ciArrayKlass* default_type = op->expected_type();
2682  Register src = op->src()->as_register();
2683  Register dst = op->dst()->as_register();
2684  Register src_pos = op->src_pos()->as_register();
2685  Register dst_pos = op->dst_pos()->as_register();
2686  Register length  = op->length()->as_register();
2687  Register tmp = op->tmp()->as_register();
2688
2689  CodeStub* stub = op->stub();
2690  int flags = op->flags();
2691  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2692  if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2693
2694  // if we don't know anything or it's an object array, just go through the generic arraycopy
2695  if (default_type == NULL) {
2696    Label done;
2697    // save outgoing arguments on stack in case call to System.arraycopy is needed
2698    // HACK ALERT. This code used to push the parameters in a hardwired fashion
2699    // for interpreter calling conventions. Now we have to do it in new style conventions.
2700    // For the moment until C1 gets the new register allocator I just force all the
2701    // args to the right place (except the register args) and then on the back side
2702    // reload the register args properly if we go slow path. Yuck
2703
2704    // These are proper for the calling convention
2705
2706    store_parameter(length, 2);
2707    store_parameter(dst_pos, 1);
2708    store_parameter(dst, 0);
2709
2710    // these are just temporary placements until we need to reload
2711    store_parameter(src_pos, 3);
2712    store_parameter(src, 4);
2713    assert(src == rcx && src_pos == rdx, "mismatch in calling convention");
2714
2715    // pass arguments: we may push here because this is not a safepoint; SP must be fixed at each safepoint
2716    __ pushl(length);
2717    __ pushl(dst_pos);
2718    __ pushl(dst);
2719    __ pushl(src_pos);
2720    __ pushl(src);
2721    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
2722    __ call_VM_leaf(entry, 5); // removes the pushed parameters from the stack
2723
    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movl (dst,     Address(rsp, 0*BytesPerWord));
    __ movl (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movl (length,  Address(rsp, 2*BytesPerWord));
    __ movl (src_pos, Address(rsp, 3*BytesPerWord));
    __ movl (src,     Address(rsp, 4*BytesPerWord));
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  Address::ScaleFactor scale;

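  // Map the element size onto an addressing scale (for lea) and a shift
  // (for turning an element count into a byte count).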
  switch (elem_size) {
    case 1 :
      shift_amount = 0;
      scale = Address::times_1;
      break;
    case 2 :
      shift_amount = 1;
      scale = Address::times_2;
      break;
    case 4 :
      shift_amount = 2;
      scale = Address::times_4;
      break;
    case 8 :
      shift_amount = 3;
      scale = Address::times_8;
      break;
    default:
      ShouldNotReachHere();
  }

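  // The length and klass fields of both arrays are inspected repeatedly
  // below, so form their addresses once up front.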
  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testl(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testl(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

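  // Range checks: src_pos + length (and dst_pos + length) must not exceed
  // the corresponding array length. The unsigned 'above' compare also
  // rejects sums that wrapped past INT_MAX.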
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ leal(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ leal(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

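  // Fast type check: identical klass words guarantee a compatible copy;
  // every other case (including legal subtype copies) is left to the stub.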
  if (flags & LIR_OpArrayCopy::type_check) {
    __ movl(tmp, src_klass_addr);
    __ cmpl(tmp, dst_klass_addr);
    __ jcc(Assembler::notEqual, *stub->entry());
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type against the incoming classes. For the
    // primitive case the types must match exactly: src.klass and dst.klass
    // must each be exactly the default type. For the object array case,
    // if no type check is needed then either the dst type is exactly the
    // expected type and the src type is a subtype (which we can't check),
    // or src is the same array as dst but not necessarily exactly of type
    // default_type.
    Label known_ok, halt;
    __ movoop(tmp, default_type->encoding());
    if (basic_type != T_OBJECT) {
      __ cmpl(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      __ cmpl(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmpl(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpl(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

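  // Compute the raw addresses of the first copied element in src and dst
  // and hand them, together with the length, to the runtime copy routine.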
  __ leal(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ leal(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
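  // The shift turns the element count into a byte count for the primitive
  // copy routine; the oop copy below is given the element count unchanged.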
  if (shift_amount > 0 && basic_type != T_OBJECT) {
    __ shll(length, shift_amount);
  }
  store_parameter(length, 2);
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0);
  }

  __ bind(*stub->continuation());
}


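// Emit the inline fast path for monitorenter/monitorexit; lock_object and
// unlock_object branch to the slow-path stub when the fast path fails.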
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ movoop(mdo, md->encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ addl(counter_addr, DataLayout::counter_increment);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      Tier1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions with concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addl(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ movoop(recv_addr, known_klass->encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addl(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
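      // No statically known receiver: load the receiver's klass at run time
      // and bump the matching row of the VirtualCallData.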
      __ movl(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[i].
        __ cmpl(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ jcc(Assembler::notEqual, next_test);
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ addl(data_addr, DataLayout::counter_increment);
        __ jmp(update_done);
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ cmpl(recv_addr, NULL_WORD);
        __ jcc(Assembler::notEqual, next_test);
        __ movl(recv_addr, recv);
        __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ jmp(update_done);
        }
        __ bind(next_test);
      }

      __ bind(update_done);
    }
  }
}


void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ leal(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


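// Backward branches typically target loop headers; keep such targets
// word-aligned (4 bytes on 32-bit x86).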
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
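    // Copy the negated pair into dest, ordering the two moves so that
    // neither half is clobbered before it has been read.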
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }

  } else if (dest->is_single_xmm()) {
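    // XORing with the sign-flip mask toggles only the IEEE sign bit,
    // negating the value without touching exponent or mantissa bits.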
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_register();
  __ leal(reg, as_Address(addr->as_address_ptr()));
}


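// Any arguments have already been placed by the caller (note that args is
// unused here); emit the call and attach debug info when one is supplied.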
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

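  // A volatile long must be read or written in one atomic 64-bit access;
  // route the value through an XMM or x87 register rather than emitting
  // two 32-bit moves.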
  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
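      // Extract the two halves: low word first, then shift the high word
      // down. Note that the psrlq clobbers the source XMM register.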
      __ movd(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movd(dest->as_register_hi(), src->as_xmm_double_reg());
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
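    // fild_d/fistp_d move all 64 bits in a single instruction, which gives
    // the required atomicity; the x87 round trip is exact for 64-bit
    // integers, so the long's bit pattern is preserved.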
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::membar() {
  __ membar();
}

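// x86 only reorders loads with older stores, so acquire and release
// semantics come for free; only a full barrier needs an actual fence.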
void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ get_thread(result_reg->as_register());
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}


#undef __