1/*
2 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#include "precompiled.hpp"
27#include "c1/c1_Compilation.hpp"
28#include "c1/c1_FrameMap.hpp"
29#include "c1/c1_Instruction.hpp"
30#include "c1/c1_LIRAssembler.hpp"
31#include "c1/c1_LIRGenerator.hpp"
32#include "c1/c1_Runtime1.hpp"
33#include "c1/c1_ValueStack.hpp"
34#include "ci/ciArray.hpp"
35#include "ci/ciObjArrayKlass.hpp"
36#include "ci/ciTypeArrayKlass.hpp"
37#include "runtime/sharedRuntime.hpp"
38#include "runtime/stubRoutines.hpp"
39#include "vmreg_aarch64.inline.hpp"
40
41#ifdef ASSERT
42#define __ gen()->lir(__FILE__, __LINE__)->
43#else
44#define __ gen()->lir()->
45#endif
46
// Item will be loaded into a byte register; Intel only.
// AArch64 has no dedicated byte registers, so an ordinary load suffices.
void LIRItem::load_byte_item() {
  load_item();
}
51
52
53void LIRItem::load_nonconstant() {
54  LIR_Opr r = value()->operand();
55  if (r->is_constant()) {
56    _result = r;
57  } else {
58    load_item();
59  }
60}
61
62//--------------------------------------------------------------
63//               LIRGenerator
64//--------------------------------------------------------------
65
66
// Fixed registers and conventions used by the AArch64 C1 backend.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }  // exception oop lives in r0
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }      // exception pc lives in r3
// Fixed divide/shift operand registers are an x86 concept; unused here.
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }   // monitor slot index: any int vreg
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }  // no temp needed for thread access
76
77
78LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
79  LIR_Opr opr;
80  switch (type->tag()) {
81    case intTag:     opr = FrameMap::r0_opr;          break;
82    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
83    case longTag:    opr = FrameMap::long0_opr;        break;
84    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
85    case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
86
87    case addressTag:
88    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
89  }
90
91  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
92  return opr;
93}
94
95
96LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
97  LIR_Opr reg = new_register(T_INT);
98  set_vreg_flag(reg, LIRGenerator::byte_reg);
99  return reg;
100}
101
102
103//--------- loading items into registers --------------------------------
104
105
106bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
107  if (v->type()->as_IntConstant() != NULL) {
108    return v->type()->as_IntConstant()->value() == 0L;
109  } else if (v->type()->as_LongConstant() != NULL) {
110    return v->type()->as_LongConstant()->value() == 0L;
111  } else if (v->type()->as_ObjectConstant() != NULL) {
112    return v->type()->as_ObjectConstant()->value()->is_null_object();
113  } else {
114    return false;
115  }
116}
117
118bool LIRGenerator::can_inline_as_constant(Value v) const {
119  // FIXME: Just a guess
120  if (v->type()->as_IntConstant() != NULL) {
121    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
122  } else if (v->type()->as_LongConstant() != NULL) {
123    return v->type()->as_LongConstant()->value() == 0L;
124  } else if (v->type()->as_ObjectConstant() != NULL) {
125    return v->type()->as_ObjectConstant()->value()->is_null_object();
126  } else {
127    return false;
128  }
129}
130
131
132bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
133
134
// No dedicated register is reserved for safepoint polling on AArch64.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
138
139
140LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
141                                            int shift, int disp, BasicType type) {
142  assert(base->is_register(), "must be");
143  intx large_disp = disp;
144
145  // accumulate fixed displacements
146  if (index->is_constant()) {
147    large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
148    index = LIR_OprFact::illegalOpr;
149  }
150
151  if (index->is_register()) {
152    // apply the shift and accumulate the displacement
153    if (shift > 0) {
154      LIR_Opr tmp = new_pointer_register();
155      __ shift_left(index, shift, tmp);
156      index = tmp;
157    }
158    if (large_disp != 0) {
159      LIR_Opr tmp = new_pointer_register();
160      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
161        __ add(tmp, tmp, LIR_OprFact::intptrConst(large_disp));
162        index = tmp;
163      } else {
164        __ move(tmp, LIR_OprFact::intptrConst(large_disp));
165        __ add(tmp, index, tmp);
166        index = tmp;
167      }
168      large_disp = 0;
169    }
170  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
171    // index is illegal so replace it with the displacement loaded into a register
172    index = new_pointer_register();
173    __ move(LIR_OprFact::intptrConst(large_disp), index);
174    large_disp = 0;
175  }
176
177  // at this point we either have base + index or base + displacement
178  if (large_disp == 0) {
179    return new LIR_Address(base, index, type);
180  } else {
181    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
182    return new LIR_Address(base, large_disp, type);
183  }
184}
185
186
// Build the address of element `index_opr` in the array held by
// `array_opr`, for elements of the given type.  When a precise card
// mark is needed, the full element address is computed once into a
// register so the store and the card mark can share it.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // constant index: fold header offset plus scaled index into one displacement
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      // fold the array header offset into the base register so the
      // addressing mode only needs base + scaled index
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr =  new LIR_Address(array_opr,
                            index_opr,
                            LIR_Address::scale(type),
                            offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
220
221LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
222  LIR_Opr r;
223  if (type == T_LONG) {
224    r = LIR_OprFact::longConst(x);
225    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
226      LIR_Opr tmp = new_register(type);
227      __ move(r, tmp);
228      return tmp;
229    }
230  } else if (type == T_INT) {
231    r = LIR_OprFact::intConst(x);
232    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
233      // This is all rather nasty.  We don't know whether our constant
234      // is required for a logical or an arithmetic operation, wo we
235      // don't know what the range of valid values is!!
236      LIR_Opr tmp = new_register(type);
237      __ move(r, tmp);
238      return tmp;
239    }
240  } else {
241    ShouldNotReachHere();
242    r = NULL;  // unreachable
243  }
244  return r;
245}
246
247
248
249void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
250  LIR_Opr pointer = new_pointer_register();
251  __ move(LIR_OprFact::intptrConst(counter), pointer);
252  LIR_Address* addr = new LIR_Address(pointer, type);
253  increment_counter(addr, step);
254}
255
256
257void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
258  LIR_Opr imm = NULL;
259  switch(addr->type()) {
260  case T_INT:
261    imm = LIR_OprFact::intConst(step);
262    break;
263  case T_LONG:
264    imm = LIR_OprFact::longConst(step);
265    break;
266  default:
267    ShouldNotReachHere();
268  }
269  LIR_Opr reg = new_register(addr->type());
270  __ load(addr, reg);
271  __ add(reg, imm, reg);
272  __ store(reg, addr);
273}
274
275void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
276  LIR_Opr reg = new_register(T_INT);
277  __ load(generate_address(base, disp, T_INT), reg, info);
278  __ cmp(condition, reg, LIR_OprFact::intConst(c));
279}
280
281void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
282  LIR_Opr reg1 = new_register(T_INT);
283  __ load(generate_address(base, disp, type), reg1, info);
284  __ cmp(condition, reg, reg1);
285}
286
287
288bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
289
290  if (is_power_of_2(c - 1)) {
291    __ shift_left(left, exact_log2(c - 1), tmp);
292    __ add(tmp, left, result);
293    return true;
294  } else if (is_power_of_2(c + 1)) {
295    __ shift_left(left, exact_log2(c + 1), tmp);
296    __ sub(tmp, left, result);
297    return true;
298  } else {
299    return false;
300  }
301}
302
303void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
304  BasicType type = item->type();
305  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
306}
307
308//----------------------------------------------------------------------
309//             visitor functions
310//----------------------------------------------------------------------
311
312
// Array store: array[index] = value, emitting range/null/store checks
// and GC write barriers as required.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // a store check is needed unless the stored value is provably null
  // and no type profiling was requested
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    // no checks: the value may stay wherever load_for_store puts it
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // unsigned compare: branch to the stub when length <= index
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // dynamic subtype check of the stored value against the array's element klass
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise card mark
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    // primitive store; boolean values may need masking to 0/1 first
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}
388
// monitorenter: lock the object, recording debug info for both the
// potential NullPointerException and the locked state.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                        x->monitor_no(), info_for_exception, info);
}
414
415
// monitorexit: unlock the object.  The object itself is not loaded
// here (dont_load_item); monitor_exit reloads it via obj_temp.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  // lock holds the monitor slot address; obj_temp is a scratch register
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
427
428
429void LIRGenerator::do_NegateOp(NegateOp* x) {
430
431  LIRItem from(x->x(), this);
432  from.load_item();
433  LIR_Opr result = rlock_result(x);
434  __ negate (from.result(), result);
435
436}
437
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    // NOTE(review): the naming is reversed on purpose here — "right"
    // wraps x->x() (the dividend) and "left" wraps x->y() (the divisor);
    // the dividend ends up in cc->at(0) and the divisor in cc->at(1).
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    // force the divisor into the second C argument register first,
    // then move the dividend into the first
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    // NOTE(review): tmp is allocated for the strictfp case but is not
    // passed to arithmetic_op_fpu below (NULL is) — looks unused; verify.
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);

  set_result(x, round_item(reg));
}
498
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    // explicit divide-by-zero check: trap to the stub when divisor == 0
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        // multiply has no immediate form, and add/sub immediates must
        // fit the instruction encoding; otherwise load into a register
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
554
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    // explicit divide-by-zero check before the division
    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    // add/sub can take an immediate operand when it encodes
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // constants amenable to shift-based strength reduction need not be loaded
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}
615
616void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
617  // when an operand with use count 1 is the left operand, then it is
618  // likely that no move for 2-operand-LIR-form is necessary
619  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
620    x->swap_operands();
621  }
622
623  ValueTag tag = x->type()->tag();
624  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
625  switch (tag) {
626    case floatTag:
627    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
628    case longTag:    do_ArithmeticOp_Long(x); return;
629    case intTag:     do_ArithmeticOp_Int(x);  return;
630  }
631  ShouldNotReachHere();
632}
633
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
// Shift amounts are masked to 5 bits (int) or 6 bits (long), matching
// JVM shift semantics.
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    // constant shift amount: mask at compile time and emit an
    // immediate-form shift
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    // variable shift amount: mask it into a temp register, then emit a
    // register-form shift
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}
719
// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  // a constant right operand can be used directly only if it encodes
  // as an AArch64 logical immediate (bitmask immediate)
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant()))))  {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}
751
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Produces the three-way -1/0/1 comparison result in a register.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    // lcmp2int clobbers its left input
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // the flag selects NaN ordering: *cmpl treats NaN as less-than
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
773
// Unsafe compareAndSwap intrinsic: atomically replace the field at
// obj+offset with val if it currently equals cmp; result is the Java
// boolean success flag.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      // offset fits in 32 bits: use it as an immediate displacement
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      // offset too large: materialize it into a register
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  // NOTE(review): presumably the backend's CAS leaves its status flag in
  // r8, which is flipped here to the Java boolean sense — confirm against
  // the LIR assembler's cas implementation.
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);

  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
845
// Math.* intrinsics: abs/sqrt are emitted inline; the transcendental
// functions are implemented as leaf calls into the shared runtime.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      // inner switch only sees _dsqrt/_dabs (restricted by the outer case)
      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // pick the matching shared-runtime entry point
      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}
912
913
914void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
915  assert(x->number_of_arguments() == 5, "wrong type");
916
917  // Make all state_for calls early since they can emit code
918  CodeEmitInfo* info = state_for(x, x->state());
919
920  LIRItem src(x->argument_at(0), this);
921  LIRItem src_pos(x->argument_at(1), this);
922  LIRItem dst(x->argument_at(2), this);
923  LIRItem dst_pos(x->argument_at(3), this);
924  LIRItem length(x->argument_at(4), this);
925
926  // operands for arraycopy must use fixed registers, otherwise
927  // LinearScan will fail allocation (because arraycopy always needs a
928  // call)
929
930  // The java calling convention will give us enough registers
931  // so that on the stub side the args will be perfect already.
932  // On the other slow/special case side we call C and the arg
933  // positions are not similar enough to pick one as the best.
934  // Also because the java calling convention is a "shifted" version
935  // of the C convention we can process the java args trivially into C
936  // args without worry of overwriting during the xfer
937
938  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
939  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
940  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
941  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
942  length.load_item_force  (FrameMap::as_opr(j_rarg4));
943
944  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
945
946  set_no_result(x);
947
948  int flags;
949  ciArrayKlass* expected_type;
950  arraycopy_helper(x, &flags, &expected_type);
951
952  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
953}
954
// Generate LIR for the java.util.zip.CRC32 intrinsics.
//
// _updateCRC32 (single byte) is expanded inline via the update_crc32
// LIR op.  _updateBytesCRC32/_updateByteBufferCRC32 compute the data
// address (base + offset, plus the byte[] header for the array case)
// and make a leaf call to the StubRoutines::updateBytesCRC32() stub
// with C arguments (int crc, address buf, int len).
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      // For byte[] the data starts past the array header; for the
      // ByteBuffer variant no header adjustment is applied (buf is
      // presumably already the raw data address — see the Java caller).
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if(off.result()->is_constant()) {
        // Fold a constant offset into the displacement; no index register needed.
        index = LIR_OprFact::illegalOpr;
       offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        // Widen the 32-bit index to 64 bits for use in an address.
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        // Pre-add the displacement so the final address has offset 0.
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      // C signature of the stub: (int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      // Materialize the effective buffer address for the call.
      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1031
// java.util.zip.CRC32C intrinsics — not implemented for C1 on this platform.
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}
1035
1036void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1037  assert(x->number_of_arguments() == 3, "wrong type");
1038  assert(UseFMA, "Needs FMA instructions support.");
1039  LIRItem value(x->argument_at(0), this);
1040  LIRItem value1(x->argument_at(1), this);
1041  LIRItem value2(x->argument_at(2), this);
1042
1043  value.load_item();
1044  value1.load_item();
1045  value2.load_item();
1046
1047  LIR_Opr calc_input = value.result();
1048  LIR_Opr calc_input1 = value1.result();
1049  LIR_Opr calc_input2 = value2.result();
1050  LIR_Opr calc_result = rlock_result(x);
1051
1052  switch (x->id()) {
1053  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1054  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1055  default:                    ShouldNotReachHere();
1056  }
1057}
1058
// vectorizedMismatch intrinsic — not supported by C1 on this platform;
// reaching here is a compiler bug (intrinsic should not have been enabled).
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
1062
1063// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
1064// _i2b, _i2c, _i2s
1065void LIRGenerator::do_Convert(Convert* x) {
1066  LIRItem value(x->value(), this);
1067  value.load_item();
1068  LIR_Opr input = value.result();
1069  LIR_Opr result = rlock(x);
1070
1071  // arguments of lir_convert
1072  LIR_Opr conv_input = input;
1073  LIR_Opr conv_result = result;
1074  ConversionStub* stub = NULL;
1075
1076  __ convert(x->op(), conv_input, conv_result);
1077
1078  assert(result->is_virtual(), "result must be virtual register");
1079  set_result(x, result);
1080}
1081
1082void LIRGenerator::do_NewInstance(NewInstance* x) {
1083#ifndef PRODUCT
1084  if (PrintNotLoaded && !x->klass()->is_loaded()) {
1085    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1086  }
1087#endif
1088  CodeEmitInfo* info = state_for(x, x->state());
1089  LIR_Opr reg = result_register_for(x->type());
1090  new_instance(reg, x->klass(), x->is_unresolved(),
1091                       FrameMap::r2_oop_opr,
1092                       FrameMap::r5_oop_opr,
1093                       FrameMap::r4_oop_opr,
1094                       LIR_OprFact::illegalOpr,
1095                       FrameMap::r3_metadata_opr, info);
1096  LIR_Opr result = rlock_result(x);
1097  __ move(reg, result);
1098}
1099
1100void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1101  CodeEmitInfo* info = state_for(x, x->state());
1102
1103  LIRItem length(x->length(), this);
1104  length.load_item_force(FrameMap::r19_opr);
1105
1106  LIR_Opr reg = result_register_for(x->type());
1107  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1108  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1109  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1110  LIR_Opr tmp4 = reg;
1111  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1112  LIR_Opr len = length.result();
1113  BasicType elem_type = x->elt_type();
1114
1115  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1116
1117  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1118  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1119
1120  LIR_Opr result = rlock_result(x);
1121  __ move(reg, result);
1122}
1123
1124void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1125  LIRItem length(x->length(), this);
1126  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1127  // and therefore provide the state before the parameters have been consumed
1128  CodeEmitInfo* patching_info = NULL;
1129  if (!x->klass()->is_loaded() || PatchALot) {
1130    patching_info =  state_for(x, x->state_before());
1131  }
1132
1133  CodeEmitInfo* info = state_for(x, x->state());
1134
1135  LIR_Opr reg = result_register_for(x->type());
1136  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1137  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1138  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1139  LIR_Opr tmp4 = reg;
1140  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1141
1142  length.load_item_force(FrameMap::r19_opr);
1143  LIR_Opr len = length.result();
1144
1145  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1146  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1147  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1148    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1149  }
1150  klass2reg_with_patching(klass_reg, obj, patching_info);
1151  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1152
1153  LIR_Opr result = rlock_result(x);
1154  __ move(reg, result);
1155}
1156
1157
// multianewarray bytecode.  The dimension sizes are stored into the
// outgoing stack area (read via a varargs pointer) and the allocation
// itself is done by the Runtime1::new_multi_array_id runtime stub.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension value in a LIRItem (loaded later, below).
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // Store each dimension size into a 4-byte outgoing stack slot; the
  // runtime stub reads them through the varargs pointer set up below.
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  // Call arguments in fixed registers: klass in r0, rank in r19,
  // pointer to the dimension array (the current sp) in r2.
  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1207
// Visited at the start of each basic block; no per-block setup is
// needed on this platform.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}
1211
// checkcast bytecode.  Emits a lir_checkcast op; when the dynamic type
// check fails, the selected CodeStub either throws the appropriate
// exception or deoptimizes (invokespecial receiver check).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Choose the slow path to take when the check fails.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    // Deoptimize rather than throw: let the interpreter re-execute the check.
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is needed when the klass must be loaded at run time or
  // decoded from a compressed class pointer.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1250
// instanceof bytecode: emits a lir_instanceof op producing 0/1.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is needed when the klass must be loaded at run time or
  // decoded from a compressed class pointer.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
1270
// Generate code for an If node: load/normalize the operands, add a
// backedge safepoint when required, then emit compare, branch
// profiling, phi moves and the conditional branch.
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions (swap operands, flip the condition)
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  // Keep the right operand as a constant when it fits an AArch64
  // add/sub immediate; otherwise force it into a register.
  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant()))  {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  // Float compares get an extra successor for the "unordered" result.
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
1336
1337LIR_Opr LIRGenerator::getThreadPointer() {
1338   return FrameMap::as_pointer_opr(rthread);
1339}
1340
// Hook for per-block entry tracing; not implemented on this platform.
void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1342
// Store to a volatile field: delegates to the volatile_store_mem_reg
// LIR op (the assembler side handles the required ordering).
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}
1347
// Load from a volatile field, with a leading membar when needed (see
// the 8179954 rationale below).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}
1362
1363void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1364                                     BasicType type, bool is_volatile) {
1365  LIR_Address* addr = new LIR_Address(src, offset, type);
1366  __ load(addr, dst);
1367}
1368
1369
// Unsafe put of a value at (src + offset).  Object stores are wrapped
// in the GC pre/post write barriers; primitive stores are plain moves.
// NOTE(review): is_volatile is not acted upon here — presumably any
// required ordering is emitted by the caller; confirm.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}
1386
// Unsafe getAndSet/getAndAdd intrinsics: atomic exchange (xchg) or
// atomic add (xadd) at (object + offset), returning the old value.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // Avoid aliasing between the value operand and the result: copy the
  // value into a fresh register if they coincide.
  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  // Fold a constant offset into the address displacement when it fits
  // in 32 bits; otherwise use a register index.
  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      // A new oop was stored: emit the post-write barrier on the
      // computed address.
      post_barrier(ptr, data);
    }
  }
}
1443