c1_LIRGenerator.cpp revision 7866:759a167d2381
1/*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "c1/c1_Compilation.hpp"
27#include "c1/c1_FrameMap.hpp"
28#include "c1/c1_Instruction.hpp"
29#include "c1/c1_LIRAssembler.hpp"
30#include "c1/c1_LIRGenerator.hpp"
31#include "c1/c1_ValueStack.hpp"
32#include "ci/ciArrayKlass.hpp"
33#include "ci/ciInstance.hpp"
34#include "ci/ciObjArray.hpp"
35#include "memory/cardTableModRefBS.hpp"
36#include "runtime/arguments.hpp"
37#include "runtime/sharedRuntime.hpp"
38#include "runtime/stubRoutines.hpp"
39#include "runtime/vm_version.hpp"
40#include "utilities/bitMap.inline.hpp"
41#include "utilities/macros.hpp"
42#if INCLUDE_ALL_GCS
43#include "gc_implementation/g1/heapRegion.hpp"
44#endif // INCLUDE_ALL_GCS
45
46#ifdef ASSERT
47#define __ gen()->lir(__FILE__, __LINE__)->
48#else
49#define __ gen()->lir()->
50#endif
51
52// TODO: ARM - Use some recognizable constant which still fits architectural constraints
53#ifdef ARM
54#define PATCHED_ADDR  (204)
55#else
56#define PATCHED_ADDR  (max_jint)
57#endif
58
59void PhiResolverState::reset(int max_vregs) {
60  // Initialize array sizes
61  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
62  _virtual_operands.trunc_to(0);
63  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
64  _other_operands.trunc_to(0);
65  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
66  _vreg_table.trunc_to(0);
67}
68
69
70
71//--------------------------------------------------------------
72// PhiResolver
73
74// Resolves cycles:
75//
76//  r1 := r2  becomes  temp := r1
77//  r2 := r1           r1 := r2
78//                     r2 := temp
79// and orders moves:
80//
81//  r2 := r3  becomes  r1 := r2
82//  r1 := r2           r2 := r3
83
84PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
85 : _gen(gen)
86 , _state(gen->resolver_state())
87 , _temp(LIR_OprFact::illegalOpr)
88{
89  // reinitialize the shared state arrays
90  _state.reset(max_vregs);
91}
92
93
94void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
95  assert(src->is_valid(), "");
96  assert(dest->is_valid(), "");
97  __ move(src, dest);
98}
99
100
101void PhiResolver::move_temp_to(LIR_Opr dest) {
102  assert(_temp->is_valid(), "");
103  emit_move(_temp, dest);
104  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
105}
106
107
108void PhiResolver::move_to_temp(LIR_Opr src) {
109  assert(_temp->is_illegal(), "");
110  _temp = _gen->new_register(src->type());
111  emit_move(src, _temp);
112}
113
114
115// Traverse the assignment graph in depth-first order and generate moves in post order,
116// e.g. the two assignments b := c, a := b start with node c:
117// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
118// Generates moves in this order: move b to a and move c to b
119// e.g. the cycle a := b, b := a starts with node a:
120// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
121// Generates moves in this order: move b to temp, move a to b, move temp to a
122void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
123  if (!dest->visited()) {
124    dest->set_visited();
125    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
126      move(dest, dest->destination_at(i));
127    }
128  } else if (!dest->start_node()) {
129    // cycle in graph detected
130    assert(_loop == NULL, "only one loop valid!");
131    _loop = dest;
132    move_to_temp(src->operand());
133    return;
134  } // else dest is a start node
135
136  if (!dest->assigned()) {
137    if (_loop == dest) {
138      move_temp_to(dest->operand());
139      dest->set_assigned();
140    } else if (src != NULL) {
141      emit_move(src->operand(), dest->operand());
142      dest->set_assigned();
143    }
144  }
145}
146
147
148PhiResolver::~PhiResolver() {
149  int i;
150  // resolve any cycles in moves from and to virtual registers
151  for (i = virtual_operands().length() - 1; i >= 0; i --) {
152    ResolveNode* node = virtual_operands()[i];
153    if (!node->visited()) {
154      _loop = NULL;
155      move(NULL, node);
156      node->set_start_node();
157      assert(_temp->is_illegal(), "move_temp_to() call missing");
158    }
159  }
160
161  // generate moves from non-virtual registers to arbitrary destinations
162  for (i = other_operands().length() - 1; i >= 0; i --) {
163    ResolveNode* node = other_operands()[i];
164    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
165      emit_move(node->operand(), node->destination_at(j)->operand());
166    }
167  }
168}
169
170
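// Look up or create the ResolveNode for an operand. Virtual operands are
// cached in vreg_table so each vreg maps to a single node; non-virtual
// operands may only appear as move sources and are kept in other_operands.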
171ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
172  ResolveNode* node;
173  if (opr->is_virtual()) {
174    int vreg_num = opr->vreg_number();
175    node = vreg_table().at_grow(vreg_num, NULL);
176    assert(node == NULL || node->operand() == opr, "");
177    if (node == NULL) {
178      node = new ResolveNode(opr);
179      vreg_table()[vreg_num] = node;
180    }
181    // Make sure that all virtual operands show up in the list when
182    // they are used as the source of a move.
183    if (source && !virtual_operands().contains(node)) {
184      virtual_operands().append(node);
185    }
186  } else {
187    assert(source, "");
188    node = new ResolveNode(opr);
189    other_operands().append(node);
190  }
191  return node;
192}
193
194
195void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
196  assert(dest->is_virtual(), "");
197  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
198  assert(src->is_valid(), "");
199  assert(dest->is_valid(), "");
200  ResolveNode* source = source_node(src);
201  source->append(destination_node(dest));
202}
203
204
205//--------------------------------------------------------------
206// LIRItem
207
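// Attach opr as the result of this item's value; for virtual registers also
// record which instruction produced the operand.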
208void LIRItem::set_result(LIR_Opr opr) {
209  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
210  value()->set_operand(opr);
211
212  if (opr->is_virtual()) {
213    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
214  }
215
216  _result = opr;
217}
218
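// Make sure the item's value is available in a register. A constant is
// copied into a fresh register without rebinding the instruction's operand,
// so other uses still see the constant.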
219void LIRItem::load_item() {
220  if (result()->is_illegal()) {
221    // update the item's result
222    _result = value()->operand();
223  }
224  if (!result()->is_register()) {
225    LIR_Opr reg = _gen->new_register(value()->type());
226    __ move(result(), reg);
227    if (result()->is_constant()) {
228      _result = reg;
229    } else {
230      set_result(reg);
231    }
232  }
233}
234
235
236void LIRItem::load_for_store(BasicType type) {
237  if (_gen->can_store_as_constant(value(), type)) {
238    _result = value()->operand();
239    if (!_result->is_constant()) {
240      _result = LIR_OprFact::value_type(value()->type());
241    }
242  } else if (type == T_BYTE || type == T_BOOLEAN) {
243    load_byte_item();
244  } else {
245    load_item();
246  }
247}
248
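// Force the item into the given register; if the types differ, the value is
// routed through a stack slot first (except on ARM/E500V2).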
249void LIRItem::load_item_force(LIR_Opr reg) {
250  LIR_Opr r = result();
251  if (r != reg) {
252#if !defined(ARM) && !defined(E500V2)
253    if (r->type() != reg->type()) {
254      // moves between different types need an intervening spill slot
255      r = _gen->force_to_spill(r, reg->type());
256    }
257#endif
258    __ move(r, reg);
259    _result = reg;
260  }
261}
262
263ciObject* LIRItem::get_jobject_constant() const {
264  ObjectType* oc = type()->as_ObjectType();
265  if (oc) {
266    return oc->constant_value();
267  }
268  return NULL;
269}
270
271
272jint LIRItem::get_jint_constant() const {
273  assert(is_constant() && value() != NULL, "");
274  assert(type()->as_IntConstant() != NULL, "type check");
275  return type()->as_IntConstant()->value();
276}
277
278
279jint LIRItem::get_address_constant() const {
280  assert(is_constant() && value() != NULL, "");
281  assert(type()->as_AddressConstant() != NULL, "type check");
282  return type()->as_AddressConstant()->value();
283}
284
285
286jfloat LIRItem::get_jfloat_constant() const {
287  assert(is_constant() && value() != NULL, "");
288  assert(type()->as_FloatConstant() != NULL, "type check");
289  return type()->as_FloatConstant()->value();
290}
291
292
293jdouble LIRItem::get_jdouble_constant() const {
294  assert(is_constant() && value() != NULL, "");
295  assert(type()->as_DoubleConstant() != NULL, "type check");
296  return type()->as_DoubleConstant()->value();
297}
298
299
300jlong LIRItem::get_jlong_constant() const {
301  assert(is_constant() && value() != NULL, "");
302  assert(type()->as_LongConstant() != NULL, "type check");
303  return type()->as_LongConstant()->value();
304}
305
306
307
308//--------------------------------------------------------------
309
310
311void LIRGenerator::init() {
312  _bs = Universe::heap()->barrier_set();
313}
314
315
316void LIRGenerator::block_do_prolog(BlockBegin* block) {
317#ifndef PRODUCT
318  if (PrintIRWithLIR) {
319    block->print();
320  }
321#endif
322
323  // set up the list of LIR instructions
324  assert(block->lir() == NULL, "LIR list already computed for this block");
325  _lir = new LIR_List(compilation(), block);
326  block->set_lir(_lir);
327
328  __ branch_destination(block->label());
329
330  if (LIRTraceExecution &&
331      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
332      !block->is_set(BlockBegin::exception_entry_flag)) {
333    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
334    trace_block_entry(block);
335  }
336}
337
338
339void LIRGenerator::block_do_epilog(BlockBegin* block) {
340#ifndef PRODUCT
341  if (PrintIRWithLIR) {
342    tty->cr();
343  }
344#endif
345
346  // LIR_Opr for unpinned constants shouldn't be referenced by other
347  // blocks so clear them out after processing the block.
348  for (int i = 0; i < _unpinned_constants.length(); i++) {
349    _unpinned_constants.at(i)->clear_operand();
350  }
351  _unpinned_constants.trunc_to(0);
352
353  // clear out any registers for other local constants
354  _constants.trunc_to(0);
355  _reg_for_constants.trunc_to(0);
356}
357
358
359void LIRGenerator::block_do(BlockBegin* block) {
360  CHECK_BAILOUT();
361
362  block_do_prolog(block);
363  set_block(block);
364
365  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
366    if (instr->is_pinned()) do_root(instr);
367  }
368
369  set_block(NULL);
370  block_do_epilog(block);
371}
372
373
374//-------------------------LIRGenerator-----------------------------
375
376// This is where the tree walk starts; instr must be a root.
377void LIRGenerator::do_root(Value instr) {
378  CHECK_BAILOUT();
379
380  InstructionMark im(compilation(), instr);
381
382  assert(instr->is_pinned(), "use only with roots");
383  assert(instr->subst() == instr, "shouldn't have missed substitution");
384
385  instr->visit(this);
386
387  assert(!instr->has_uses() || instr->operand()->is_valid() ||
388         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
389}
390
391
392// This is called for each node in the tree; the walk stops when a root is reached
393void LIRGenerator::walk(Value instr) {
394  InstructionMark im(compilation(), instr);
395  // stop the walk when we encounter a root
396  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
397    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
398  } else {
399    assert(instr->subst() == instr, "shouldn't have missed substitution");
400    instr->visit(this);
401    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
402  }
403}
404
405
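// Build the CodeEmitInfo (debug information) for instruction x. All stack and
// local values that are still unevaluated are walked so they have operands,
// and locals that are dead according to the liveness information are
// invalidated so linear scan can treat the remaining ones as live.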
406CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
407  assert(state != NULL, "state must be defined");
408
409#ifndef PRODUCT
410  state->verify();
411#endif
412
413  ValueStack* s = state;
414  for_each_state(s) {
415    if (s->kind() == ValueStack::EmptyExceptionState) {
416      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
417      continue;
418    }
419
420    int index;
421    Value value;
422    for_each_stack_value(s, index, value) {
423      assert(value->subst() == value, "missed substitution");
424      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
425        walk(value);
426        assert(value->operand()->is_valid(), "must be evaluated now");
427      }
428    }
429
430    int bci = s->bci();
431    IRScope* scope = s->scope();
432    ciMethod* method = scope->method();
433
434    MethodLivenessResult liveness = method->liveness_at_bci(bci);
435    if (bci == SynchronizationEntryBCI) {
436      if (x->as_ExceptionObject() || x->as_Throw()) {
437        // all locals are dead on exit from the synthetic unlocker
438        liveness.clear();
439      } else {
440        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
441      }
442    }
443    if (!liveness.is_valid()) {
444      // Degenerate or breakpointed method.
445      bailout("Degenerate or breakpointed method");
446    } else {
447      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
448      for_each_local_value(s, index, value) {
449        assert(value->subst() == value, "missed substitution");
450        if (liveness.at(index) && !value->type()->is_illegal()) {
451          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
452            walk(value);
453            assert(value->operand()->is_valid(), "must be evaluated now");
454          }
455        } else {
456          // NULL out this local so that linear scan can assume that all non-NULL values are live.
457          s->invalidate_local(index);
458        }
459      }
460    }
461  }
462
463  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
464}
465
466
467CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
468  return state_for(x, x->exception_state());
469}
470
471
472void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
473  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
474   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
475   * the class. */
476  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
477    assert(info != NULL, "info must be set if class is not loaded");
478    __ klass2reg_patch(NULL, r, info);
479  } else {
480    // no patching needed
481    __ metadata2reg(obj->constant_encoding(), r);
482  }
483}
484
485
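// Emit an array bounds check of index against the array's length field and
// branch to a RangeCheckStub if the index is out of range.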
486void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
487                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
488  CodeStub* stub = new RangeCheckStub(range_check_info, index);
489  if (index->is_constant()) {
490    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
491                index->as_jint(), null_check_info);
492    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
493  } else {
494    cmp_reg_mem(lir_cond_aboveEqual, index, array,
495                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
496    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
497  }
498}
499
500
501void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
502  CodeStub* stub = new RangeCheckStub(info, index, true);
503  if (index->is_constant()) {
504    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
505    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
506  } else {
507    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
508                java_nio_Buffer::limit_offset(), T_INT, info);
509    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
510  }
511  __ move(index, result);
512}
513
514
515
516void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
517  LIR_Opr result_op = result;
518  LIR_Opr left_op   = left;
519  LIR_Opr right_op  = right;
520
521  if (TwoOperandLIRForm && left_op != result_op) {
522    assert(right_op != result_op, "malformed");
523    __ move(left_op, result_op);
524    left_op = result_op;
525  }
526
527  switch(code) {
528    case Bytecodes::_dadd:
529    case Bytecodes::_fadd:
530    case Bytecodes::_ladd:
531    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
532    case Bytecodes::_fmul:
533    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;
534
535    case Bytecodes::_dmul:
536      {
537        if (is_strictfp) {
538          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
539        } else {
540          __ mul(left_op, right_op, result_op); break;
541        }
542      }
543      break;
544
545    case Bytecodes::_imul:
546      {
547        bool    did_strength_reduce = false;
548
549        if (right->is_constant()) {
550          int c = right->as_jint();
551          if (is_power_of_2(c)) {
552            // do not need tmp here
553            __ shift_left(left_op, exact_log2(c), result_op);
554            did_strength_reduce = true;
555          } else {
556            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
557          }
558        }
559        // we couldn't strength reduce so just emit the multiply
560        if (!did_strength_reduce) {
561          __ mul(left_op, right_op, result_op);
562        }
563      }
564      break;
565
566    case Bytecodes::_dsub:
567    case Bytecodes::_fsub:
568    case Bytecodes::_lsub:
569    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
570
571    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
572    // ldiv and lrem are implemented with a direct runtime call
573
574    case Bytecodes::_ddiv:
575      {
576        if (is_strictfp) {
577          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
578        } else {
579          __ div (left_op, right_op, result_op); break;
580        }
581      }
582      break;
583
584    case Bytecodes::_drem:
585    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
586
587    default: ShouldNotReachHere();
588  }
589}
590
591
592void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
593  arithmetic_op(code, result, left, right, false, tmp);
594}
595
596
597void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
598  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
599}
600
601
602void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
603  arithmetic_op(code, result, left, right, is_strictfp, tmp);
604}
605
606
607void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
608  if (TwoOperandLIRForm && value != result_op) {
609    assert(count != result_op, "malformed");
610    __ move(value, result_op);
611    value = result_op;
612  }
613
614  assert(count->is_constant() || count->is_register(), "must be");
615  switch(code) {
616  case Bytecodes::_ishl:
617  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
618  case Bytecodes::_ishr:
619  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
620  case Bytecodes::_iushr:
621  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
622  default: ShouldNotReachHere();
623  }
624}
625
626
627void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
628  if (TwoOperandLIRForm && left_op != result_op) {
629    assert(right_op != result_op, "malformed");
630    __ move(left_op, result_op);
631    left_op = result_op;
632  }
633
634  switch(code) {
635    case Bytecodes::_iand:
636    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
637
638    case Bytecodes::_ior:
639    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
640
641    case Bytecodes::_ixor:
642    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
643
644    default: ShouldNotReachHere();
645  }
646}
647
648
649void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
650  if (!GenerateSynchronizationCode) return;
651  // for slow path, use debug info for state after successful locking
652  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
653  __ load_stack_address_monitor(monitor_no, lock);
654  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
655  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
656}
657
658
659void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
660  if (!GenerateSynchronizationCode) return;
661  // setup registers
662  LIR_Opr hdr = lock;
663  lock = new_hdr;
664  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
665  __ load_stack_address_monitor(monitor_no, lock);
666  __ unlock_object(hdr, object, lock, scratch, slow_path);
667}
668
669#ifndef PRODUCT
670void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
671  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
672    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
673  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
674    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
675  }
676}
677#endif
678
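// Allocate a new instance. If the klass is loaded and does not need the slow
// path, emit an inline allocation with a NewInstanceStub as fallback;
// otherwise branch straight to the runtime allocation stub.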
679void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
680  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
681  // If klass is not loaded we do not know if the klass has finalizers:
682  if (UseFastNewInstance && klass->is_loaded()
683      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
684
685    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
686
687    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
688
689    assert(klass->is_loaded(), "must be loaded");
690    // allocate space for instance
691    assert(klass->size_helper() >= 0, "illegal instance size");
692    const int instance_size = align_object_size(klass->size_helper());
693    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
694                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
695  } else {
696    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
697    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
698    __ branch_destination(slow_path->continuation());
699  }
700}
701
702
703static bool is_constant_zero(Instruction* inst) {
704  IntConstant* c = inst->type()->as_IntConstant();
705  if (c) {
706    return (c->value() == 0);
707  }
708  return false;
709}
710
711
712static bool positive_constant(Instruction* inst) {
713  IntConstant* c = inst->type()->as_IntConstant();
714  if (c) {
715    return (c->value() >= 0);
716  }
717  return false;
718}
719
720
721static ciArrayKlass* as_array_klass(ciType* type) {
722  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
723    return (ciArrayKlass*)type;
724  } else {
725    return NULL;
726  }
727}
728
729static ciType* phi_declared_type(Phi* phi) {
730  ciType* t = phi->operand_at(0)->declared_type();
731  if (t == NULL) {
732    return NULL;
733  }
734  for(int i = 1; i < phi->operand_count(); i++) {
735    if (t != phi->operand_at(i)->declared_type()) {
736      return NULL;
737    }
738  }
739  return t;
740}
741
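// Determine which of the arraycopy checks (null, range, type, positivity,
// overlap, alignment) can be statically elided and the most likely element
// type of the arrays involved; the results are returned in *flagsp and
// *expected_typep.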
742void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
743  Instruction* src     = x->argument_at(0);
744  Instruction* src_pos = x->argument_at(1);
745  Instruction* dst     = x->argument_at(2);
746  Instruction* dst_pos = x->argument_at(3);
747  Instruction* length  = x->argument_at(4);
748
749  // first try to identify the likely type of the arrays involved
750  ciArrayKlass* expected_type = NULL;
751  bool is_exact = false, src_objarray = false, dst_objarray = false;
752  {
753    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
754    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
755    Phi* phi;
756    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
757      src_declared_type = as_array_klass(phi_declared_type(phi));
758    }
759    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
760    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
761    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
762      dst_declared_type = as_array_klass(phi_declared_type(phi));
763    }
764
765    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
766      // the types exactly match so the type is fully known
767      is_exact = true;
768      expected_type = src_exact_type;
769    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
770      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
771      ciArrayKlass* src_type = NULL;
772      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
773        src_type = (ciArrayKlass*) src_exact_type;
774      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
775        src_type = (ciArrayKlass*) src_declared_type;
776      }
777      if (src_type != NULL) {
778        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
779          is_exact = true;
780          expected_type = dst_type;
781        }
782      }
783    }
784    // at least pass along a good guess
785    if (expected_type == NULL) expected_type = dst_exact_type;
786    if (expected_type == NULL) expected_type = src_declared_type;
787    if (expected_type == NULL) expected_type = dst_declared_type;
788
789    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
790    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
791  }
792
793  // if a probable array type has been identified, figure out if any
794  // of the required checks for a fast case can be elided.
795  int flags = LIR_OpArrayCopy::all_flags;
796
797  if (!src_objarray)
798    flags &= ~LIR_OpArrayCopy::src_objarray;
799  if (!dst_objarray)
800    flags &= ~LIR_OpArrayCopy::dst_objarray;
801
802  if (!x->arg_needs_null_check(0))
803    flags &= ~LIR_OpArrayCopy::src_null_check;
804  if (!x->arg_needs_null_check(2))
805    flags &= ~LIR_OpArrayCopy::dst_null_check;
806
807
808  if (expected_type != NULL) {
809    Value length_limit = NULL;
810
811    IfOp* ifop = length->as_IfOp();
812    if (ifop != NULL) {
813      // look for expressions like min(v, a.length) which end up as
814      //   x > y ? y : x  or  x >= y ? y : x
815      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
816          ifop->x() == ifop->fval() &&
817          ifop->y() == ifop->tval()) {
818        length_limit = ifop->y();
819      }
820    }
821
822    // try to skip null checks and range checks
823    NewArray* src_array = src->as_NewArray();
824    if (src_array != NULL) {
825      flags &= ~LIR_OpArrayCopy::src_null_check;
826      if (length_limit != NULL &&
827          src_array->length() == length_limit &&
828          is_constant_zero(src_pos)) {
829        flags &= ~LIR_OpArrayCopy::src_range_check;
830      }
831    }
832
833    NewArray* dst_array = dst->as_NewArray();
834    if (dst_array != NULL) {
835      flags &= ~LIR_OpArrayCopy::dst_null_check;
836      if (length_limit != NULL &&
837          dst_array->length() == length_limit &&
838          is_constant_zero(dst_pos)) {
839        flags &= ~LIR_OpArrayCopy::dst_range_check;
840      }
841    }
842
843    // check from incoming constant values
844    if (positive_constant(src_pos))
845      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
846    if (positive_constant(dst_pos))
847      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
848    if (positive_constant(length))
849      flags &= ~LIR_OpArrayCopy::length_positive_check;
850
851    // see if the range check can be elided, which might also imply
852    // that src or dst is non-null.
853    ArrayLength* al = length->as_ArrayLength();
854    if (al != NULL) {
855      if (al->array() == src) {
856        // it's the length of the source array
857        flags &= ~LIR_OpArrayCopy::length_positive_check;
858        flags &= ~LIR_OpArrayCopy::src_null_check;
859        if (is_constant_zero(src_pos))
860          flags &= ~LIR_OpArrayCopy::src_range_check;
861      }
862      if (al->array() == dst) {
863        // it's the length of the destination array
864        flags &= ~LIR_OpArrayCopy::length_positive_check;
865        flags &= ~LIR_OpArrayCopy::dst_null_check;
866        if (is_constant_zero(dst_pos))
867          flags &= ~LIR_OpArrayCopy::dst_range_check;
868      }
869    }
870    if (is_exact) {
871      flags &= ~LIR_OpArrayCopy::type_check;
872    }
873  }
874
875  IntConstant* src_int = src_pos->type()->as_IntConstant();
876  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
877  if (src_int && dst_int) {
878    int s_offs = src_int->value();
879    int d_offs = dst_int->value();
880    if (src_int->value() >= dst_int->value()) {
881      flags &= ~LIR_OpArrayCopy::overlapping;
882    }
883    if (expected_type != NULL) {
884      BasicType t = expected_type->element_type()->basic_type();
885      int element_size = type2aelembytes(t);
886      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
887          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
888        flags &= ~LIR_OpArrayCopy::unaligned;
889      }
890    }
891  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
892    // src and dst positions are the same, or dst_pos is a constant zero, so assume a
893    // non-overlapping copy.
894    flags &= ~LIR_OpArrayCopy::overlapping;
895  }
896
897  if (src == dst) {
898    // moving within a single array so no type checks are needed
899    if (flags & LIR_OpArrayCopy::type_check) {
900      flags &= ~LIR_OpArrayCopy::type_check;
901    }
902  }
903  *flagsp = flags;
904  *expected_typep = (ciArrayKlass*)expected_type;
905}
906
907
908LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
909  assert(opr->is_register(), "why spill if item is not register?");
910
911  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
912    LIR_Opr result = new_register(T_FLOAT);
913    set_vreg_flag(result, must_start_in_memory);
914    assert(opr->is_register(), "only a register can be spilled");
915    assert(opr->value_type()->is_float(), "rounding only for floats available");
916    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
917    return result;
918  }
919  return opr;
920}
921
922
923LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
924  assert(type2size[t] == type2size[value->type()],
925         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
926  if (!value->is_register()) {
927    // force into a register
928    LIR_Opr r = new_register(value->type());
929    __ move(value, r);
930    value = r;
931  }
932
933  // create a spill location
934  LIR_Opr tmp = new_register(t);
935  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
936
937  // move from register to spill
938  __ move(value, tmp);
939  return tmp;
940}
941
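// Update the taken/not-taken counters in the MethodData for a profiled
// two-way branch. The counter offset is selected with a cmove so no extra
// control flow is introduced.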
942void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
943  if (if_instr->should_profile()) {
944    ciMethod* method = if_instr->profiled_method();
945    assert(method != NULL, "method should be set if branch is profiled");
946    ciMethodData* md = method->method_data_or_null();
947    assert(md != NULL, "Sanity");
948    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
949    assert(data != NULL, "must have profiling data");
950    assert(data->is_BranchData(), "need BranchData for two-way branches");
951    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
952    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
953    if (if_instr->is_swapped()) {
954      int t = taken_count_offset;
955      taken_count_offset = not_taken_count_offset;
956      not_taken_count_offset = t;
957    }
958
959    LIR_Opr md_reg = new_register(T_METADATA);
960    __ metadata2reg(md->constant_encoding(), md_reg);
961
962    LIR_Opr data_offset_reg = new_pointer_register();
963    __ cmove(lir_cond(cond),
964             LIR_OprFact::intptrConst(taken_count_offset),
965             LIR_OprFact::intptrConst(not_taken_count_offset),
966             data_offset_reg, as_BasicType(if_instr->x()->type()));
967
968    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
969    LIR_Opr data_reg = new_pointer_register();
970    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
971    __ move(data_addr, data_reg);
972    // Use leal instead of add to avoid destroying condition codes on x86
973    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
974    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
975    __ move(data_reg, data_addr);
976  }
977}
978
979// Phi technique:
980// This is about passing live values from one basic block to the other.
981// In code generated with Java it is rather rare that more than one
982// value is on the stack from one basic block to the other.
983// We optimize our technique for efficient passing of one value
984// (of type long, int, double..) but it can be extended.
985// When entering or leaving a basic block, all registers and all spill
986// slots are released and empty. We use the released registers
987// and spill slots to pass the live values from one block
988// to the other. The topmost value, i.e., the value on TOS of the expression
989// stack, is passed in registers. All other values are stored in the spilling
990// area. Every Phi has an index which designates its spill slot.
991// At exit of a basic block, we fill the register(s) and spill slots.
992// At entry of a basic block, the block_prolog sets up the content of phi nodes
993// and locks necessary registers and spilling slots.
994
995
996// move current value to referenced phi function
997void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
998  Phi* phi = sux_val->as_Phi();
999  // cur_val can be null without phi being null in conjunction with inlining
1000  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
1001    LIR_Opr operand = cur_val->operand();
1002    if (cur_val->operand()->is_illegal()) {
1003      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
1004             "these can be produced lazily");
1005      operand = operand_for_instruction(cur_val);
1006    }
1007    resolver->move(operand, operand_for_instruction(phi));
1008  }
1009}
1010
1011
1012// Moves all stack values into their PHI position
1013void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1014  BlockBegin* bb = block();
1015  if (bb->number_of_sux() == 1) {
1016    BlockBegin* sux = bb->sux_at(0);
1017    assert(sux->number_of_preds() > 0, "invalid CFG");
1018
1019    // a block with only one predecessor never has phi functions
1020    if (sux->number_of_preds() > 1) {
1021      int max_phis = cur_state->stack_size() + cur_state->locals_size();
1022      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
1023
1024      ValueStack* sux_state = sux->state();
1025      Value sux_value;
1026      int index;
1027
1028      assert(cur_state->scope() == sux_state->scope(), "not matching");
1029      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1030      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1031
1032      for_each_stack_value(sux_state, index, sux_value) {
1033        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1034      }
1035
1036      for_each_local_value(sux_state, index, sux_value) {
1037        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1038      }
1039
1040      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1041    }
1042  }
1043}
1044
1045
1046LIR_Opr LIRGenerator::new_register(BasicType type) {
1047  int vreg = _virtual_register_number;
1048  // add a little fudge factor for the bailout, since the bailout is
1049  // only checked periodically.  This gives a few extra registers to
1050  // hand out before we really run out, which helps us keep from
1051  // tripping over assertions.
1052  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
1053    bailout("out of virtual registers");
1054    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
1055      // wrap it around
1056      _virtual_register_number = LIR_OprDesc::vreg_base;
1057    }
1058  }
1059  _virtual_register_number += 1;
1060  return LIR_OprFact::virtual_register(vreg, type);
1061}
1062
1063
1064// Try to lock using register in hint
1065LIR_Opr LIRGenerator::rlock(Value instr) {
1066  return new_register(instr->type());
1067}
1068
1069
1070// does an rlock and sets result
1071LIR_Opr LIRGenerator::rlock_result(Value x) {
1072  LIR_Opr reg = rlock(x);
1073  set_result(x, reg);
1074  return reg;
1075}
1076
1077
1078// does an rlock and sets result
1079LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1080  LIR_Opr reg;
1081  switch (type) {
1082  case T_BYTE:
1083  case T_BOOLEAN:
1084    reg = rlock_byte(type);
1085    break;
1086  default:
1087    reg = rlock(x);
1088    break;
1089  }
1090
1091  set_result(x, reg);
1092  return reg;
1093}
1094
1095
1096//---------------------------------------------------------------------
1097ciObject* LIRGenerator::get_jobject_constant(Value value) {
1098  ObjectType* oc = value->type()->as_ObjectType();
1099  if (oc) {
1100    return oc->constant_value();
1101  }
1102  return NULL;
1103}
1104
1105
1106void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1107  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1108  assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1109
1110  // no moves are created for phi functions at the beginning of exception
1111  // handlers, so assign operands manually here
1112  for_each_phi_fun(block(), phi,
1113                   operand_for_instruction(phi));
1114
1115  LIR_Opr thread_reg = getThreadPointer();
1116  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1117               exceptionOopOpr());
1118  __ move_wide(LIR_OprFact::oopConst(NULL),
1119               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1120  __ move_wide(LIR_OprFact::oopConst(NULL),
1121               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1122
1123  LIR_Opr result = new_register(T_OBJECT);
1124  __ move(exceptionOopOpr(), result);
1125  set_result(x, result);
1126}
1127
1128
1129//----------------------------------------------------------------------
1130//----------------------------------------------------------------------
1131//----------------------------------------------------------------------
1132//----------------------------------------------------------------------
1133//                        visitor functions
1134//----------------------------------------------------------------------
1135//----------------------------------------------------------------------
1136//----------------------------------------------------------------------
1137//----------------------------------------------------------------------
1138
1139void LIRGenerator::do_Phi(Phi* x) {
1140  // phi functions are never visited directly
1141  ShouldNotReachHere();
1142}
1143
1144
1145// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1146void LIRGenerator::do_Constant(Constant* x) {
1147  if (x->state_before() != NULL) {
1148    // Any constant with a ValueStack requires patching so emit the patch here
1149    LIR_Opr reg = rlock_result(x);
1150    CodeEmitInfo* info = state_for(x, x->state_before());
1151    __ oop2reg_patch(NULL, reg, info);
1152  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1153    if (!x->is_pinned()) {
1154      // unpinned constants are handled specially so that they can be
1155      // put into registers when they are used multiple times within a
1156      // block.  After the block completes their operand will be
1157      // cleared so that other blocks can't refer to that register.
1158      set_result(x, load_constant(x));
1159    } else {
1160      LIR_Opr res = x->operand();
1161      if (!res->is_valid()) {
1162        res = LIR_OprFact::value_type(x->type());
1163      }
1164      if (res->is_constant()) {
1165        LIR_Opr reg = rlock_result(x);
1166        __ move(res, reg);
1167      } else {
1168        set_result(x, res);
1169      }
1170    }
1171  } else {
1172    set_result(x, LIR_OprFact::value_type(x->type()));
1173  }
1174}
1175
1176
1177void LIRGenerator::do_Local(Local* x) {
1178  // operand_for_instruction has the side effect of setting the result
1179  // so there's no need to do it here.
1180  operand_for_instruction(x);
1181}
1182
1183
1184void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
1185  Unimplemented();
1186}
1187
1188
1189void LIRGenerator::do_Return(Return* x) {
1190  if (compilation()->env()->dtrace_method_probes()) {
1191    BasicTypeList signature;
1192    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
1193    signature.append(T_METADATA); // Method*
1194    LIR_OprList* args = new LIR_OprList();
1195    args->append(getThreadPointer());
1196    LIR_Opr meth = new_register(T_METADATA);
1197    __ metadata2reg(method()->constant_encoding(), meth);
1198    args->append(meth);
1199    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
1200  }
1201
1202  if (x->type()->is_void()) {
1203    __ return_op(LIR_OprFact::illegalOpr);
1204  } else {
1205    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1206    LIRItem result(x->result(), this);
1207
1208    result.load_item_force(reg);
1209    __ return_op(result.result());
1210  }
1211  set_no_result(x);
1212}
1213
1214// Example: ref.get()
1215// Combination of LoadField and G1 pre-write barrier
1216void LIRGenerator::do_Reference_get(Intrinsic* x) {
1217
1218  const int referent_offset = java_lang_ref_Reference::referent_offset;
1219  guarantee(referent_offset > 0, "referent offset not initialized");
1220
1221  assert(x->number_of_arguments() == 1, "wrong type");
1222
1223  LIRItem reference(x->argument_at(0), this);
1224  reference.load_item();
1225
1226  // need to perform the null check on the reference object
1227  CodeEmitInfo* info = NULL;
1228  if (x->needs_null_check()) {
1229    info = state_for(x);
1230  }
1231
1232  LIR_Address* referent_field_adr =
1233    new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1234
1235  LIR_Opr result = rlock_result(x);
1236
1237  __ load(referent_field_adr, result, info);
1238
1239  // Register the value in the referent field with the pre-barrier
1240  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1241              result /* pre_val */,
1242              false  /* do_load */,
1243              false  /* patch */,
1244              NULL   /* info */);
1245}
1246
1247// Example: clazz.isInstance(object)
1248void LIRGenerator::do_isInstance(Intrinsic* x) {
1249  assert(x->number_of_arguments() == 2, "wrong type");
1250
1251  // TODO could try to substitute this node with an equivalent InstanceOf
1252  // if clazz is known to be a constant Class. This will pick up newly found
1253  // constants after HIR construction. I'll leave this to a future change.
1254
1255  // as a first cut, make a simple leaf call to runtime to stay platform independent.
1256  // could follow the aastore example in a future change.
1257
1258  LIRItem clazz(x->argument_at(0), this);
1259  LIRItem object(x->argument_at(1), this);
1260  clazz.load_item();
1261  object.load_item();
1262  LIR_Opr result = rlock_result(x);
1263
1264  // need to perform null check on clazz
1265  if (x->needs_null_check()) {
1266    CodeEmitInfo* info = state_for(x);
1267    __ null_check(clazz.result(), info);
1268  }
1269
1270  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1271                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
1272                                     x->type(),
1273                                     NULL); // NULL CodeEmitInfo results in a leaf call
1274  __ move(call_result, result);
1275}
1276
1277// Example: object.getClass()
1278void LIRGenerator::do_getClass(Intrinsic* x) {
1279  assert(x->number_of_arguments() == 1, "wrong type");
1280
1281  LIRItem rcvr(x->argument_at(0), this);
1282  rcvr.load_item();
1283  LIR_Opr temp = new_register(T_METADATA);
1284  LIR_Opr result = rlock_result(x);
1285
1286  // need to perform the null check on the rcvr
1287  CodeEmitInfo* info = NULL;
1288  if (x->needs_null_check()) {
1289    info = state_for(x);
1290  }
1291
1292  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
1293  // meaning of these two is mixed up (see JDK-8026837).
1294  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
1295  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
1296}
1297
1298
1299// Example: Thread.currentThread()
1300void LIRGenerator::do_currentThread(Intrinsic* x) {
1301  assert(x->number_of_arguments() == 0, "wrong type");
1302  LIR_Opr reg = rlock_result(x);
1303  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
1304}
1305
1306
1307void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1308  assert(x->number_of_arguments() == 1, "wrong type");
1309  LIRItem receiver(x->argument_at(0), this);
1310
1311  receiver.load_item();
1312  BasicTypeList signature;
1313  signature.append(T_OBJECT); // receiver
1314  LIR_OprList* args = new LIR_OprList();
1315  args->append(receiver.result());
1316  CodeEmitInfo* info = state_for(x, x->state());
1317  call_runtime(&signature, args,
1318               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1319               voidType, info);
1320
1321  set_no_result(x);
1322}
1323
1324
1325//------------------------local access--------------------------------------
1326
1327LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1328  if (x->operand()->is_illegal()) {
1329    Constant* c = x->as_Constant();
1330    if (c != NULL) {
1331      x->set_operand(LIR_OprFact::value_type(c->type()));
1332    } else {
1333      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1334      // allocate a virtual register for this local or phi
1335      x->set_operand(rlock(x));
1336      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1337    }
1338  }
1339  return x->operand();
1340}
1341
1342
1343Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1344  if (opr->is_virtual()) {
1345    return instruction_for_vreg(opr->vreg_number());
1346  }
1347  return NULL;
1348}
1349
1350
1351Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1352  if (reg_num < _instruction_for_operand.length()) {
1353    return _instruction_for_operand.at(reg_num);
1354  }
1355  return NULL;
1356}
1357
1358
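// Lazily allocate the vreg flag bitmap and set flag f for vreg_num.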
1359void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1360  if (_vreg_flags.size_in_bits() == 0) {
1361    BitMap2D temp(100, num_vreg_flags);
1362    temp.clear();
1363    _vreg_flags = temp;
1364  }
1365  _vreg_flags.at_put_grow(vreg_num, f, true);
1366}
1367
1368bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1369  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1370    return false;
1371  }
1372  return _vreg_flags.at(vreg_num, f);
1373}
1374
1375
1376// Block local constant handling.  This code is useful for keeping
1377// unpinned constants and constants which aren't exposed in the IR in
1378// registers.  Unpinned Constant instructions have their operands
1379// cleared when the block is finished so that other blocks can't end
1380// up referring to their registers.
1381
1382LIR_Opr LIRGenerator::load_constant(Constant* x) {
1383  assert(!x->is_pinned(), "only for unpinned constants");
1384  _unpinned_constants.append(x);
1385  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1386}
1387
1388
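// Return a register holding the constant c, reusing a register already
// allocated for an equal constant earlier in this block.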
1389LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1390  BasicType t = c->type();
1391  for (int i = 0; i < _constants.length(); i++) {
1392    LIR_Const* other = _constants.at(i);
1393    if (t == other->type()) {
1394      switch (t) {
1395      case T_INT:
1396      case T_FLOAT:
1397        if (c->as_jint_bits() != other->as_jint_bits()) continue;
1398        break;
1399      case T_LONG:
1400      case T_DOUBLE:
1401        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1402        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1403        break;
1404      case T_OBJECT:
1405        if (c->as_jobject() != other->as_jobject()) continue;
1406        break;
1407      }
1408      return _reg_for_constants.at(i);
1409    }
1410  }
1411
1412  LIR_Opr result = new_register(t);
1413  __ move((LIR_Opr)c, result);
1414  _constants.append(c);
1415  _reg_for_constants.append(result);
1416  return result;
1417}
1418
1419// Various barriers
1420
1421void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1422                               bool do_load, bool patch, CodeEmitInfo* info) {
1423  // Do the pre-write barrier, if any.
1424  switch (_bs->kind()) {
1425#if INCLUDE_ALL_GCS
1426    case BarrierSet::G1SATBCT:
1427    case BarrierSet::G1SATBCTLogging:
1428      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1429      break;
1430#endif // INCLUDE_ALL_GCS
1431    case BarrierSet::CardTableModRef:
1432    case BarrierSet::CardTableExtension:
1433      // No pre barriers
1434      break;
1435    case BarrierSet::ModRef:
1436      // No pre barriers
1437      break;
1438    default      :
1439      ShouldNotReachHere();
1440
1441  }
1442}
1443
1444void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1445  switch (_bs->kind()) {
1446#if INCLUDE_ALL_GCS
1447    case BarrierSet::G1SATBCT:
1448    case BarrierSet::G1SATBCTLogging:
1449      G1SATBCardTableModRef_post_barrier(addr,  new_val);
1450      break;
1451#endif // INCLUDE_ALL_GCS
1452    case BarrierSet::CardTableModRef:
1453    case BarrierSet::CardTableExtension:
1454      CardTableModRef_post_barrier(addr,  new_val);
1455      break;
1456    case BarrierSet::ModRef:
1457      // No post barriers
1458      break;
1459    default      :
1460      ShouldNotReachHere();
1461    }
1462}
1463
1464////////////////////////////////////////////////////////////////////////
1465#if INCLUDE_ALL_GCS
1466
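// G1 SATB pre-barrier: if concurrent marking is active, record the previous
// value of the field in the SATB queue (via a G1PreBarrierStub), optionally
// loading it from addr_opr first.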
1467void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1468                                                     bool do_load, bool patch, CodeEmitInfo* info) {
1469  // First we test whether marking is in progress.
1470  BasicType flag_type;
1471  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
1472    flag_type = T_INT;
1473  } else {
1474    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
1475              "Assumption");
1476    flag_type = T_BYTE;
1477  }
1478  LIR_Opr thrd = getThreadPointer();
1479  LIR_Address* mark_active_flag_addr =
1480    new LIR_Address(thrd,
1481                    in_bytes(JavaThread::satb_mark_queue_offset() +
1482                             PtrQueue::byte_offset_of_active()),
1483                    flag_type);
1484  // Read the marking-in-progress flag.
1485  LIR_Opr flag_val = new_register(T_INT);
1486  __ load(mark_active_flag_addr, flag_val);
1487  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
1488
1489  LIR_PatchCode pre_val_patch_code = lir_patch_none;
1490
1491  CodeStub* slow;
1492
1493  if (do_load) {
1494    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
1495    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
1496
1497    if (patch)
1498      pre_val_patch_code = lir_patch_normal;
1499
1500    pre_val = new_register(T_OBJECT);
1501
1502    if (!addr_opr->is_address()) {
1503      assert(addr_opr->is_register(), "must be");
1504      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
1505    }
1506    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
1507  } else {
1508    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
1509    assert(pre_val->is_register(), "must be");
1510    assert(pre_val->type() == T_OBJECT, "must be an object");
1511    assert(info == NULL, "sanity");
1512
1513    slow = new G1PreBarrierStub(pre_val);
1514  }
1515
1516  __ branch(lir_cond_notEqual, T_INT, slow);
1517  __ branch_destination(slow->continuation());
1518}
1519
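// G1 post-barrier: if new_val is not NULL and the store crosses heap regions
// (addr and new_val differ above the region-size shift), branch to the
// G1PostBarrierStub to dirty the card and enqueue it.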
1520void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1521  // If the "new_val" is a constant NULL, no barrier is necessary.
1522  if (new_val->is_constant() &&
1523      new_val->as_constant_ptr()->as_jobject() == NULL) return;
1524
1525  if (!new_val->is_register()) {
1526    LIR_Opr new_val_reg = new_register(T_OBJECT);
1527    if (new_val->is_constant()) {
1528      __ move(new_val, new_val_reg);
1529    } else {
1530      __ leal(new_val, new_val_reg);
1531    }
1532    new_val = new_val_reg;
1533  }
1534  assert(new_val->is_register(), "must be a register at this point");
1535
1536  if (addr->is_address()) {
1537    LIR_Address* address = addr->as_address_ptr();
1538    LIR_Opr ptr = new_pointer_register();
1539    if (!address->index()->is_valid() && address->disp() == 0) {
1540      __ move(address->base(), ptr);
1541    } else {
1542      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1543      __ leal(addr, ptr);
1544    }
1545    addr = ptr;
1546  }
1547  assert(addr->is_register(), "must be a register at this point");
1548
1549  LIR_Opr xor_res = new_pointer_register();
1550  LIR_Opr xor_shift_res = new_pointer_register();
1551  if (TwoOperandLIRForm) {
1552    __ move(addr, xor_res);
1553    __ logical_xor(xor_res, new_val, xor_res);
1554    __ move(xor_res, xor_shift_res);
1555    __ unsigned_shift_right(xor_shift_res,
1556                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1557                            xor_shift_res,
1558                            LIR_OprDesc::illegalOpr());
1559  } else {
1560    __ logical_xor(addr, new_val, xor_res);
1561    __ unsigned_shift_right(xor_res,
1562                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1563                            xor_shift_res,
1564                            LIR_OprDesc::illegalOpr());
1565  }
1566
1567  if (!new_val->is_register()) {
1568    LIR_Opr new_val_reg = new_register(T_OBJECT);
1569    __ leal(new_val, new_val_reg);
1570    new_val = new_val_reg;
1571  }
1572  assert(new_val->is_register(), "must be a register at this point");
1573
1574  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1575
1576  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1577  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1578  __ branch_destination(slow->continuation());
1579}
1580
1581#endif // INCLUDE_ALL_GCS
1582////////////////////////////////////////////////////////////////////////
1583
1584void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1585
1586  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1587  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1588  if (addr->is_address()) {
1589    LIR_Address* address = addr->as_address_ptr();
1590    // ptr cannot be an object because we use this barrier for array card marks
1591    // and addr can point in the middle of an array.
1592    LIR_Opr ptr = new_pointer_register();
1593    if (!address->index()->is_valid() && address->disp() == 0) {
1594      __ move(address->base(), ptr);
1595    } else {
1596      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1597      __ leal(addr, ptr);
1598    }
1599    addr = ptr;
1600  }
1601  assert(addr->is_register(), "must be a register at this point");
1602
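  // Emit the card mark itself. In outline (0 being the dirty-card value):
  //
  //   card_table_base[addr >> CardTableModRefBS::card_shift] = 0;
  //
  // card_table_base is the biased byte_map_base, so indexing it with the
  // shifted address directly yields the card entry covering addr.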
1603#ifdef ARM
1604  // TODO: ARM - move to platform-dependent code
1605  LIR_Opr tmp = FrameMap::R14_opr;
1606  if (VM_Version::supports_movw()) {
1607    __ move((LIR_Opr)card_table_base, tmp);
1608  } else {
1609    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1610  }
1611
1612  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1613  LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1614  if (((int)ct->byte_map_base & 0xff) == 0) {
1615    __ move(tmp, card_addr);
1616  } else {
1617    LIR_Opr tmp_zero = new_register(T_INT);
1618    __ move(LIR_OprFact::intConst(0), tmp_zero);
1619    __ move(tmp_zero, card_addr);
1620  }
1621#else // ARM
1622  LIR_Opr tmp = new_pointer_register();
1623  if (TwoOperandLIRForm) {
1624    __ move(addr, tmp);
1625    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1626  } else {
1627    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1628  }
1629  if (can_inline_as_constant(card_table_base)) {
1630    __ move(LIR_OprFact::intConst(0),
1631              new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1632  } else {
1633    __ move(LIR_OprFact::intConst(0),
1634              new LIR_Address(tmp, load_constant(card_table_base),
1635                              T_BYTE));
1636  }
1637#endif // ARM
1638}
1639
1640
1641//------------------------field access--------------------------------------
1642
1643// Comment copied from templateTable_i486.cpp
1644// ----------------------------------------------------------------------------
1645// Volatile variables demand their effects be made known to all CPU's in
1646// order.  Store buffers on most chips allow reads & writes to reorder; the
1647// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1648// memory barrier (i.e., it's not sufficient that the interpreter does not
1649// reorder volatile references, the hardware also must not reorder them).
1650//
1651// According to the new Java Memory Model (JMM):
1652// (1) All volatiles are serialized wrt to each other.
1653// ALSO reads & writes act as acquire & release, so:
1654// (2) A read cannot let unrelated NON-volatile memory refs that happen after
1655// the read float up to before the read.  It's OK for non-volatile memory refs
1656// that happen before the volatile read to float down below it.
1657// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1658// that happen BEFORE the write float down to after the write.  It's OK for
1659// non-volatile memory refs that happen after the volatile write to float up
1660// before it.
1661//
1662// We only put in barriers around volatile refs (they are expensive), not
1663// _between_ memory refs (that would require us to track the flavor of the
1664// previous memory refs).  Requirements (2) and (3) require some barriers
1665// before volatile stores and after volatile loads.  These nearly cover
1666// requirement (1) but miss the volatile-store-volatile-load case.  This final
1667// case is placed after volatile-stores although it could just as well go
1668// before volatile-loads.
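//
// As a rough sketch (assuming a multiprocessor target, i.e. os::is_MP()),
// the code generated below for volatile fields therefore looks like:
//
//   volatile store:  membar_release(); store(value, field); membar();
//   volatile load:   load(field, result); membar_acquire();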
1669
1670
1671void LIRGenerator::do_StoreField(StoreField* x) {
1672  bool needs_patching = x->needs_patching();
1673  bool is_volatile = x->field()->is_volatile();
1674  BasicType field_type = x->field_type();
1675  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1676
1677  CodeEmitInfo* info = NULL;
1678  if (needs_patching) {
1679    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1680    info = state_for(x, x->state_before());
1681  } else if (x->needs_null_check()) {
1682    NullCheck* nc = x->explicit_null_check();
1683    if (nc == NULL) {
1684      info = state_for(x);
1685    } else {
1686      info = state_for(nc);
1687    }
1688  }
1689
1690
1691  LIRItem object(x->obj(), this);
1692  LIRItem value(x->value(),  this);
1693
1694  object.load_item();
1695
1696  if (is_volatile || needs_patching) {
1697    // load item if field is volatile (fewer special cases for volatiles)
1698    // load item if field not initialized
1699    // load item if field not constant
1700    // because of code patching we cannot inline constants
1701    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1702      value.load_byte_item();
1703    } else  {
1704      value.load_item();
1705    }
1706  } else {
1707    value.load_for_store(field_type);
1708  }
1709
1710  set_no_result(x);
1711
1712#ifndef PRODUCT
1713  if (PrintNotLoaded && needs_patching) {
1714    tty->print_cr("   ###class not loaded at store_%s bci %d",
1715                  x->is_static() ?  "static" : "field", x->printable_bci());
1716  }
1717#endif
1718
1719  if (x->needs_null_check() &&
1720      (needs_patching ||
1721       MacroAssembler::needs_explicit_null_check(x->offset()))) {
1722    // emit an explicit null check because the offset is too large
1723    __ null_check(object.result(), new CodeEmitInfo(info));
1724  }
1725
1726  LIR_Address* address;
1727  if (needs_patching) {
1728    // we need to patch the offset in the instruction so don't allow
1729    // generate_address to try to be smart about emitting the -1.
1730    // Otherwise the patching code won't know how to find the
1731    // instruction to patch.
1732    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1733  } else {
1734    address = generate_address(object.result(), x->offset(), field_type);
1735  }
1736
1737  if (is_volatile && os::is_MP()) {
1738    __ membar_release();
1739  }
1740
1741  if (is_oop) {
1742    // Do the pre-write barrier, if any.
1743    pre_barrier(LIR_OprFact::address(address),
1744                LIR_OprFact::illegalOpr /* pre_val */,
1745                true /* do_load*/,
1746                needs_patching,
1747                (info ? new CodeEmitInfo(info) : NULL));
1748  }
1749
1750  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1751  if (needs_atomic_access && !needs_patching) {
1752    volatile_field_store(value.result(), address, info);
1753  } else {
1754    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1755    __ store(value.result(), address, info, patch_code);
1756  }
1757
1758  if (is_oop) {
1759    // Store to object so mark the card of the header
1760    post_barrier(object.result(), value.result());
1761  }
1762
1763  if (is_volatile && os::is_MP()) {
1764    __ membar();
1765  }
1766}
1767
1768
1769void LIRGenerator::do_LoadField(LoadField* x) {
1770  bool needs_patching = x->needs_patching();
1771  bool is_volatile = x->field()->is_volatile();
1772  BasicType field_type = x->field_type();
1773
1774  CodeEmitInfo* info = NULL;
1775  if (needs_patching) {
1776    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1777    info = state_for(x, x->state_before());
1778  } else if (x->needs_null_check()) {
1779    NullCheck* nc = x->explicit_null_check();
1780    if (nc == NULL) {
1781      info = state_for(x);
1782    } else {
1783      info = state_for(nc);
1784    }
1785  }
1786
1787  LIRItem object(x->obj(), this);
1788
1789  object.load_item();
1790
1791#ifndef PRODUCT
1792  if (PrintNotLoaded && needs_patching) {
1793    tty->print_cr("   ###class not loaded at load_%s bci %d",
1794                  x->is_static() ?  "static" : "field", x->printable_bci());
1795  }
1796#endif
1797
1798  bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1799  if (x->needs_null_check() &&
1800      (needs_patching ||
1801       MacroAssembler::needs_explicit_null_check(x->offset()) ||
1802       stress_deopt)) {
1803    LIR_Opr obj = object.result();
1804    if (stress_deopt) {
1805      obj = new_register(T_OBJECT);
1806      __ move(LIR_OprFact::oopConst(NULL), obj);
1807    }
1808    // emit an explicit null check because the offset is too large
1809    __ null_check(obj, new CodeEmitInfo(info));
1810  }
1811
1812  LIR_Opr reg = rlock_result(x, field_type);
1813  LIR_Address* address;
1814  if (needs_patching) {
1815    // we need to patch the offset in the instruction so don't allow
1816    // generate_address to try to be smart about emitting the -1.
1817    // Otherwise the patching code won't know how to find the
1818    // instruction to patch.
1819    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1820  } else {
1821    address = generate_address(object.result(), x->offset(), field_type);
1822  }
1823
1824  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1825  if (needs_atomic_access && !needs_patching) {
1826    volatile_field_load(address, reg, info);
1827  } else {
1828    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1829    __ load(address, reg, info, patch_code);
1830  }
1831
1832  if (is_volatile && os::is_MP()) {
1833    __ membar_acquire();
1834  }
1835}
1836
1837
1838//------------------------java.nio.Buffer.checkIndex------------------------
1839
1840// int java.nio.Buffer.checkIndex(int)
1841void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1842  // NOTE: by the time we are in checkIndex() we are guaranteed that
1843  // the buffer is non-null (because checkIndex is package-private and
1844  // only called from within other methods in the buffer).
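  // Roughly, the generated code is one unsigned compare against the
  // buffer's limit field plus a branch to a RangeCheckStub; the unsigned
  // compare also catches negative indices:
  //
  //   if ((unsigned)index >= buf.limit)  goto RangeCheckStub;
  //   result = index;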
1845  assert(x->number_of_arguments() == 2, "wrong type");
1846  LIRItem buf  (x->argument_at(0), this);
1847  LIRItem index(x->argument_at(1), this);
1848  buf.load_item();
1849  index.load_item();
1850
1851  LIR_Opr result = rlock_result(x);
1852  if (GenerateRangeChecks) {
1853    CodeEmitInfo* info = state_for(x);
1854    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1855    if (index.result()->is_constant()) {
1856      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1857      __ branch(lir_cond_belowEqual, T_INT, stub);
1858    } else {
1859      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1860                  java_nio_Buffer::limit_offset(), T_INT, info);
1861      __ branch(lir_cond_aboveEqual, T_INT, stub);
1862    }
1863    __ move(index.result(), result);
1864  } else {
1865    // Just load the index into the result register
1866    __ move(index.result(), result);
1867  }
1868}
1869
1870
1871//------------------------array access--------------------------------------
1872
1873
1874void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1875  LIRItem array(x->array(), this);
1876  array.load_item();
1877  LIR_Opr reg = rlock_result(x);
1878
1879  CodeEmitInfo* info = NULL;
1880  if (x->needs_null_check()) {
1881    NullCheck* nc = x->explicit_null_check();
1882    if (nc == NULL) {
1883      info = state_for(x);
1884    } else {
1885      info = state_for(nc);
1886    }
1887    if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1888      LIR_Opr obj = new_register(T_OBJECT);
1889      __ move(LIR_OprFact::oopConst(NULL), obj);
1890      __ null_check(obj, new CodeEmitInfo(info));
1891    }
1892  }
1893  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1894}
1895
1896
1897void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1898  bool use_length = x->length() != NULL;
1899  LIRItem array(x->array(), this);
1900  LIRItem index(x->index(), this);
1901  LIRItem length(this);
1902  bool needs_range_check = x->compute_needs_range_check();
1903
1904  if (use_length && needs_range_check) {
1905    length.set_instruction(x->length());
1906    length.load_item();
1907  }
1908
1909  array.load_item();
1910  if (index.is_constant() && can_inline_as_constant(x->index())) {
1911    // let it be a constant
1912    index.dont_load_item();
1913  } else {
1914    index.load_item();
1915  }
1916
1917  CodeEmitInfo* range_check_info = state_for(x);
1918  CodeEmitInfo* null_check_info = NULL;
1919  if (x->needs_null_check()) {
1920    NullCheck* nc = x->explicit_null_check();
1921    if (nc != NULL) {
1922      null_check_info = state_for(nc);
1923    } else {
1924      null_check_info = range_check_info;
1925    }
1926    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1927      LIR_Opr obj = new_register(T_OBJECT);
1928      __ move(LIR_OprFact::oopConst(NULL), obj);
1929      __ null_check(obj, new CodeEmitInfo(null_check_info));
1930    }
1931  }
1932
1933  // emit array address setup early so it schedules better
1934  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1935
1936  if (GenerateRangeChecks && needs_range_check) {
1937    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1938      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1939    } else if (use_length) {
1940      // TODO: use a (modified) version of array_range_check that does not require a
1941      //       constant length to be loaded to a register
1942      __ cmp(lir_cond_belowEqual, length.result(), index.result());
1943      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1944    } else {
1945      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1946      // The range check performs the null check, so clear it out for the load
1947      null_check_info = NULL;
1948    }
1949  }
1950
1951  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1952}
1953
1954
1955void LIRGenerator::do_NullCheck(NullCheck* x) {
1956  if (x->can_trap()) {
1957    LIRItem value(x->obj(), this);
1958    value.load_item();
1959    CodeEmitInfo* info = state_for(x);
1960    __ null_check(value.result(), info);
1961  }
1962}
1963
1964
1965void LIRGenerator::do_TypeCast(TypeCast* x) {
1966  LIRItem value(x->obj(), this);
1967  value.load_item();
1968  // the result is the same as from the node we are casting
1969  set_result(x, value.result());
1970}
1971
1972
1973void LIRGenerator::do_Throw(Throw* x) {
1974  LIRItem exception(x->exception(), this);
1975  exception.load_item();
1976  set_no_result(x);
1977  LIR_Opr exception_opr = exception.result();
1978  CodeEmitInfo* info = state_for(x, x->state());
1979
1980#ifndef PRODUCT
1981  if (PrintC1Statistics) {
1982    increment_counter(Runtime1::throw_count_address(), T_INT);
1983  }
1984#endif
1985
1986  // check if the instruction has an xhandler in any of the nested scopes
1987  bool unwind = false;
1988  if (info->exception_handlers()->length() == 0) {
1989    // this throw is not inside an xhandler
1990    unwind = true;
1991  } else {
1992    // get some idea of the throw type
1993    bool type_is_exact = true;
1994    ciType* throw_type = x->exception()->exact_type();
1995    if (throw_type == NULL) {
1996      type_is_exact = false;
1997      throw_type = x->exception()->declared_type();
1998    }
1999    if (throw_type != NULL && throw_type->is_instance_klass()) {
2000      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2001      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2002    }
2003  }
2004
2005  // do null check before moving exception oop into fixed register
2006  // to avoid a fixed interval with an oop during the null check.
2007  // Use a copy of the CodeEmitInfo because debug information is
2008  // different for null_check and throw.
2009  if (GenerateCompilerNullChecks &&
2010      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2011    // if the exception object wasn't created using new then it might be null.
2012    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2013  }
2014
2015  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2016    // we need to go through the exception lookup path to get JVMTI
2017    // notification done
2018    unwind = false;
2019  }
2020
2021  // move exception oop into fixed register
2022  __ move(exception_opr, exceptionOopOpr());
2023
2024  if (unwind) {
2025    __ unwind_exception(exceptionOopOpr());
2026  } else {
2027    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2028  }
2029}
2030
2031
2032void LIRGenerator::do_RoundFP(RoundFP* x) {
2033  LIRItem input(x->input(), this);
2034  input.load_item();
2035  LIR_Opr input_opr = input.result();
2036  assert(input_opr->is_register(), "why round if value is not in a register?");
2037  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2038  if (input_opr->is_single_fpu()) {
2039    set_result(x, round_item(input_opr)); // This code path not currently taken
2040  } else {
2041    LIR_Opr result = new_register(T_DOUBLE);
2042    set_vreg_flag(result, must_start_in_memory);
2043    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2044    set_result(x, result);
2045  }
2046}
2047
2048// Here UnsafeGetRaw may have x->base() and x->index() be either int or long
2049// on both 64-bit and 32-bit platforms. We expect x->base() to always be long on 64-bit.
2050void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2051  LIRItem base(x->base(), this);
2052  LIRItem idx(this);
2053
2054  base.load_item();
2055  if (x->has_index()) {
2056    idx.set_instruction(x->index());
2057    idx.load_nonconstant();
2058  }
2059
2060  LIR_Opr reg = rlock_result(x, x->basic_type());
2061
2062  int   log2_scale = 0;
2063  if (x->has_index()) {
2064    log2_scale = x->log2_scale();
2065  }
2066
2067  assert(!x->has_index() || idx.value() == x->index(), "should match");
2068
2069  LIR_Opr base_op = base.result();
2070  LIR_Opr index_op = idx.result();
2071#ifndef _LP64
2072  if (base_op->type() == T_LONG) {
2073    base_op = new_register(T_INT);
2074    __ convert(Bytecodes::_l2i, base.result(), base_op);
2075  }
2076  if (x->has_index()) {
2077    if (index_op->type() == T_LONG) {
2078      LIR_Opr long_index_op = index_op;
2079      if (index_op->is_constant()) {
2080        long_index_op = new_register(T_LONG);
2081        __ move(index_op, long_index_op);
2082      }
2083      index_op = new_register(T_INT);
2084      __ convert(Bytecodes::_l2i, long_index_op, index_op);
2085    } else {
2086      assert(x->index()->type()->tag() == intTag, "must be");
2087    }
2088  }
2089  // At this point base and index should both be ints.
2090  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2091  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2092#else
2093  if (x->has_index()) {
2094    if (index_op->type() == T_INT) {
2095      if (!index_op->is_constant()) {
2096        index_op = new_register(T_LONG);
2097        __ convert(Bytecodes::_i2l, idx.result(), index_op);
2098      }
2099    } else {
2100      assert(index_op->type() == T_LONG, "must be");
2101      if (index_op->is_constant()) {
2102        index_op = new_register(T_LONG);
2103        __ move(idx.result(), index_op);
2104      }
2105    }
2106  }
2107  // At this point base is a non-constant long.
2108  // Index is either a long register or an int constant.
2109  // We allow the constant to stay an int because that allows a more compact encoding by
2110  // embedding an immediate offset in the address expression. If we have a long constant, we have to
2111  // move it into a register first.
2112  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2113  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2114                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2115#endif
2116
2117  BasicType dst_type = x->basic_type();
2118
2119  LIR_Address* addr;
2120  if (index_op->is_constant()) {
2121    assert(log2_scale == 0, "must not have a scale");
2122    assert(index_op->type() == T_INT, "only int constants supported");
2123    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2124  } else {
2125#ifdef X86
2126    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2127#elif defined(ARM)
2128    addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2129#else
2130    if (index_op->is_illegal() || log2_scale == 0) {
2131      addr = new LIR_Address(base_op, index_op, dst_type);
2132    } else {
2133      LIR_Opr tmp = new_pointer_register();
2134      __ shift_left(index_op, log2_scale, tmp);
2135      addr = new LIR_Address(base_op, tmp, dst_type);
2136    }
2137#endif
2138  }
2139
2140  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2141    __ unaligned_move(addr, reg);
2142  } else {
2143    if (dst_type == T_OBJECT && x->is_wide()) {
2144      __ move_wide(addr, reg);
2145    } else {
2146      __ move(addr, reg);
2147    }
2148  }
2149}
2150
2151
2152void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2153  int  log2_scale = 0;
2154  BasicType type = x->basic_type();
2155
2156  if (x->has_index()) {
2157    log2_scale = x->log2_scale();
2158  }
2159
2160  LIRItem base(x->base(), this);
2161  LIRItem value(x->value(), this);
2162  LIRItem idx(this);
2163
2164  base.load_item();
2165  if (x->has_index()) {
2166    idx.set_instruction(x->index());
2167    idx.load_item();
2168  }
2169
2170  if (type == T_BYTE || type == T_BOOLEAN) {
2171    value.load_byte_item();
2172  } else {
2173    value.load_item();
2174  }
2175
2176  set_no_result(x);
2177
2178  LIR_Opr base_op = base.result();
2179  LIR_Opr index_op = idx.result();
2180
2181#ifndef _LP64
2182  if (base_op->type() == T_LONG) {
2183    base_op = new_register(T_INT);
2184    __ convert(Bytecodes::_l2i, base.result(), base_op);
2185  }
2186  if (x->has_index()) {
2187    if (index_op->type() == T_LONG) {
2188      index_op = new_register(T_INT);
2189      __ convert(Bytecodes::_l2i, idx.result(), index_op);
2190    }
2191  }
2192  // At this point base and index should both be non-constant ints
2193  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2194  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2195#else
2196  if (x->has_index()) {
2197    if (index_op->type() == T_INT) {
2198      index_op = new_register(T_LONG);
2199      __ convert(Bytecodes::_i2l, idx.result(), index_op);
2200    }
2201  }
2202  // At this point base and index are long and non-constant
2203  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2204  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2205#endif
2206
2207  if (log2_scale != 0) {
2208    // temporary fix (platform dependent code without shift on Intel would be better)
2209    // TODO: ARM also allows embedded shift in the address
2210    __ shift_left(index_op, log2_scale, index_op);
2211  }
2212
2213  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2214  __ move(value.result(), addr);
2215}
2216
2217
2218void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2219  BasicType type = x->basic_type();
2220  LIRItem src(x->object(), this);
2221  LIRItem off(x->offset(), this);
2222
2223  off.load_item();
2224  src.load_item();
2225
2226  LIR_Opr value = rlock_result(x, x->basic_type());
2227
2228  get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2229
2230#if INCLUDE_ALL_GCS
2231  // We might be reading the value of the referent field of a
2232  // Reference object in order to attach it back to the live
2233  // object graph. If G1 is enabled then we need to record
2234  // the value that is being returned in an SATB log buffer.
2235  //
2236  // We need to generate code similar to the following...
2237  //
2238  // if (offset == java_lang_ref_Reference::referent_offset) {
2239  //   if (src != NULL) {
2240  //     if (klass(src)->reference_type() != REF_NONE) {
2241  //       pre_barrier(..., value, ...);
2242  //     }
2243  //   }
2244  // }
2245
2246  if (UseG1GC && type == T_OBJECT) {
2247    bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
2248    bool gen_offset_check = true;    // Assume we need to generate the offset guard.
2249    bool gen_source_check = true;    // Assume we need to check the src object for null.
2250    bool gen_type_check = true;      // Assume we need to check the reference_type.
2251
2252    if (off.is_constant()) {
2253      jlong off_con = (off.type()->is_int() ?
2254                        (jlong) off.get_jint_constant() :
2255                        off.get_jlong_constant());
2256
2257
2258      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2259        // The constant offset is something other than referent_offset.
2260        // We can skip generating/checking the remaining guards and
2261        // skip generation of the code stub.
2262        gen_pre_barrier = false;
2263      } else {
2264        // The constant offset is the same as referent_offset -
2265        // we do not need to generate a runtime offset check.
2266        gen_offset_check = false;
2267      }
2268    }
2269
2270    // We don't need to generate the stub if the source object is an array
2271    if (gen_pre_barrier && src.type()->is_array()) {
2272      gen_pre_barrier = false;
2273    }
2274
2275    if (gen_pre_barrier) {
2276      // We still need to continue with the checks.
2277      if (src.is_constant()) {
2278        ciObject* src_con = src.get_jobject_constant();
2279        guarantee(src_con != NULL, "no source constant");
2280
2281        if (src_con->is_null_object()) {
2282          // The constant src object is null - We can skip
2283          // generating the code stub.
2284          gen_pre_barrier = false;
2285        } else {
2286          // Non-null constant source object. We still have to generate
2287          // the slow stub - but we don't need to generate the runtime
2288          // null object check.
2289          gen_source_check = false;
2290        }
2291      }
2292    }
2293    if (gen_pre_barrier && !PatchALot) {
2294      // Can the klass of object be statically determined to be
2295      // a sub-class of Reference?
2296      ciType* type = src.value()->declared_type();
2297      if ((type != NULL) && type->is_loaded()) {
2298        if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2299          gen_type_check = false;
2300        } else if (type->is_klass() &&
2301                   !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2302          // Not Reference and not Object klass.
2303          gen_pre_barrier = false;
2304        }
2305      }
2306    }
2307
2308    if (gen_pre_barrier) {
2309      LabelObj* Lcont = new LabelObj();
2310
2311      // We may need to generate more than one runtime check here. Let's start with
2312      // the offset check.
2313      if (gen_offset_check) {
2314        // if (offset != referent_offset) -> continue
2315        // If offset is an int then we can do the comparison with the
2316        // referent_offset constant; otherwise we need to move
2317        // referent_offset into a temporary register and generate
2318        // a reg-reg compare.
2319
2320        LIR_Opr referent_off;
2321
2322        if (off.type()->is_int()) {
2323          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2324        } else {
2325          assert(off.type()->is_long(), "what else?");
2326          referent_off = new_register(T_LONG);
2327          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2328        }
2329        __ cmp(lir_cond_notEqual, off.result(), referent_off);
2330        __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2331      }
2332      if (gen_source_check) {
2333        // offset is a const and equals referent offset
2334        // if (source == null) -> continue
2335        __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2336        __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2337      }
2338      LIR_Opr src_klass = new_register(T_OBJECT);
2339      if (gen_type_check) {
2340        // We have determined that offset == referent_offset && src != null.
2341        // if (src->_klass->_reference_type == REF_NONE) -> continue
2342        __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2343        LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2344        LIR_Opr reference_type = new_register(T_INT);
2345        __ move(reference_type_addr, reference_type);
2346        __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2347        __ branch(lir_cond_equal, T_INT, Lcont->label());
2348      }
2349      {
2350        // We have determined that src->_klass->_reference_type != REF_NONE
2351        // so register the value in the referent field with the pre-barrier.
2352        pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2353                    value  /* pre_val */,
2354                    false  /* do_load */,
2355                    false  /* patch */,
2356                    NULL   /* info */);
2357      }
2358      __ branch_destination(Lcont->label());
2359    }
2360  }
2361#endif // INCLUDE_ALL_GCS
2362
2363  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2364}
2365
2366
2367void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2368  BasicType type = x->basic_type();
2369  LIRItem src(x->object(), this);
2370  LIRItem off(x->offset(), this);
2371  LIRItem data(x->value(), this);
2372
2373  src.load_item();
2374  if (type == T_BOOLEAN || type == T_BYTE) {
2375    data.load_byte_item();
2376  } else {
2377    data.load_item();
2378  }
2379  off.load_item();
2380
2381  set_no_result(x);
2382
2383  if (x->is_volatile() && os::is_MP()) __ membar_release();
2384  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2385  if (x->is_volatile() && os::is_MP()) __ membar();
2386}
2387
2388
2389void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
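  // Emit a linear sequence of compares over the switch ranges. For each
  // range [low_key, high_key] -> dest the dispatch below is roughly:
  //   - a single key:     one equality compare,
  //   - a two-key range:  two equality compares,
  //   - a wider range:    test low_key <= value <= high_key,
  // and anything falling through all ranges jumps to default_sux.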
2390  int lng = x->length();
2391
2392  for (int i = 0; i < lng; i++) {
2393    SwitchRange* one_range = x->at(i);
2394    int low_key = one_range->low_key();
2395    int high_key = one_range->high_key();
2396    BlockBegin* dest = one_range->sux();
2397    if (low_key == high_key) {
2398      __ cmp(lir_cond_equal, value, low_key);
2399      __ branch(lir_cond_equal, T_INT, dest);
2400    } else if (high_key - low_key == 1) {
2401      __ cmp(lir_cond_equal, value, low_key);
2402      __ branch(lir_cond_equal, T_INT, dest);
2403      __ cmp(lir_cond_equal, value, high_key);
2404      __ branch(lir_cond_equal, T_INT, dest);
2405    } else {
2406      LabelObj* L = new LabelObj();
2407      __ cmp(lir_cond_less, value, low_key);
2408      __ branch(lir_cond_less, T_INT, L->label());
2409      __ cmp(lir_cond_lessEqual, value, high_key);
2410      __ branch(lir_cond_lessEqual, T_INT, dest);
2411      __ branch_destination(L->label());
2412    }
2413  }
2414  __ jump(default_sux);
2415}
2416
2417
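// Merge consecutive TableSwitch keys that branch to the same successor into
// SwitchRanges; intermediate ranges that dispatch to the default successor
// are dropped, since the trailing jump to the default covers them anyway.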
2418SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2419  SwitchRangeList* res = new SwitchRangeList();
2420  int len = x->length();
2421  if (len > 0) {
2422    BlockBegin* sux = x->sux_at(0);
2423    int key = x->lo_key();
2424    BlockBegin* default_sux = x->default_sux();
2425    SwitchRange* range = new SwitchRange(key, sux);
2426    for (int i = 0; i < len; i++, key++) {
2427      BlockBegin* new_sux = x->sux_at(i);
2428      if (sux == new_sux) {
2429        // still in same range
2430        range->set_high_key(key);
2431      } else {
2432        // skip tests which explicitly dispatch to the default
2433        if (sux != default_sux) {
2434          res->append(range);
2435        }
2436        range = new SwitchRange(key, new_sux);
2437      }
2438      sux = new_sux;
2439    }
2440    if (res->length() == 0 || res->last() != range)  res->append(range);
2441  }
2442  return res;
2443}
2444
2445
2446// we expect the keys to be sorted by increasing value
2447SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2448  SwitchRangeList* res = new SwitchRangeList();
2449  int len = x->length();
2450  if (len > 0) {
2451    BlockBegin* default_sux = x->default_sux();
2452    int key = x->key_at(0);
2453    BlockBegin* sux = x->sux_at(0);
2454    SwitchRange* range = new SwitchRange(key, sux);
2455    for (int i = 1; i < len; i++) {
2456      int new_key = x->key_at(i);
2457      BlockBegin* new_sux = x->sux_at(i);
2458      if (key+1 == new_key && sux == new_sux) {
2459        // still in same range
2460        range->set_high_key(new_key);
2461      } else {
2462        // skip tests which explicitly dispatch to the default
2463        if (range->sux() != default_sux) {
2464          res->append(range);
2465        }
2466        range = new SwitchRange(new_key, new_sux);
2467      }
2468      key = new_key;
2469      sux = new_sux;
2470    }
2471    if (res->length() == 0 || res->last() != range)  res->append(range);
2472  }
2473  return res;
2474}
2475
2476
2477void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2478  LIRItem tag(x->tag(), this);
2479  tag.load_item();
2480  set_no_result(x);
2481
2482  if (x->is_safepoint()) {
2483    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2484  }
2485
2486  // move values into phi locations
2487  move_to_phi(x->state());
2488
2489  int lo_key = x->lo_key();
2490  int hi_key = x->hi_key();
2491  int len = x->length();
2492  LIR_Opr value = tag.result();
2493  if (UseTableRanges) {
2494    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2495  } else {
2496    for (int i = 0; i < len; i++) {
2497      __ cmp(lir_cond_equal, value, i + lo_key);
2498      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2499    }
2500    __ jump(x->default_sux());
2501  }
2502}
2503
2504
2505void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2506  LIRItem tag(x->tag(), this);
2507  tag.load_item();
2508  set_no_result(x);
2509
2510  if (x->is_safepoint()) {
2511    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2512  }
2513
2514  // move values into phi locations
2515  move_to_phi(x->state());
2516
2517  LIR_Opr value = tag.result();
2518  if (UseTableRanges) {
2519    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2520  } else {
2521    int len = x->length();
2522    for (int i = 0; i < len; i++) {
2523      __ cmp(lir_cond_equal, value, x->key_at(i));
2524      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2525    }
2526    __ jump(x->default_sux());
2527  }
2528}
2529
2530
2531void LIRGenerator::do_Goto(Goto* x) {
2532  set_no_result(x);
2533
2534  if (block()->next()->as_OsrEntry()) {
2535    // need to free up storage used for OSR entry point
2536    LIR_Opr osrBuffer = block()->next()->operand();
2537    BasicTypeList signature;
2538    signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2539    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2540    __ move(osrBuffer, cc->args()->at(0));
2541    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2542                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2543  }
2544
2545  if (x->is_safepoint()) {
2546    ValueStack* state = x->state_before() ? x->state_before() : x->state();
2547
2548    // increment backedge counter if needed
2549    CodeEmitInfo* info = state_for(x, state);
2550    increment_backedge_counter(info, x->profiled_bci());
2551    CodeEmitInfo* safepoint_info = state_for(x, state);
2552    __ safepoint(safepoint_poll_register(), safepoint_info);
2553  }
2554
2555  // Gotos can be folded Ifs; handle this case.
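  // A folded If carries BranchData (separate taken/not_taken counters),
  // while a plain goto carries JumpData; either way we bump the matching
  // counter slot in the MDO directly.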
2556  if (x->should_profile()) {
2557    ciMethod* method = x->profiled_method();
2558    assert(method != NULL, "method should be set if branch is profiled");
2559    ciMethodData* md = method->method_data_or_null();
2560    assert(md != NULL, "Sanity");
2561    ciProfileData* data = md->bci_to_data(x->profiled_bci());
2562    assert(data != NULL, "must have profiling data");
2563    int offset;
2564    if (x->direction() == Goto::taken) {
2565      assert(data->is_BranchData(), "need BranchData for two-way branches");
2566      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2567    } else if (x->direction() == Goto::not_taken) {
2568      assert(data->is_BranchData(), "need BranchData for two-way branches");
2569      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2570    } else {
2571      assert(data->is_JumpData(), "need JumpData for branches");
2572      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2573    }
2574    LIR_Opr md_reg = new_register(T_METADATA);
2575    __ metadata2reg(md->constant_encoding(), md_reg);
2576
2577    increment_counter(new LIR_Address(md_reg, offset,
2578                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2579  }
2580
2581  // emit phi-instruction moves after the safepoint since this simplifies
2582  // describing the state at the safepoint.
2583  move_to_phi(x->state());
2584
2585  __ jump(x->default_sux());
2586}
2587
2588/**
2589 * Emit profiling code if needed for arguments, parameters, return value types
2590 *
2591 * @param md                    MDO the code will update at runtime
2592 * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2593 * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2594 * @param profiled_k            current profile
2595 * @param obj                   IR node for the object to be profiled
2596 * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2597 *                              Set once we find an update to make and use for next ones.
2598 * @param not_null              true if we know obj cannot be null
2599 * @param signature_at_call_k   signature at call for obj
2600 * @param callee_signature_k    signature of callee for obj
2601 *                              (call-site and callee signatures differ at method handle call sites)
2602 * @return                      the only klass we know will ever be seen at this profile point
2603 */
2604ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2605                                    Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2606                                    ciKlass* callee_signature_k) {
2607  ciKlass* result = NULL;
2608  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2609  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2610  // known not to be null or null bit already set and already set to
2611  // unknown: nothing we can do to improve profiling
2612  if (!do_null && !do_update) {
2613    return result;
2614  }
2615
2616  ciKlass* exact_klass = NULL;
2617  Compilation* comp = Compilation::current();
2618  if (do_update) {
2619    // try to find exact type, using CHA if possible, so that loading
2620    // the klass from the object can be avoided
2621    ciType* type = obj->exact_type();
2622    if (type == NULL) {
2623      type = obj->declared_type();
2624      type = comp->cha_exact_type(type);
2625    }
2626    assert(type == NULL || type->is_klass(), "type should be class");
2627    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2628
2629    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2630  }
2631
2632  if (!do_null && !do_update) {
2633    return result;
2634  }
2635
2636  ciKlass* exact_signature_k = NULL;
2637  if (do_update) {
2638    // Is the type from the signature exact (the only one possible)?
2639    exact_signature_k = signature_at_call_k->exact_klass();
2640    if (exact_signature_k == NULL) {
2641      exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2642    } else {
2643      result = exact_signature_k;
2644      // Known statically. No need to emit any code: prevent
2645      // LIR_Assembler::emit_profile_type() from emitting useless code
2646      profiled_k = ciTypeEntries::with_status(result, profiled_k);
2647    }
2648    // exact_klass and exact_signature_k can both be non-NULL but
2649    // different if exact_klass is loaded after the ciObject for
2650    // exact_signature_k is created.
2651    if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2652      // sometimes the type of the signature is better than the best type
2653      // the compiler has
2654      exact_klass = exact_signature_k;
2655    }
2656    if (callee_signature_k != NULL &&
2657        callee_signature_k != signature_at_call_k) {
2658      ciKlass* improved_klass = callee_signature_k->exact_klass();
2659      if (improved_klass == NULL) {
2660        improved_klass = comp->cha_exact_type(callee_signature_k);
2661      }
2662      if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2663        exact_klass = exact_signature_k;
2664      }
2665    }
2666    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2667  }
2668
2669  if (!do_null && !do_update) {
2670    return result;
2671  }
2672
2673  if (mdp == LIR_OprFact::illegalOpr) {
2674    mdp = new_register(T_METADATA);
2675    __ metadata2reg(md->constant_encoding(), mdp);
2676    if (md_base_offset != 0) {
2677      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2678      mdp = new_pointer_register();
2679      __ leal(LIR_OprFact::address(base_type_address), mdp);
2680    }
2681  }
2682  LIRItem value(obj, this);
2683  value.load_item();
2684  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2685                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2686  return result;
2687}
2688
2689// profile parameters on entry to the root of the compilation
2690void LIRGenerator::profile_parameters(Base* x) {
2691  if (compilation()->profile_parameters()) {
2692    CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2693    ciMethodData* md = scope()->method()->method_data_or_null();
2694    assert(md != NULL, "Sanity");
2695
2696    if (md->parameters_type_data() != NULL) {
2697      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2698      ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2699      LIR_Opr mdp = LIR_OprFact::illegalOpr;
2700      for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2701        LIR_Opr src = args->at(i);
2702        assert(!src->is_illegal(), "check");
2703        BasicType t = src->type();
2704        if (t == T_OBJECT || t == T_ARRAY) {
2705          intptr_t profiled_k = parameters->type(j);
2706          Local* local = x->state()->local_at(java_index)->as_Local();
2707          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2708                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2709                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2710          // If the profile is known statically, set it once and for all and do not emit any code
2711          if (exact != NULL) {
2712            md->set_parameter_type(j, exact);
2713          }
2714          j++;
2715        }
2716        java_index += type2size[t];
2717      }
2718    }
2719  }
2720}
2721
2722void LIRGenerator::do_Base(Base* x) {
2723  __ std_entry(LIR_OprFact::illegalOpr);
2724  // Emit moves from physical registers / stack slots to virtual registers
2725  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2726  IRScope* irScope = compilation()->hir()->top_scope();
2727  int java_index = 0;
2728  for (int i = 0; i < args->length(); i++) {
2729    LIR_Opr src = args->at(i);
2730    assert(!src->is_illegal(), "check");
2731    BasicType t = src->type();
2732
2733    // Types which are smaller than int are passed as int, so
2734    // correct the type that is passed.
2735    switch (t) {
2736    case T_BYTE:
2737    case T_BOOLEAN:
2738    case T_SHORT:
2739    case T_CHAR:
2740      t = T_INT;
2741      break;
2742    }
2743
2744    LIR_Opr dest = new_register(t);
2745    __ move(src, dest);
2746
2747    // Assign new location to Local instruction for this local
2748    Local* local = x->state()->local_at(java_index)->as_Local();
2749    assert(local != NULL, "Locals for incoming arguments must have been created");
2750#ifndef __SOFTFP__
2751    // The Java calling convention passes double as long and float as int.
2752    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2753#endif // __SOFTFP__
2754    local->set_operand(dest);
2755    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2756    java_index += type2size[t];
2757  }
2758
2759  if (compilation()->env()->dtrace_method_probes()) {
2760    BasicTypeList signature;
2761    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2762    signature.append(T_METADATA); // Method*
2763    LIR_OprList* args = new LIR_OprList();
2764    args->append(getThreadPointer());
2765    LIR_Opr meth = new_register(T_METADATA);
2766    __ metadata2reg(method()->constant_encoding(), meth);
2767    args->append(meth);
2768    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2769  }
2770
2771  if (method()->is_synchronized()) {
2772    LIR_Opr obj;
2773    if (method()->is_static()) {
2774      obj = new_register(T_OBJECT);
2775      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2776    } else {
2777      Local* receiver = x->state()->local_at(0)->as_Local();
2778      assert(receiver != NULL, "must already exist");
2779      obj = receiver->operand();
2780    }
2781    assert(obj->is_valid(), "must be valid");
2782
2783    if (method()->is_synchronized() && GenerateSynchronizationCode) {
2784      LIR_Opr lock = new_register(T_INT);
2785      __ load_stack_address_monitor(0, lock);
2786
2787      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2788      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2789
2790      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2791      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2792    }
2793  }
2794  if (compilation()->age_code()) {
2795    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2796    decrement_age(info);
2797  }
2798  // increment invocation counters if needed
2799  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2800    profile_parameters(x);
2801    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2802    increment_invocation_counter(info);
2803  }
2804
2805  // all blocks with a successor must end with an unconditional jump
2806  // to the successor even if they are consecutive
2807  __ jump(x->default_sux());
2808}
2809
2810
2811void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2812  // construct our frame and model the production of incoming pointer
2813  // to the OSR buffer.
2814  __ osr_entry(LIR_Assembler::osrBufferPointer());
2815  LIR_Opr result = rlock_result(x);
2816  __ move(LIR_Assembler::osrBufferPointer(), result);
2817}
2818
2819
2820void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2821  assert(args->length() == arg_list->length(),
2822         err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2823  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2824    LIRItem* param = args->at(i);
2825    LIR_Opr loc = arg_list->at(i);
2826    if (loc->is_register()) {
2827      param->load_item_force(loc);
2828    } else {
2829      LIR_Address* addr = loc->as_address_ptr();
2830      param->load_for_store(addr->type());
2831      if (addr->type() == T_OBJECT) {
2832        __ move_wide(param->result(), addr);
2833      } else
2834        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2835          __ unaligned_move(param->result(), addr);
2836        } else {
2837          __ move(param->result(), addr);
2838        }
2839    }
2840  }
2841
2842  if (x->has_receiver()) {
2843    LIRItem* receiver = args->at(0);
2844    LIR_Opr loc = arg_list->at(0);
2845    if (loc->is_register()) {
2846      receiver->load_item_force(loc);
2847    } else {
2848      assert(loc->is_address(), "just checking");
2849      receiver->load_for_store(T_OBJECT);
2850      __ move_wide(receiver->result(), loc->as_address_ptr());
2851    }
2852  }
2853}
2854
2855
2856// Visits all arguments, returns appropriate items without loading them
2857LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2858  LIRItemList* argument_items = new LIRItemList();
2859  if (x->has_receiver()) {
2860    LIRItem* receiver = new LIRItem(x->receiver(), this);
2861    argument_items->append(receiver);
2862  }
2863  for (int i = 0; i < x->number_of_arguments(); i++) {
2864    LIRItem* param = new LIRItem(x->argument_at(i), this);
2865    argument_items->append(param);
2866  }
2867  return argument_items;
2868}
2869
2870
2871// The invoke with receiver has following phases:
2872//   a) traverse and load/lock receiver;
2873//   b) traverse all arguments -> item-array (invoke_visit_argument)
2874//   c) push receiver on stack
2875//   d) load each of the items and push on stack
2876//   e) unlock receiver
2877//   f) move receiver into receiver-register %o0
2878//   g) lock result registers and emit call operation
2879//
2880// Before issuing a call, we must spill-save all values on stack
2881// that are in caller-save registers. "spill-save" moves those registers
2882// either into a free callee-save register or spills them if no free
2883// callee-save register is available.
2884//
2885// The problem is where to invoke spill-save.
2886// - if invoked between e) and f), we may lock a callee-save
2887//   register in "spill-save" that destroys the receiver register
2888//   before f) is executed
2889// - if we rearrange f) to be earlier, by loading %o0, it
2890//   may destroy a value on the stack that is currently in %o0
2891//   and is waiting to be spilled
2892// - if we keep the receiver locked while doing spill-save,
2893//   we cannot spill it as it is spill-locked
2894//
2895void LIRGenerator::do_Invoke(Invoke* x) {
2896  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2897
2898  LIR_OprList* arg_list = cc->args();
2899  LIRItemList* args = invoke_visit_arguments(x);
2900  LIR_Opr receiver = LIR_OprFact::illegalOpr;
2901
2902  // setup result register
2903  LIR_Opr result_register = LIR_OprFact::illegalOpr;
2904  if (x->type() != voidType) {
2905    result_register = result_register_for(x->type());
2906  }
2907
2908  CodeEmitInfo* info = state_for(x, x->state());
2909
2910  invoke_load_arguments(x, args, arg_list);
2911
2912  if (x->has_receiver()) {
2913    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2914    receiver = args->at(0)->result();
2915  }
2916
2917  // emit invoke code
2918  bool optimized = x->target_is_loaded() && x->target_is_final();
2919  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2920
2921  // JSR 292
2922  // Preserve the SP over MethodHandle call sites.
2923  ciMethod* target = x->target();
2924  bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2925                                  target->is_method_handle_intrinsic() ||
2926                                  target->is_compiled_lambda_form());
2927  if (is_method_handle_invoke) {
2928    info->set_is_method_handle_invoke(true);
2929    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2930  }
2931
2932  switch (x->code()) {
2933    case Bytecodes::_invokestatic:
2934      __ call_static(target, result_register,
2935                     SharedRuntime::get_resolve_static_call_stub(),
2936                     arg_list, info);
2937      break;
2938    case Bytecodes::_invokespecial:
2939    case Bytecodes::_invokevirtual:
2940    case Bytecodes::_invokeinterface:
2941      // for a final target we still produce an inline cache, in order
2942      // to be able to call in mixed mode
2943      if (x->code() == Bytecodes::_invokespecial || optimized) {
2944        __ call_opt_virtual(target, receiver, result_register,
2945                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
2946                            arg_list, info);
2947      } else if (x->vtable_index() < 0) {
2948        __ call_icvirtual(target, receiver, result_register,
2949                          SharedRuntime::get_resolve_virtual_call_stub(),
2950                          arg_list, info);
2951      } else {
2952        int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2953        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2954        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2955      }
2956      break;
2957    case Bytecodes::_invokedynamic: {
2958      __ call_dynamic(target, receiver, result_register,
2959                      SharedRuntime::get_resolve_static_call_stub(),
2960                      arg_list, info);
2961      break;
2962    }
2963    default:
2964      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2965      break;
2966  }
2967
2968  // JSR 292
2969  // Restore the SP after MethodHandle call sites.
2970  if (is_method_handle_invoke) {
2971    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2972  }
2973
2974  if (x->type()->is_float() || x->type()->is_double()) {
2975    // Force rounding of results from non-strictfp callees when in a strictfp
2976    // scope, or when we don't know the strictness of the callee (to
2977    // be safe).
2978    if (method()->is_strict()) {
2979      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2980        result_register = round_item(result_register);
2981      }
2982    }
2983  }
2984
2985  if (result_register->is_valid()) {
2986    LIR_Opr result = rlock_result(x);
2987    __ move(result_register, result);
2988  }
2989}
2990
2991
2992void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2993  assert(x->number_of_arguments() == 1, "wrong type");
2994  LIRItem value       (x->argument_at(0), this);
2995  LIR_Opr reg = rlock_result(x);
2996  value.load_item();
2997  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2998  __ move(tmp, reg);
2999}
3000
3001
3002
3003// Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3004void LIRGenerator::do_IfOp(IfOp* x) {
3005#ifdef ASSERT
3006  {
3007    ValueTag xtag = x->x()->type()->tag();
3008    ValueTag ttag = x->tval()->type()->tag();
3009    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3010    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3011    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3012  }
3013#endif
3014
3015  LIRItem left(x->x(), this);
3016  LIRItem right(x->y(), this);
3017  left.load_item();
3018  if (can_inline_as_constant(right.value())) {
3019    right.dont_load_item();
3020  } else {
3021    right.load_item();
3022  }
3023
3024  LIRItem t_val(x->tval(), this);
3025  LIRItem f_val(x->fval(), this);
3026  t_val.dont_load_item();
3027  f_val.dont_load_item();
3028  LIR_Opr reg = rlock_result(x);
3029
3030  __ cmp(lir_cond(x->cond()), left.result(), right.result());
3031  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3032}
3033
3034void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3035    assert(x->number_of_arguments() == expected_arguments, "wrong type");
3036    LIR_Opr reg = result_register_for(x->type());
3037    __ call_runtime_leaf(routine, getThreadTemp(),
3038                         reg, new LIR_OprList());
3039    LIR_Opr result = rlock_result(x);
3040    __ move(reg, result);
3041}
3042
3043#ifdef TRACE_HAVE_INTRINSICS
3044void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3045    LIR_Opr thread = getThreadPointer();
3046    LIR_Opr osthread = new_pointer_register();
3047    __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3048    size_t thread_id_size = OSThread::thread_id_size();
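    // The OS thread id field is either 64 or 32 bits wide depending on the
    // platform; a 64-bit id is narrowed to the result with l2i, a 32-bit id
    // is moved directly.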
3049    if (thread_id_size == (size_t) BytesPerLong) {
3050      LIR_Opr id = new_register(T_LONG);
3051      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3052      __ convert(Bytecodes::_l2i, id, rlock_result(x));
3053    } else if (thread_id_size == (size_t) BytesPerInt) {
3054      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3055    } else {
3056      ShouldNotReachHere();
3057    }
3058}
3059
3060void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3061    CodeEmitInfo* info = state_for(x);
3062    CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3063    BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3064    assert(info != NULL, "must have info");
3065    LIRItem arg(x->argument_at(1), this);
3066    arg.load_item();
3067    LIR_Opr klass = new_pointer_register();
3068    __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3069    LIR_Opr id = new_register(T_LONG);
3070    ByteSize offset = TRACE_ID_OFFSET;
3071    LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3072    __ move(trace_id_addr, id);
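    // Set the lowest bit in the stored trace id, but hand back the id with the
    // two low bits masked off.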
3073    __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
3074    __ store(id, trace_id_addr);
3075    __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
3076    __ move(id, rlock_result(x));
3077}
3078#endif
3079
3080void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3081  switch (x->id()) {
3082  case vmIntrinsics::_intBitsToFloat      :
3083  case vmIntrinsics::_doubleToRawLongBits :
3084  case vmIntrinsics::_longBitsToDouble    :
3085  case vmIntrinsics::_floatToRawIntBits   : {
3086    do_FPIntrinsics(x);
3087    break;
3088  }
3089
3090#ifdef TRACE_HAVE_INTRINSICS
3091  case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3092  case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3093  case vmIntrinsics::_counterTime:
3094    do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3095    break;
3096#endif
3097
3098  case vmIntrinsics::_currentTimeMillis:
3099    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3100    break;
3101
3102  case vmIntrinsics::_nanoTime:
3103    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3104    break;
3105
3106  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
3107  case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
3108  case vmIntrinsics::_getClass:       do_getClass(x);      break;
3109  case vmIntrinsics::_currentThread:  do_currentThread(x); break;
3110
3111  case vmIntrinsics::_dlog:           // fall through
3112  case vmIntrinsics::_dlog10:         // fall through
3113  case vmIntrinsics::_dabs:           // fall through
3114  case vmIntrinsics::_dsqrt:          // fall through
3115  case vmIntrinsics::_dtan:           // fall through
3116  case vmIntrinsics::_dsin :          // fall through
3117  case vmIntrinsics::_dcos :          // fall through
3118  case vmIntrinsics::_dexp :          // fall through
3119  case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
3120  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
3121
3122  // java.nio.Buffer.checkIndex
3123  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
3124
3125  case vmIntrinsics::_compareAndSwapObject:
3126    do_CompareAndSwap(x, objectType);
3127    break;
3128  case vmIntrinsics::_compareAndSwapInt:
3129    do_CompareAndSwap(x, intType);
3130    break;
3131  case vmIntrinsics::_compareAndSwapLong:
3132    do_CompareAndSwap(x, longType);
3133    break;
3134
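  // The fence intrinsics only need to emit memory barriers on multiprocessor systems.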
3135  case vmIntrinsics::_loadFence :
3136    if (os::is_MP()) __ membar_acquire();
3137    break;
3138  case vmIntrinsics::_storeFence:
3139    if (os::is_MP()) __ membar_release();
3140    break;
3141  case vmIntrinsics::_fullFence :
3142    if (os::is_MP()) __ membar();
3143    break;
3144
3145  case vmIntrinsics::_Reference_get:
3146    do_Reference_get(x);
3147    break;
3148
3149  case vmIntrinsics::_updateCRC32:
3150  case vmIntrinsics::_updateBytesCRC32:
3151  case vmIntrinsics::_updateByteBufferCRC32:
3152    do_update_CRC32(x);
3153    break;
3154
3155  default: ShouldNotReachHere(); break;
3156  }
3157}
3158
3159void LIRGenerator::profile_arguments(ProfileCall* x) {
3160  if (compilation()->profile_arguments()) {
3161    int bci = x->bci_of_invoke();
3162    ciMethodData* md = x->method()->method_data_or_null();
3163    ciProfileData* data = md->bci_to_data(bci);
3164    if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3165        (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3166      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3167      int base_offset = md->byte_offset_of_slot(data, extra);
3168      LIR_Opr mdp = LIR_OprFact::illegalOpr;
3169      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3170
3171      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3172      int start = 0;
3173      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3174      if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3175        // first argument is not profiled at call (method handle invoke)
3176        assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3177        start = 1;
3178      }
3179      ciSignature* callee_signature = x->callee()->signature();
3180      // method handle call to virtual method
3181      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3182      ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3183
3184      bool ignored_will_link;
3185      ciSignature* signature_at_call = NULL;
3186      x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3187      ciSignatureStream signature_at_call_stream(signature_at_call);
3188
3189      // if called through method handle invoke, some arguments may have been popped
3190      for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3191        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3192        ciKlass* exact = profile_type(md, base_offset, off,
3193                                      args->type(i), x->profiled_arg_at(i+start), mdp,
3194                                      !x->arg_needs_null_check(i+start),
3195                                      signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3196        if (exact != NULL) {
3197          md->set_argument_type(bci, i, exact);
3198        }
3199      }
3200    } else {
3201#ifdef ASSERT
3202      Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3203      int n = x->nb_profiled_args();
3204      assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3205                                                  (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3206             "only at JSR292 bytecodes");
3207#endif
3208    }
3209  }
3210}
3211
3212// profile parameters on entry to an inlined method
3213void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3214  if (compilation()->profile_parameters() && x->inlined()) {
3215    ciMethodData* md = x->callee()->method_data_or_null();
3216    if (md != NULL) {
3217      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3218      if (parameters_type_data != NULL) {
3219        ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
3220        LIR_Opr mdp = LIR_OprFact::illegalOpr;
3221        bool has_receiver = !x->callee()->is_static();
3222        ciSignature* sig = x->callee()->signature();
3223        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3224        int i = 0; // to iterate on the Instructions
3225        Value arg = x->recv();
3226        bool not_null = false;
3227        int bci = x->bci_of_invoke();
3228        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3229        // The first parameter is the receiver so that's what we start
3230        // with if it exists. One exception is a method handle call to
3231        // a virtual method: the receiver is in the args list
3232        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3233          i = 1;
3234          arg = x->profiled_arg_at(0);
3235          not_null = !x->arg_needs_null_check(0);
3236        }
3237        int k = 0; // to iterate on the profile data
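        // Walk the profile slots (k) and the profiled argument Instructions (i)
        // in lockstep until every parameter slot of the MDO entry has been visited.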
3238        for (;;) {
3239          intptr_t profiled_k = parameters->type(k);
3240          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3241                                        in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3242                                        profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3243          // If the profile is known statically, set it once and for all and do not emit any code
3244          if (exact != NULL) {
3245            md->set_parameter_type(k, exact);
3246          }
3247          k++;
3248          if (k >= parameters_type_data->number_of_parameters()) {
3249#ifdef ASSERT
3250            int extra = 0;
3251            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3252                x->nb_profiled_args() >= TypeProfileParmsLimit &&
3253                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3254              extra += 1;
3255            }
3256            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3257#endif
3258            break;
3259          }
3260          arg = x->profiled_arg_at(i);
3261          not_null = !x->arg_needs_null_check(i);
3262          i++;
3263        }
3264      }
3265    }
3266  }
3267}
3268
3269void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3270  // Need recv in a temporary register so it interferes with the other temporaries
3271  LIR_Opr recv = LIR_OprFact::illegalOpr;
3272  LIR_Opr mdo = new_register(T_OBJECT);
3273  // tmp is used to hold the counters on SPARC
3274  LIR_Opr tmp = new_pointer_register();
3275
3276  if (x->nb_profiled_args() > 0) {
3277    profile_arguments(x);
3278  }
3279
3280  // profile parameters on inlined method entry including receiver
3281  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3282    profile_parameters_at_call(x);
3283  }
3284
3285  if (x->recv() != NULL) {
3286    LIRItem value(x->recv(), this);
3287    value.load_item();
3288    recv = new_register(T_OBJECT);
3289    __ move(value.result(), recv);
3290  }
3291  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3292}
3293
3294void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3295  int bci = x->bci_of_invoke();
3296  ciMethodData* md = x->method()->method_data_or_null();
3297  ciProfileData* data = md->bci_to_data(bci);
3298  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3299  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3300  LIR_Opr mdp = LIR_OprFact::illegalOpr;
3301
3302  bool ignored_will_link;
3303  ciSignature* signature_at_call = NULL;
3304  x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3305
3306  // The offset within the MDO of the entry to update may be too large
3307  // to be used in load/store instructions on some platforms. So have
3308  // profile_type() compute the address of the profile in a register.
3309  ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3310                                ret->type(), x->ret(), mdp,
3311                                !x->needs_null_check(),
3312                                signature_at_call->return_type()->as_klass(),
3313                                x->callee()->signature()->return_type()->as_klass());
3314  if (exact != NULL) {
3315    md->set_return_type(bci, exact);
3316  }
3317}
3318
3319void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3320  // We can safely ignore accessors here, since c2 will inline them anyway;
3321  // accessors are also always mature.
3322  if (!x->inlinee()->is_accessor()) {
3323    CodeEmitInfo* info = state_for(x, x->state(), true);
3324    // Notify the runtime only very infrequently, just to take care of counter overflows
3325    int freq_log = Tier23InlineeNotifyFreqLog;
3326    double scale;
3327    if (_method->has_option_value("CompileThresholdScaling", scale)) {
3328      freq_log = Arguments::scaled_freq_log(freq_log, scale);
3329    }
3330    increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
3331  }
3332}
3333
3334void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3335  int freq_log;
3336  int level = compilation()->env()->comp_level();
3337  if (level == CompLevel_limited_profile) {
3338    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3339  } else if (level == CompLevel_full_profile) {
3340    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3341  } else {
3342    ShouldNotReachHere();
3343  }
3344  // Increment the appropriate invocation/backedge counter and notify the runtime.
3345  double scale;
3346  if (_method->has_option_value("CompileThresholdScaling", scale)) {
3347    freq_log = Arguments::scaled_freq_log(freq_log, scale);
3348  }
3349  increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
3350}
3351
3352void LIRGenerator::decrement_age(CodeEmitInfo* info) {
3353  ciMethod* method = info->scope()->method();
3354  MethodCounters* mc_adr = method->ensure_method_counters();
3355  if (mc_adr != NULL) {
3356    LIR_Opr mc = new_pointer_register();
3357    __ move(LIR_OprFact::intptrConst(mc_adr), mc);
3358    int offset = in_bytes(MethodCounters::nmethod_age_offset());
3359    LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
3360    LIR_Opr result = new_register(T_INT);
3361    __ load(counter, result);
3362    __ sub(result, LIR_OprFact::intConst(1), result);
3363    __ store(result, counter);
3364    // DeoptimizeStub will reexecute from the current state in code info.
3365    CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
3366                                         Deoptimization::Action_make_not_entrant);
3367    __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
3368    __ branch(lir_cond_lessEqual, T_INT, deopt);
3369  }
3370}
3371
3372
3373void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3374                                                ciMethod *method, int frequency,
3375                                                int bci, bool backedge, bool notify) {
3376  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3377  int level = _compilation->env()->comp_level();
3378  assert(level > CompLevel_simple, "Shouldn't be here");
3379
3380  int offset = -1;
3381  LIR_Opr counter_holder;
3382  if (level == CompLevel_limited_profile) {
3383    MethodCounters* counters_adr = method->ensure_method_counters();
3384    if (counters_adr == NULL) {
3385      bailout("method counters allocation failed");
3386      return;
3387    }
3388    counter_holder = new_pointer_register();
3389    __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3390    offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3391                                 MethodCounters::invocation_counter_offset());
3392  } else if (level == CompLevel_full_profile) {
3393    counter_holder = new_register(T_METADATA);
3394    offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3395                                 MethodData::invocation_counter_offset());
3396    ciMethodData* md = method->method_data_or_null();
3397    assert(md != NULL, "Sanity");
3398    __ metadata2reg(md->constant_encoding(), counter_holder);
3399  } else {
3400    ShouldNotReachHere();
3401  }
3402  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3403  LIR_Opr result = new_register(T_INT);
3404  __ load(counter, result);
3405  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3406  __ store(result, counter);
3407  if (notify) {
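    // 'frequency' is either 0 or 2^n - 1 (see the assert above), so anding the
    // bumped counter with (frequency << count_shift) only yields zero periodically;
    // only then do we branch into the CounterOverflowStub to notify the runtime.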
3408    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3409    LIR_Opr meth = new_register(T_METADATA);
3410    __ metadata2reg(method->constant_encoding(), meth);
3411    __ logical_and(result, mask, result);
3412    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3413    // The bci for info can point to the cmp of an if; we want the if's bci
3414    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3415    __ branch(lir_cond_equal, T_INT, overflow);
3416    __ branch_destination(overflow->continuation());
3417  }
3418}
3419
3420void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3421  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3422  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3423
3424  if (x->pass_thread()) {
3425    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3426    args->append(getThreadPointer());
3427  }
3428
3429  for (int i = 0; i < x->number_of_arguments(); i++) {
3430    Value a = x->argument_at(i);
3431    LIRItem* item = new LIRItem(a, this);
3432    item->load_item();
3433    args->append(item->result());
3434    signature->append(as_BasicType(a->type()));
3435  }
3436
3437  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3438  if (x->type() == voidType) {
3439    set_no_result(x);
3440  } else {
3441    __ move(result, rlock_result(x));
3442  }
3443}
3444
3445#ifdef ASSERT
3446void LIRGenerator::do_Assert(Assert *x) {
3447  ValueTag tag = x->x()->type()->tag();
3448  If::Condition cond = x->cond();
3449
3450  LIRItem xitem(x->x(), this);
3451  LIRItem yitem(x->y(), this);
3452  LIRItem* xin = &xitem;
3453  LIRItem* yin = &yitem;
3454
3455  assert(tag == intTag, "Only integer assertions are valid!");
3456
3457  xin->load_item();
3458  yin->dont_load_item();
3459
3460  set_no_result(x);
3461
3462  LIR_Opr left = xin->result();
3463  LIR_Opr right = yin->result();
3464
3465  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3466}
3467#endif
3468
3469void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3470
3471
3472  Instruction *a = x->x();
3473  Instruction *b = x->y();
3474  if (!a || StressRangeCheckElimination) {
3475    assert(!b || StressRangeCheckElimination, "B must also be null");
3476
3477    CodeEmitInfo *info = state_for(x, x->state());
3478    CodeStub* stub = new PredicateFailedStub(info);
3479
3480    __ jump(stub);
3481  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3482    int a_int = a->type()->as_IntConstant()->value();
3483    int b_int = b->type()->as_IntConstant()->value();
3484
3485    bool ok = false;
3486
3487    switch(x->cond()) {
3488      case Instruction::eql: ok = (a_int == b_int); break;
3489      case Instruction::neq: ok = (a_int != b_int); break;
3490      case Instruction::lss: ok = (a_int < b_int); break;
3491      case Instruction::leq: ok = (a_int <= b_int); break;
3492      case Instruction::gtr: ok = (a_int > b_int); break;
3493      case Instruction::geq: ok = (a_int >= b_int); break;
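      // aeq/beq compare the operands as unsigned integers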
3494      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3495      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3496      default: ShouldNotReachHere();
3497    }
3498
3499    if (ok) {
3500
3501      CodeEmitInfo *info = state_for(x, x->state());
3502      CodeStub* stub = new PredicateFailedStub(info);
3503
3504      __ jump(stub);
3505    }
3506  } else {
3507
3508    ValueTag tag = x->x()->type()->tag();
3509    If::Condition cond = x->cond();
3510    LIRItem xitem(x->x(), this);
3511    LIRItem yitem(x->y(), this);
3512    LIRItem* xin = &xitem;
3513    LIRItem* yin = &yitem;
3514
3515    assert(tag == intTag, "Only integer deoptimizations are valid!");
3516
3517    xin->load_item();
3518    yin->dont_load_item();
3519    set_no_result(x);
3520
3521    LIR_Opr left = xin->result();
3522    LIR_Opr right = yin->result();
3523
3524    CodeEmitInfo *info = state_for(x, x->state());
3525    CodeStub* stub = new PredicateFailedStub(info);
3526
3527    __ cmp(lir_cond(cond), left, right);
3528    __ branch(lir_cond(cond), right->type(), stub);
3529  }
3530}
3531
3532
3533LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3534  LIRItemList args(1);
3535  LIRItem value(arg1, this);
3536  args.append(&value);
3537  BasicTypeList signature;
3538  signature.append(as_BasicType(arg1->type()));
3539
3540  return call_runtime(&signature, &args, entry, result_type, info);
3541}
3542
3543
3544LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3545  LIRItemList args(2);
3546  LIRItem value1(arg1, this);
3547  LIRItem value2(arg2, this);
3548  args.append(&value1);
3549  args.append(&value2);
3550  BasicTypeList signature;
3551  signature.append(as_BasicType(arg1->type()));
3552  signature.append(as_BasicType(arg2->type()));
3553
3554  return call_runtime(&signature, &args, entry, result_type, info);
3555}
3556
3557
3558LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3559                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
3560  // get a result register
3561  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3562  LIR_Opr result = LIR_OprFact::illegalOpr;
3563  if (result_type->tag() != voidTag) {
3564    result = new_register(result_type);
3565    phys_reg = result_register_for(result_type);
3566  }
3567
3568  // move the arguments into the correct location
3569  CallingConvention* cc = frame_map()->c_calling_convention(signature);
3570  assert(cc->length() == args->length(), "argument mismatch");
3571  for (int i = 0; i < args->length(); i++) {
3572    LIR_Opr arg = args->at(i);
3573    LIR_Opr loc = cc->at(i);
3574    if (loc->is_register()) {
3575      __ move(arg, loc);
3576    } else {
3577      LIR_Address* addr = loc->as_address_ptr();
3578//           if (!can_store_as_constant(arg)) {
3579//             LIR_Opr tmp = new_register(arg->type());
3580//             __ move(arg, tmp);
3581//             arg = tmp;
3582//           }
3583      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3584        __ unaligned_move(arg, addr);
3585      } else {
3586        __ move(arg, addr);
3587      }
3588    }
3589  }
3590
3591  if (info) {
3592    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3593  } else {
3594    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3595  }
3596  if (result->is_valid()) {
3597    __ move(phys_reg, result);
3598  }
3599  return result;
3600}
3601
3602
3603LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3604                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
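  // Same as the LIR_OprList variant above, except that the arguments arrive as
  // unloaded LIRItems and are forced into their calling-convention locations here.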
3605  // get a result register
3606  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3607  LIR_Opr result = LIR_OprFact::illegalOpr;
3608  if (result_type->tag() != voidTag) {
3609    result = new_register(result_type);
3610    phys_reg = result_register_for(result_type);
3611  }
3612
3613  // move the arguments into the correct location
3614  CallingConvention* cc = frame_map()->c_calling_convention(signature);
3615
3616  assert(cc->length() == args->length(), "argument mismatch");
3617  for (int i = 0; i < args->length(); i++) {
3618    LIRItem* arg = args->at(i);
3619    LIR_Opr loc = cc->at(i);
3620    if (loc->is_register()) {
3621      arg->load_item_force(loc);
3622    } else {
3623      LIR_Address* addr = loc->as_address_ptr();
3624      arg->load_for_store(addr->type());
3625      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3626        __ unaligned_move(arg->result(), addr);
3627      } else {
3628        __ move(arg->result(), addr);
3629      }
3630    }
3631  }
3632
3633  if (info) {
3634    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3635  } else {
3636    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3637  }
3638  if (result->is_valid()) {
3639    __ move(phys_reg, result);
3640  }
3641  return result;
3642}
3643
3644void LIRGenerator::do_MemBar(MemBar* x) {
3645  if (os::is_MP()) {
3646    LIR_Code code = x->code();
3647    switch(code) {
3648      case lir_membar_acquire   : __ membar_acquire(); break;
3649      case lir_membar_release   : __ membar_release(); break;
3650      case lir_membar           : __ membar(); break;
3651      case lir_membar_loadload  : __ membar_loadload(); break;
3652      case lir_membar_storestore: __ membar_storestore(); break;
3653      case lir_membar_loadstore : __ membar_loadstore(); break;
3654      case lir_membar_storeload : __ membar_storeload(); break;
3655      default                   : ShouldNotReachHere(); break;
3656    }
3657  }
3658}
3659