c1_LIRGenerator.cpp revision 3274:0105f367a14c
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif

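// The `__` shorthand emits LIR through the current generator's LIR_List;
// with ASSERT enabled, each emitted instruction also records the C++ file
// and line that generated it, which helps when debugging LIR output.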
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

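// PATCHED_ADDR is a placeholder displacement for addresses whose real
// offset is not yet known and must be patched at run time; code that cannot
// tolerate patching asserts that a displacement never equals this value
// (see the max_jint checks in the barrier code below).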
// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e., for the two assignments b := c, a := b, start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// generates moves in this order: move b to a, then move c to b.
// For the cycle a := b, b := a, start with node a:
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// generates moves in this order: move b to temp, move a to b, move temp to a.
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


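// The destructor performs the actual resolution: it traverses the
// assignment graph built up by move(LIR_Opr, LIR_Opr) and emits the moves
// in a safe order, breaking any register cycle through the temporary.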
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

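  // On two-operand machines (TwoOperandLIRForm, e.g. x86) the result operand
  // must be the same as the left input, so materialize left into result first.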
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      if (is_strictfp) {
        __ mul_strictfp(left_op, right_op, result_op, tmp_op);
      } else {
        __ mul(left_op, right_op, result_op);
      }
      break;

    case Bytecodes::_imul:
      {
        bool    did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      if (is_strictfp) {
        __ div_strictfp (left_op, right_op, result_op, tmp_op);
      } else {
        __ div (left_op, right_op, result_op);
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x >  y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

  }
  ShouldNotReachHere();  // all If::Condition values are handled above
  return NULL;
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length), which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (s_offs >= d_offs) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst is zero, so assume a
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

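    // Select the byte offset of the MDO counter to bump with a conditional
    // move so that the profiling increment itself stays branch-free.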
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ oop2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code compiled from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the
// expression stack, is passed in registers. All other values are stored
// in the spilling area. Every Phi has an index which designates its
// spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi
// nodes and locks the necessary registers and spill slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using the register in the hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
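  // so that a Reference.get() performed while concurrent marking is in
  // progress cannot hide the referent from the garbage collector.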
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() != NULL || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block-local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

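// G1 snapshot-at-the-beginning (SATB) pre-barrier: while concurrent marking
// is active, the value about to be overwritten (pre_val) must be recorded so
// that all objects that were live when marking started are still traced.
// The inline fast path only tests the per-thread marking-active flag; the
// actual enqueueing of pre_val happens in the slow-path stub.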
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

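  // A post-barrier is needed only for stores that cross heap regions:
  // addr and new_val lie in the same region exactly when they agree in all
  // bits above LogOfHRGrainBytes, i.e. when
  // (addr ^ new_val) >> LogOfHRGrainBytes == 0, so same-region stores fall
  // through without entering the slow path.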
  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

1569#ifdef ARM
1570  // TODO: ARM - move to platform-dependent code
1571  LIR_Opr tmp = FrameMap::R14_opr;
1572  if (VM_Version::supports_movw()) {
1573    __ move((LIR_Opr)card_table_base, tmp);
1574  } else {
1575    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1576  }
1577
1578  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1579  LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1580  if (((int)ct->byte_map_base & 0xff) == 0) {
1581    __ move(tmp, card_addr);
1582  } else {
1583    LIR_Opr tmp_zero = new_register(T_INT);
1584    __ move(LIR_OprFact::intConst(0), tmp_zero);
1585    __ move(tmp_zero, card_addr);
1586  }
1587#else // ARM
1588  LIR_Opr tmp = new_pointer_register();
1589  if (TwoOperandLIRForm) {
1590    __ move(addr, tmp);
1591    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1592  } else {
1593    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1594  }
1595  if (can_inline_as_constant(card_table_base)) {
1596    __ move(LIR_OprFact::intConst(0),
1597              new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1598  } else {
1599    __ move(LIR_OprFact::intConst(0),
1600              new LIR_Address(tmp, load_constant(card_table_base),
1601                              T_BYTE));
1602  }
1603#endif // ARM
1604}
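// On the non-ARM path the code above boils down to the classic
// card-dirtying store; a rough C sketch, using byte_map_base and
// card_shift as above (0 is the dirty-card value):
//
//   jbyte* card = byte_map_base + ((uintptr_t)addr >> CardTableModRefBS::card_shift);
//   *card = 0;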
1605
1606
1607//------------------------field access--------------------------------------
1608
1609// Comment copied from templateTable_i486.cpp
1610// ----------------------------------------------------------------------------
1611// Volatile variables demand their effects be made known to all CPUs in
1612// order.  Store buffers on most chips allow reads & writes to reorder; the
1613// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1614// memory barrier (i.e., it's not sufficient that the interpreter does not
1615// reorder volatile references, the hardware also must not reorder them).
1616//
1617// According to the new Java Memory Model (JMM):
1618// (1) All volatiles are serialized with respect to each other.
1619// ALSO, reads & writes act as acquire & release, so:
1620// (2) A read cannot let unrelated NON-volatile memory refs that happen after
1621// the read float up to before the read.  It's OK for non-volatile memory refs
1622// that happen before the volatile read to float down below it.
1623// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1624// that happen BEFORE the write float down to after the write.  It's OK for
1625// non-volatile memory refs that happen after the volatile write to float up
1626// before it.
1627//
1628// We only put in barriers around volatile refs (they are expensive), not
1629// _between_ memory refs (that would require us to track the flavor of the
1630// previous memory refs).  Requirements (2) and (3) require some barriers
1631// before volatile stores and after volatile loads.  These nearly cover
1632// requirement (1) but miss the volatile-store-volatile-load case.  This final
1633// case is placed after volatile-stores although it could just as well go
1634// before volatile-loads.
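//
// In LIR terms (see do_StoreField and do_LoadField below) this scheme
// reduces, on MP systems, to:
//
//   volatile store:  membar_release; store; membar    (covers store-load)
//   volatile load:   load; membar_acquire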
1635
1636
1637void LIRGenerator::do_StoreField(StoreField* x) {
1638  bool needs_patching = x->needs_patching();
1639  bool is_volatile = x->field()->is_volatile();
1640  BasicType field_type = x->field_type();
1641  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1642
1643  CodeEmitInfo* info = NULL;
1644  if (needs_patching) {
1645    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1646    info = state_for(x, x->state_before());
1647  } else if (x->needs_null_check()) {
1648    NullCheck* nc = x->explicit_null_check();
1649    if (nc == NULL) {
1650      info = state_for(x);
1651    } else {
1652      info = state_for(nc);
1653    }
1654  }
1655
1656
1657  LIRItem object(x->obj(), this);
1658  LIRItem value(x->value(),  this);
1659
1660  object.load_item();
1661
1662  if (is_volatile || needs_patching) {
1663    // Load the item if the field is volatile (fewer special cases for volatiles),
1664    // if the field is not yet initialized,
1665    // or if the value is not a constant:
1666    // because of code patching we cannot inline constants.
1667    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1668      value.load_byte_item();
1669    } else  {
1670      value.load_item();
1671    }
1672  } else {
1673    value.load_for_store(field_type);
1674  }
1675
1676  set_no_result(x);
1677
1678#ifndef PRODUCT
1679  if (PrintNotLoaded && needs_patching) {
1680    tty->print_cr("   ###class not loaded at store_%s bci %d",
1681                  x->is_static() ?  "static" : "field", x->printable_bci());
1682  }
1683#endif
1684
1685  if (x->needs_null_check() &&
1686      (needs_patching ||
1687       MacroAssembler::needs_explicit_null_check(x->offset()))) {
1688    // emit an explicit null check because the offset is too large
1689    __ null_check(object.result(), new CodeEmitInfo(info));
1690  }
1691
1692  LIR_Address* address;
1693  if (needs_patching) {
1694    // we need to patch the offset in the instruction so don't allow
1695    // generate_address to try to be smart about emitting the -1.
1696    // Otherwise the patching code won't know how to find the
1697    // instruction to patch.
1698    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1699  } else {
1700    address = generate_address(object.result(), x->offset(), field_type);
1701  }
1702
1703  if (is_volatile && os::is_MP()) {
1704    __ membar_release();
1705  }
1706
1707  if (is_oop) {
1708    // Do the pre-write barrier, if any.
1709    pre_barrier(LIR_OprFact::address(address),
1710                LIR_OprFact::illegalOpr /* pre_val */,
1711                true /* do_load*/,
1712                needs_patching,
1713                (info ? new CodeEmitInfo(info) : NULL));
1714  }
1715
1716  if (is_volatile && !needs_patching) {
1717    volatile_field_store(value.result(), address, info);
1718  } else {
1719    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1720    __ store(value.result(), address, info, patch_code);
1721  }
1722
1723  if (is_oop) {
1724    // We stored an oop into the object, so mark the card for the object's header.
1725    post_barrier(object.result(), value.result());
1726  }
1727
1728  if (is_volatile && os::is_MP()) {
1729    __ membar();
1730  }
1731}
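// Net order for a volatile oop field store on MP hardware:
// membar_release, GC pre-barrier (if any), store, GC post-barrier
// (card mark), membar.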
1732
1733
1734void LIRGenerator::do_LoadField(LoadField* x) {
1735  bool needs_patching = x->needs_patching();
1736  bool is_volatile = x->field()->is_volatile();
1737  BasicType field_type = x->field_type();
1738
1739  CodeEmitInfo* info = NULL;
1740  if (needs_patching) {
1741    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1742    info = state_for(x, x->state_before());
1743  } else if (x->needs_null_check()) {
1744    NullCheck* nc = x->explicit_null_check();
1745    if (nc == NULL) {
1746      info = state_for(x);
1747    } else {
1748      info = state_for(nc);
1749    }
1750  }
1751
1752  LIRItem object(x->obj(), this);
1753
1754  object.load_item();
1755
1756#ifndef PRODUCT
1757  if (PrintNotLoaded && needs_patching) {
1758    tty->print_cr("   ###class not loaded at load_%s bci %d",
1759                  x->is_static() ?  "static" : "field", x->printable_bci());
1760  }
1761#endif
1762
1763  if (x->needs_null_check() &&
1764      (needs_patching ||
1765       MacroAssembler::needs_explicit_null_check(x->offset()))) {
1766    // emit an explicit null check because the offset is too large
1767    __ null_check(object.result(), new CodeEmitInfo(info));
1768  }
1769
1770  LIR_Opr reg = rlock_result(x, field_type);
1771  LIR_Address* address;
1772  if (needs_patching) {
1773    // we need to patch the offset in the instruction so don't allow
1774    // generate_address to try to be smart about emitting the -1.
1775    // Otherwise the patching code won't know how to find the
1776    // instruction to patch.
1777    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1778  } else {
1779    address = generate_address(object.result(), x->offset(), field_type);
1780  }
1781
1782  if (is_volatile && !needs_patching) {
1783    volatile_field_load(address, reg, info);
1784  } else {
1785    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1786    __ load(address, reg, info, patch_code);
1787  }
1788
1789  if (is_volatile && os::is_MP()) {
1790    __ membar_acquire();
1791  }
1792}
1793
1794
1795//------------------------java.nio.Buffer.checkIndex------------------------
1796
1797// int java.nio.Buffer.checkIndex(int)
1798void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1799  // NOTE: by the time we are in checkIndex() we are guaranteed that
1800  // the buffer is non-null (because checkIndex is package-private and
1801  // only called from within other methods in the buffer).
1802  assert(x->number_of_arguments() == 2, "wrong type");
1803  LIRItem buf  (x->argument_at(0), this);
1804  LIRItem index(x->argument_at(1), this);
1805  buf.load_item();
1806  index.load_item();
1807
1808  LIR_Opr result = rlock_result(x);
1809  if (GenerateRangeChecks) {
1810    CodeEmitInfo* info = state_for(x);
1811    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1812    if (index.result()->is_constant()) {
1813      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1814      __ branch(lir_cond_belowEqual, T_INT, stub);
1815    } else {
1816      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1817                  java_nio_Buffer::limit_offset(), T_INT, info);
1818      __ branch(lir_cond_aboveEqual, T_INT, stub);
1819    }
1820    __ move(index.result(), result);
1821  } else {
1822    // Just load the index into the result register
1823    __ move(index.result(), result);
1824  }
1825}
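// Note the unsigned (above/below) condition codes: treating the index
// as unsigned folds the 'index < 0' and 'index >= limit' tests into a
// single compare-and-branch to the range-check stub.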
1826
1827
1828//------------------------array access--------------------------------------
1829
1830
1831void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1832  LIRItem array(x->array(), this);
1833  array.load_item();
1834  LIR_Opr reg = rlock_result(x);
1835
1836  CodeEmitInfo* info = NULL;
1837  if (x->needs_null_check()) {
1838    NullCheck* nc = x->explicit_null_check();
1839    if (nc == NULL) {
1840      info = state_for(x);
1841    } else {
1842      info = state_for(nc);
1843    }
1844  }
1845  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1846}
1847
1848
1849void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1850  bool use_length = x->length() != NULL;
1851  LIRItem array(x->array(), this);
1852  LIRItem index(x->index(), this);
1853  LIRItem length(this);
1854  bool needs_range_check = true;
1855
1856  if (use_length) {
1857    needs_range_check = x->compute_needs_range_check();
1858    if (needs_range_check) {
1859      length.set_instruction(x->length());
1860      length.load_item();
1861    }
1862  }
1863
1864  array.load_item();
1865  if (index.is_constant() && can_inline_as_constant(x->index())) {
1866    // let it be a constant
1867    index.dont_load_item();
1868  } else {
1869    index.load_item();
1870  }
1871
1872  CodeEmitInfo* range_check_info = state_for(x);
1873  CodeEmitInfo* null_check_info = NULL;
1874  if (x->needs_null_check()) {
1875    NullCheck* nc = x->explicit_null_check();
1876    if (nc != NULL) {
1877      null_check_info = state_for(nc);
1878    } else {
1879      null_check_info = range_check_info;
1880    }
1881  }
1882
1883  // emit array address setup early so it schedules better
1884  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1885
1886  if (GenerateRangeChecks && needs_range_check) {
1887    if (use_length) {
1888      // TODO: use a (modified) version of array_range_check that does not require a
1889      //       constant length to be loaded to a register
1890      __ cmp(lir_cond_belowEqual, length.result(), index.result());
1891      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1892    } else {
1893      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1894      // The range check performs the null check, so clear it out for the load
1895      null_check_info = NULL;
1896    }
1897  }
1898
1899  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1900}
1901
1902
1903void LIRGenerator::do_NullCheck(NullCheck* x) {
1904  if (x->can_trap()) {
1905    LIRItem value(x->obj(), this);
1906    value.load_item();
1907    CodeEmitInfo* info = state_for(x);
1908    __ null_check(value.result(), info);
1909  }
1910}
1911
1912
1913void LIRGenerator::do_Throw(Throw* x) {
1914  LIRItem exception(x->exception(), this);
1915  exception.load_item();
1916  set_no_result(x);
1917  LIR_Opr exception_opr = exception.result();
1918  CodeEmitInfo* info = state_for(x, x->state());
1919
1920#ifndef PRODUCT
1921  if (PrintC1Statistics) {
1922    increment_counter(Runtime1::throw_count_address(), T_INT);
1923  }
1924#endif
1925
1926  // check if the instruction has an xhandler in any of the nested scopes
1927  bool unwind = false;
1928  if (info->exception_handlers()->length() == 0) {
1929    // this throw is not inside an xhandler
1930    unwind = true;
1931  } else {
1932    // get some idea of the throw type
1933    bool type_is_exact = true;
1934    ciType* throw_type = x->exception()->exact_type();
1935    if (throw_type == NULL) {
1936      type_is_exact = false;
1937      throw_type = x->exception()->declared_type();
1938    }
1939    if (throw_type != NULL && throw_type->is_instance_klass()) {
1940      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1941      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1942    }
1943  }
1944
1945  // do null check before moving exception oop into fixed register
1946  // to avoid a fixed interval with an oop during the null check.
1947  // Use a copy of the CodeEmitInfo because debug information is
1948  // different for null_check and throw.
1949  if (GenerateCompilerNullChecks &&
1950      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1951    // if the exception object wasn't created using new then it might be null.
1952    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1953  }
1954
1955  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1956    // we need to go through the exception lookup path to get JVMTI
1957    // notification done
1958    unwind = false;
1959  }
1960
1961  // move exception oop into fixed register
1962  __ move(exception_opr, exceptionOopOpr());
1963
1964  if (unwind) {
1965    __ unwind_exception(exceptionOopOpr());
1966  } else {
1967    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1968  }
1969}
1970
1971
1972void LIRGenerator::do_RoundFP(RoundFP* x) {
1973  LIRItem input(x->input(), this);
1974  input.load_item();
1975  LIR_Opr input_opr = input.result();
1976  assert(input_opr->is_register(), "why round if value is not in a register?");
1977  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1978  if (input_opr->is_single_fpu()) {
1979    set_result(x, round_item(input_opr)); // This code path not currently taken
1980  } else {
1981    LIR_Opr result = new_register(T_DOUBLE);
1982    set_vreg_flag(result, must_start_in_memory);
1983    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1984    set_result(x, result);
1985  }
1986}
1987
1988void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1989  LIRItem base(x->base(), this);
1990  LIRItem idx(this);
1991
1992  base.load_item();
1993  if (x->has_index()) {
1994    idx.set_instruction(x->index());
1995    idx.load_nonconstant();
1996  }
1997
1998  LIR_Opr reg = rlock_result(x, x->basic_type());
1999
2000  int   log2_scale = 0;
2001  if (x->has_index()) {
2002    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2003    log2_scale = x->log2_scale();
2004  }
2005
2006  assert(!x->has_index() || idx.value() == x->index(), "should match");
2007
2008  LIR_Opr base_op = base.result();
2009#ifndef _LP64
2010  if (x->base()->type()->tag() == longTag) {
2011    base_op = new_register(T_INT);
2012    __ convert(Bytecodes::_l2i, base.result(), base_op);
2013  } else {
2014    assert(x->base()->type()->tag() == intTag, "must be");
2015  }
2016#endif
2017
2018  BasicType dst_type = x->basic_type();
2019  LIR_Opr index_op = idx.result();
2020
2021  LIR_Address* addr;
2022  if (index_op->is_constant()) {
2023    assert(log2_scale == 0, "must not have a scale");
2024    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2025  } else {
2026#ifdef X86
2027#ifdef _LP64
2028    if (!index_op->is_illegal() && index_op->type() == T_INT) {
2029      LIR_Opr tmp = new_pointer_register();
2030      __ convert(Bytecodes::_i2l, index_op, tmp);
2031      index_op = tmp;
2032    }
2033#endif
2034    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2035#elif defined(ARM)
2036    addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2037#else
2038    if (index_op->is_illegal() || log2_scale == 0) {
2039#ifdef _LP64
2040      if (!index_op->is_illegal() && index_op->type() == T_INT) {
2041        LIR_Opr tmp = new_pointer_register();
2042        __ convert(Bytecodes::_i2l, index_op, tmp);
2043        index_op = tmp;
2044      }
2045#endif
2046      addr = new LIR_Address(base_op, index_op, dst_type);
2047    } else {
2048      LIR_Opr tmp = new_pointer_register();
2049      __ shift_left(index_op, log2_scale, tmp);
2050      addr = new LIR_Address(base_op, tmp, dst_type);
2051    }
2052#endif
2053  }
2054
2055  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2056    __ unaligned_move(addr, reg);
2057  } else {
2058    if (dst_type == T_OBJECT && x->is_wide()) {
2059      __ move_wide(addr, reg);
2060    } else {
2061      __ move(addr, reg);
2062    }
2063  }
2064}
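// Net effect, ignoring the platform-specific addressing variants above
// (sketch only):
//
//   reg = *(dst_type*)(base + ((intptr_t)index << log2_scale));
//
// with a 32-bit index sign-extended to 64 bits first on LP64 platforms.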
2065
2066
2067void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2068  int  log2_scale = 0;
2069  BasicType type = x->basic_type();
2070
2071  if (x->has_index()) {
2072    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2073    log2_scale = x->log2_scale();
2074  }
2075
2076  LIRItem base(x->base(), this);
2077  LIRItem value(x->value(), this);
2078  LIRItem idx(this);
2079
2080  base.load_item();
2081  if (x->has_index()) {
2082    idx.set_instruction(x->index());
2083    idx.load_item();
2084  }
2085
2086  if (type == T_BYTE || type == T_BOOLEAN) {
2087    value.load_byte_item();
2088  } else {
2089    value.load_item();
2090  }
2091
2092  set_no_result(x);
2093
2094  LIR_Opr base_op = base.result();
2095#ifndef _LP64
2096  if (x->base()->type()->tag() == longTag) {
2097    base_op = new_register(T_INT);
2098    __ convert(Bytecodes::_l2i, base.result(), base_op);
2099  } else {
2100    assert(x->base()->type()->tag() == intTag, "must be");
2101  }
2102#endif
2103
2104  LIR_Opr index_op = idx.result();
2105  if (log2_scale != 0) {
2106    // temporary fix (platform dependent code without shift on Intel would be better)
2107    index_op = new_pointer_register();
2108#ifdef _LP64
2109    if (idx.result()->type() == T_INT) {
2110      __ convert(Bytecodes::_i2l, idx.result(), index_op);
2111    } else {
2112#endif
2113      // TODO: ARM also allows embedded shift in the address
2114      __ move(idx.result(), index_op);
2115#ifdef _LP64
2116    }
2117#endif
2118    __ shift_left(index_op, log2_scale, index_op);
2119  }
2120#ifdef _LP64
2121  else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2122    LIR_Opr tmp = new_pointer_register();
2123    __ convert(Bytecodes::_i2l, index_op, tmp);
2124    index_op = tmp;
2125  }
2126#endif
2127
2128  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2129  __ move(value.result(), addr);
2130}
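// Symmetric to do_UnsafeGetRaw above; the store amounts to (sketch only)
//
//   *(type*)(base + ((intptr_t)index << log2_scale)) = value;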
2131
2132
2133void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2134  BasicType type = x->basic_type();
2135  LIRItem src(x->object(), this);
2136  LIRItem off(x->offset(), this);
2137
2138  off.load_item();
2139  src.load_item();
2140
2141  LIR_Opr reg = rlock_result(x, x->basic_type());
2142
2143  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
2144
2145#ifndef SERIALGC
2146  // We might be reading the value of the referent field of a
2147  // Reference object in order to attach it back to the live
2148  // object graph. If G1 is enabled then we need to record
2149  // the value that is being returned in an SATB log buffer.
2150  //
2151  // We need to generate code similar to the following...
2152  //
2153  // if (offset == java_lang_ref_Reference::referent_offset) {
2154  //   if (src != NULL) {
2155  //     if (klass(src)->reference_type() != REF_NONE) {
2156  //       pre_barrier(..., reg, ...);
2157  //     }
2158  //   }
2159  // }
2160  //
2161  // The first non-constant check of either the offset or
2162  // the src operand will be done here; the remainder
2163  // will take place in the generated code stub.
2164
2165  if (UseG1GC && type == T_OBJECT) {
2166    bool gen_code_stub = true;          // Assume we need to generate the slow code stub.
2167    bool gen_offset_check = true;       // Assume the code stub has to generate the offset guard.
2168    bool gen_source_check = true;       // Assume the code stub has to check the src object for null.
2169
2170    if (off.is_constant()) {
2171      jlong off_con = (off.type()->is_int() ?
2172                        (jlong) off.get_jint_constant() :
2173                        off.get_jlong_constant());
2174
2175
2176      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2177        // The constant offset is something other than referent_offset.
2178        // We can skip generating/checking the remaining guards and
2179        // skip generation of the code stub.
2180        gen_code_stub = false;
2181      } else {
2182        // The constant offset is the same as referent_offset -
2183        // we do not need to generate a runtime offset check.
2184        gen_offset_check = false;
2185      }
2186    }
2187
2188    // We don't need to generate the stub if the source object is an array.
2189    if (gen_code_stub && src.type()->is_array()) {
2190      gen_code_stub = false;
2191    }
2192
2193    if (gen_code_stub) {
2194      // We still need to continue with the checks.
2195      if (src.is_constant()) {
2196        ciObject* src_con = src.get_jobject_constant();
2197
2198        if (src_con->is_null_object()) {
2199          // The constant src object is null - We can skip
2200          // generating the code stub.
2201          gen_code_stub = false;
2202        } else {
2203          // Non-null constant source object. We still have to generate
2204          // the slow stub - but we don't need to generate the runtime
2205          // null object check.
2206          gen_source_check = false;
2207        }
2208      }
2209    }
2210
2211    if (gen_code_stub) {
2212      // Temporaries.
2213      LIR_Opr src_klass = new_register(T_OBJECT);
2214
2215      // Get the thread pointer for the pre-barrier
2216      LIR_Opr thread = getThreadPointer();
2217
2218      CodeStub* stub;
2219
2220      // We can generate at most one runtime check here. Let's start with
2221      // the offset check.
2222      if (gen_offset_check) {
2223        // if (offset == referent_offset) -> slow code stub
2224        // If offset is an int then we can do the comparison with the
2225        // referent_offset constant; otherwise we need to move
2226        // referent_offset into a temporary register and generate
2227        // a reg-reg compare.
2228
2229        LIR_Opr referent_off;
2230
2231        if (off.type()->is_int()) {
2232          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2233        } else {
2234          assert(off.type()->is_long(), "what else?");
2235          referent_off = new_register(T_LONG);
2236          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2237        }
2238
2239        __ cmp(lir_cond_equal, off.result(), referent_off);
2240
2241        // Optionally generate "src == null" check.
2242        stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2243                                                    src_klass, thread,
2244                                                    gen_source_check);
2245
2246        __ branch(lir_cond_equal, as_BasicType(off.type()), stub);
2247      } else {
2248        if (gen_source_check) {
2249          // offset is a const and equals referent offset
2250          // if (source != null) -> slow code stub
2251          __ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
2252
2253          // Since we are generating the "if src == null" guard here,
2254          // there is no need to generate the "src == null" check again.
2255          stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2256                                                    src_klass, thread,
2257                                                    false);
2258
2259          __ branch(lir_cond_notEqual, T_OBJECT, stub);
2260        } else {
2261          // We have statically determined that offset == referent_offset
2262          // && src != null, so we unconditionally branch to the code stub
2263          // to perform the guards and record reg in the SATB log buffer.
2264
2265          stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2266                                                    src_klass, thread,
2267                                                    false);
2268
2269          __ branch(lir_cond_always, T_ILLEGAL, stub);
2270        }
2271      }
2272
2273      // Continuation point
2274      __ branch_destination(stub->continuation());
2275    }
2276  }
2277#endif // SERIALGC
2278
2279  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2280}
2281
2282
2283void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2284  BasicType type = x->basic_type();
2285  LIRItem src(x->object(), this);
2286  LIRItem off(x->offset(), this);
2287  LIRItem data(x->value(), this);
2288
2289  src.load_item();
2290  if (type == T_BOOLEAN || type == T_BYTE) {
2291    data.load_byte_item();
2292  } else {
2293    data.load_item();
2294  }
2295  off.load_item();
2296
2297  set_no_result(x);
2298
2299  if (x->is_volatile() && os::is_MP()) __ membar_release();
2300  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2301  if (x->is_volatile() && os::is_MP()) __ membar();
2302}
2303
2304
2305void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2306  LIRItem src(x->object(), this);
2307  LIRItem off(x->offset(), this);
2308
2309  src.load_item();
2310  if (off.is_constant() && can_inline_as_constant(x->offset())) {
2311    // let it be a constant
2312    off.dont_load_item();
2313  } else {
2314    off.load_item();
2315  }
2316
2317  set_no_result(x);
2318
2319  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2320  __ prefetch(addr, is_store);
2321}
2322
2323
2324void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2325  do_UnsafePrefetch(x, false);
2326}
2327
2328
2329void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2330  do_UnsafePrefetch(x, true);
2331}
2332
2333
2334void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2335  int lng = x->length();
2336
2337  for (int i = 0; i < lng; i++) {
2338    SwitchRange* one_range = x->at(i);
2339    int low_key = one_range->low_key();
2340    int high_key = one_range->high_key();
2341    BlockBegin* dest = one_range->sux();
2342    if (low_key == high_key) {
2343      __ cmp(lir_cond_equal, value, low_key);
2344      __ branch(lir_cond_equal, T_INT, dest);
2345    } else if (high_key - low_key == 1) {
2346      __ cmp(lir_cond_equal, value, low_key);
2347      __ branch(lir_cond_equal, T_INT, dest);
2348      __ cmp(lir_cond_equal, value, high_key);
2349      __ branch(lir_cond_equal, T_INT, dest);
2350    } else {
2351      LabelObj* L = new LabelObj();
2352      __ cmp(lir_cond_less, value, low_key);
2353      __ branch(lir_cond_less, T_INT, L->label());
2354      __ cmp(lir_cond_lessEqual, value, high_key);
2355      __ branch(lir_cond_lessEqual, T_INT, dest);
2356      __ branch_destination(L->label());
2357    }
2358  }
2359  __ jump(default_sux);
2360}
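// For a multi-key range [lo..hi] -> dest the loop above emits (sketch):
//
//   if (value <  lo) goto next;    // lir_cond_less to L
//   if (value <= hi) goto dest;    // lir_cond_lessEqual
//   next:
//
// Single-key and two-key ranges use plain equality compares instead.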
2361
2362
2363SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2364  SwitchRangeList* res = new SwitchRangeList();
2365  int len = x->length();
2366  if (len > 0) {
2367    BlockBegin* sux = x->sux_at(0);
2368    int key = x->lo_key();
2369    BlockBegin* default_sux = x->default_sux();
2370    SwitchRange* range = new SwitchRange(key, sux);
2371    for (int i = 0; i < len; i++, key++) {
2372      BlockBegin* new_sux = x->sux_at(i);
2373      if (sux == new_sux) {
2374        // still in same range
2375        range->set_high_key(key);
2376      } else {
2377        // skip tests which explicitly dispatch to the default
2378        if (sux != default_sux) {
2379          res->append(range);
2380        }
2381        range = new SwitchRange(key, new_sux);
2382      }
2383      sux = new_sux;
2384    }
2385    if (res->length() == 0 || res->last() != range)  res->append(range);
2386  }
2387  return res;
2388}
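// Worked example (hypothetical): a tableswitch with lo_key = 0 and
// successors [B1, B1, B2, D, B3], where D is the default successor,
// folds into the ranges [0..1]->B1, [2..2]->B2 and [4..4]->B3; the
// explicit dispatch to the default at key 3 is dropped.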
2389
2390
2391// we expect the keys to be sorted by increasing value
2392SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2393  SwitchRangeList* res = new SwitchRangeList();
2394  int len = x->length();
2395  if (len > 0) {
2396    BlockBegin* default_sux = x->default_sux();
2397    int key = x->key_at(0);
2398    BlockBegin* sux = x->sux_at(0);
2399    SwitchRange* range = new SwitchRange(key, sux);
2400    for (int i = 1; i < len; i++) {
2401      int new_key = x->key_at(i);
2402      BlockBegin* new_sux = x->sux_at(i);
2403      if (key+1 == new_key && sux == new_sux) {
2404        // still in same range
2405        range->set_high_key(new_key);
2406      } else {
2407        // skip tests which explicitly dispatch to the default
2408        if (range->sux() != default_sux) {
2409          res->append(range);
2410        }
2411        range = new SwitchRange(new_key, new_sux);
2412      }
2413      key = new_key;
2414      sux = new_sux;
2415    }
2416    if (res->length() == 0 || res->last() != range)  res->append(range);
2417  }
2418  return res;
2419}
2420
2421
2422void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2423  LIRItem tag(x->tag(), this);
2424  tag.load_item();
2425  set_no_result(x);
2426
2427  if (x->is_safepoint()) {
2428    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2429  }
2430
2431  // move values into phi locations
2432  move_to_phi(x->state());
2433
2434  int lo_key = x->lo_key();
2435  int hi_key = x->hi_key();
2436  int len = x->length();
2437  LIR_Opr value = tag.result();
2438  if (UseTableRanges) {
2439    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2440  } else {
2441    for (int i = 0; i < len; i++) {
2442      __ cmp(lir_cond_equal, value, i + lo_key);
2443      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2444    }
2445    __ jump(x->default_sux());
2446  }
2447}
2448
2449
2450void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2451  LIRItem tag(x->tag(), this);
2452  tag.load_item();
2453  set_no_result(x);
2454
2455  if (x->is_safepoint()) {
2456    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2457  }
2458
2459  // move values into phi locations
2460  move_to_phi(x->state());
2461
2462  LIR_Opr value = tag.result();
2463  if (UseTableRanges) {
2464    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2465  } else {
2466    int len = x->length();
2467    for (int i = 0; i < len; i++) {
2468      __ cmp(lir_cond_equal, value, x->key_at(i));
2469      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2470    }
2471    __ jump(x->default_sux());
2472  }
2473}
2474
2475
2476void LIRGenerator::do_Goto(Goto* x) {
2477  set_no_result(x);
2478
2479  if (block()->next()->as_OsrEntry()) {
2480    // need to free up storage used for OSR entry point
2481    LIR_Opr osrBuffer = block()->next()->operand();
2482    BasicTypeList signature;
2483    signature.append(T_INT);
2484    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2485    __ move(osrBuffer, cc->args()->at(0));
2486    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2487                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2488  }
2489
2490  if (x->is_safepoint()) {
2491    ValueStack* state = x->state_before() ? x->state_before() : x->state();
2492
2493    // increment backedge counter if needed
2494    CodeEmitInfo* info = state_for(x, state);
2495    increment_backedge_counter(info, x->profiled_bci());
2496    CodeEmitInfo* safepoint_info = state_for(x, state);
2497    __ safepoint(safepoint_poll_register(), safepoint_info);
2498  }
2499
2500  // A Goto can be a folded If; handle this case.
2501  if (x->should_profile()) {
2502    ciMethod* method = x->profiled_method();
2503    assert(method != NULL, "method should be set if branch is profiled");
2504    ciMethodData* md = method->method_data_or_null();
2505    assert(md != NULL, "Sanity");
2506    ciProfileData* data = md->bci_to_data(x->profiled_bci());
2507    assert(data != NULL, "must have profiling data");
2508    int offset;
2509    if (x->direction() == Goto::taken) {
2510      assert(data->is_BranchData(), "need BranchData for two-way branches");
2511      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2512    } else if (x->direction() == Goto::not_taken) {
2513      assert(data->is_BranchData(), "need BranchData for two-way branches");
2514      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2515    } else {
2516      assert(data->is_JumpData(), "need JumpData for branches");
2517      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2518    }
2519    LIR_Opr md_reg = new_register(T_OBJECT);
2520    __ oop2reg(md->constant_encoding(), md_reg);
2521
2522    increment_counter(new LIR_Address(md_reg, offset,
2523                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2524  }
2525
2526  // emit phi-instruction move after safepoint since this simplifies
2527  // describing the state at the safepoint.
2528  move_to_phi(x->state());
2529
2530  __ jump(x->default_sux());
2531}
2532
2533
2534void LIRGenerator::do_Base(Base* x) {
2535  __ std_entry(LIR_OprFact::illegalOpr);
2536  // Emit moves from physical registers / stack slots to virtual registers
2537  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2538  IRScope* irScope = compilation()->hir()->top_scope();
2539  int java_index = 0;
2540  for (int i = 0; i < args->length(); i++) {
2541    LIR_Opr src = args->at(i);
2542    assert(!src->is_illegal(), "check");
2543    BasicType t = src->type();
2544
2545    // Types which are smaller than int are passed as int, so
2546    // correct the type that was passed.
2547    switch (t) {
2548    case T_BYTE:
2549    case T_BOOLEAN:
2550    case T_SHORT:
2551    case T_CHAR:
2552      t = T_INT;
2553      break;
2554    }
2555
2556    LIR_Opr dest = new_register(t);
2557    __ move(src, dest);
2558
2559    // Assign new location to Local instruction for this local
2560    Local* local = x->state()->local_at(java_index)->as_Local();
2561    assert(local != NULL, "Locals for incoming arguments must have been created");
2562#ifndef __SOFTFP__
2563    // The java calling convention passes double as long and float as int.
2564    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2565#endif // __SOFTFP__
2566    local->set_operand(dest);
2567    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2568    java_index += type2size[t];
2569  }
2570
2571  if (compilation()->env()->dtrace_method_probes()) {
2572    BasicTypeList signature;
2573    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2574    signature.append(T_OBJECT); // methodOop
2575    LIR_OprList* args = new LIR_OprList();
2576    args->append(getThreadPointer());
2577    LIR_Opr meth = new_register(T_OBJECT);
2578    __ oop2reg(method()->constant_encoding(), meth);
2579    args->append(meth);
2580    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2581  }
2582
2583  if (method()->is_synchronized()) {
2584    LIR_Opr obj;
2585    if (method()->is_static()) {
2586      obj = new_register(T_OBJECT);
2587      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2588    } else {
2589      Local* receiver = x->state()->local_at(0)->as_Local();
2590      assert(receiver != NULL, "must already exist");
2591      obj = receiver->operand();
2592    }
2593    assert(obj->is_valid(), "must be valid");
2594
2595    if (method()->is_synchronized() && GenerateSynchronizationCode) {
2596      LIR_Opr lock = new_register(T_INT);
2597      __ load_stack_address_monitor(0, lock);
2598
2599      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2600      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2601
2602      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2603      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2604    }
2605  }
2606
2607  // increment invocation counters if needed
2608  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2609    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2610    increment_invocation_counter(info);
2611  }
2612
2613  // all blocks with a successor must end with an unconditional jump
2614  // to the successor even if they are consecutive
2615  __ jump(x->default_sux());
2616}
2617
2618
2619void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2620  // construct our frame and model the production of the incoming pointer
2621  // to the OSR buffer.
2622  __ osr_entry(LIR_Assembler::osrBufferPointer());
2623  LIR_Opr result = rlock_result(x);
2624  __ move(LIR_Assembler::osrBufferPointer(), result);
2625}
2626
2627
2628void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2629  int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
2630  for (; i < args->length(); i++) {
2631    LIRItem* param = args->at(i);
2632    LIR_Opr loc = arg_list->at(i);
2633    if (loc->is_register()) {
2634      param->load_item_force(loc);
2635    } else {
2636      LIR_Address* addr = loc->as_address_ptr();
2637      param->load_for_store(addr->type());
2638      if (addr->type() == T_OBJECT) {
2639        __ move_wide(param->result(), addr);
2640      } else
2641        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2642          __ unaligned_move(param->result(), addr);
2643        } else {
2644          __ move(param->result(), addr);
2645        }
2646    }
2647  }
2648
2649  if (x->has_receiver()) {
2650    LIRItem* receiver = args->at(0);
2651    LIR_Opr loc = arg_list->at(0);
2652    if (loc->is_register()) {
2653      receiver->load_item_force(loc);
2654    } else {
2655      assert(loc->is_address(), "just checking");
2656      receiver->load_for_store(T_OBJECT);
2657      __ move_wide(receiver->result(), loc->as_address_ptr());
2658    }
2659  }
2660}
2661
2662
2663// Visits all arguments, returns appropriate items without loading them
2664LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2665  LIRItemList* argument_items = new LIRItemList();
2666  if (x->has_receiver()) {
2667    LIRItem* receiver = new LIRItem(x->receiver(), this);
2668    argument_items->append(receiver);
2669  }
2670  if (x->is_invokedynamic()) {
2671    // Insert a dummy for the synthetic MethodHandle argument.
2672    argument_items->append(NULL);
2673  }
2674  int idx = x->has_receiver() ? 1 : 0;
2675  for (int i = 0; i < x->number_of_arguments(); i++) {
2676    LIRItem* param = new LIRItem(x->argument_at(i), this);
2677    argument_items->append(param);
2678    idx += (param->type()->is_double_word() ? 2 : 1);
2679  }
2680  return argument_items;
2681}
2682
2683
2684// An invoke with a receiver has the following phases:
2685//   a) traverse and load/lock receiver;
2686//   b) traverse all arguments -> item-array (invoke_visit_argument)
2687//   c) push receiver on stack
2688//   d) load each of the items and push on stack
2689//   e) unlock receiver
2690//   f) move receiver into receiver-register %o0
2691//   g) lock result registers and emit call operation
2692//
2693// Before issuing a call, we must spill-save all values on the stack
2694// that are in caller-save registers. "spill-save" moves those values
2695// either into a free callee-save register or spills them if no free
2696// callee-save register is available.
2697//
2698// The problem is where to invoke spill-save.
2699// - if invoked between e) and f), we may lock a callee-save
2700//   register in "spill-save" that destroys the receiver register
2701//   before f) is executed
2702// - if we rearrange f) to be earlier, by loading %o0, it
2703//   may destroy a value on the stack that is currently in %o0
2704//   and is waiting to be spilled
2705// - if we keep the receiver locked while doing spill-save,
2706//   we cannot spill it as it is spill-locked
2707//
2708void LIRGenerator::do_Invoke(Invoke* x) {
2709  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2710
2711  LIR_OprList* arg_list = cc->args();
2712  LIRItemList* args = invoke_visit_arguments(x);
2713  LIR_Opr receiver = LIR_OprFact::illegalOpr;
2714
2715  // setup result register
2716  LIR_Opr result_register = LIR_OprFact::illegalOpr;
2717  if (x->type() != voidType) {
2718    result_register = result_register_for(x->type());
2719  }
2720
2721  CodeEmitInfo* info = state_for(x, x->state());
2722
2723  // invokedynamics can deoptimize.
2724  CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2725
2726  invoke_load_arguments(x, args, arg_list);
2727
2728  if (x->has_receiver()) {
2729    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2730    receiver = args->at(0)->result();
2731  }
2732
2733  // emit invoke code
2734  bool optimized = x->target_is_loaded() && x->target_is_final();
2735  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2736
2737  // JSR 292
2738  // Preserve the SP over MethodHandle call sites.
2739  ciMethod* target = x->target();
2740  if (target->is_method_handle_invoke()) {
2741    info->set_is_method_handle_invoke(true);
2742    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2743  }
2744
2745  switch (x->code()) {
2746    case Bytecodes::_invokestatic:
2747      __ call_static(target, result_register,
2748                     SharedRuntime::get_resolve_static_call_stub(),
2749                     arg_list, info);
2750      break;
2751    case Bytecodes::_invokespecial:
2752    case Bytecodes::_invokevirtual:
2753    case Bytecodes::_invokeinterface:
2754      // for final target we still produce an inline cache, in order
2755      // to be able to call mixed mode
2756      if (x->code() == Bytecodes::_invokespecial || optimized) {
2757        __ call_opt_virtual(target, receiver, result_register,
2758                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
2759                            arg_list, info);
2760      } else if (x->vtable_index() < 0) {
2761        __ call_icvirtual(target, receiver, result_register,
2762                          SharedRuntime::get_resolve_virtual_call_stub(),
2763                          arg_list, info);
2764      } else {
2765        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2766        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2767        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2768      }
2769      break;
2770    case Bytecodes::_invokedynamic: {
2771      ciBytecodeStream bcs(x->scope()->method());
2772      bcs.force_bci(x->state()->bci());
2773      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2774      ciCPCache* cpcache = bcs.get_cpcache();
2775
2776      // Get CallSite offset from constant pool cache pointer.
2777      int index = bcs.get_method_index();
2778      size_t call_site_offset = cpcache->get_f1_offset(index);
2779
2780      // If this invokedynamic call site hasn't been executed yet in
2781      // the interpreter, the CallSite object in the constant pool
2782      // cache is still null and we need to deoptimize.
2783      if (cpcache->is_f1_null_at(index)) {
2784        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
2785        // clone all handlers.  This is handled transparently in other
2786        // places by the CodeEmitInfo cloning logic but is handled
2787        // specially here because a stub isn't being used.
2788        x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2789
2790        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2791        __ jump(deopt_stub);
2792      }
2793
2794      // Use the receiver register for the synthetic MethodHandle
2795      // argument.
2796      receiver = LIR_Assembler::receiverOpr();
2797      LIR_Opr tmp = new_register(objectType);
2798
2799      // Load CallSite object from constant pool cache.
2800      __ oop2reg(cpcache->constant_encoding(), tmp);
2801      __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2802
2803      // Load target MethodHandle from CallSite object.
2804      __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2805
2806      __ call_dynamic(target, receiver, result_register,
2807                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
2808                      arg_list, info);
2809      break;
2810    }
2811    default:
2812      ShouldNotReachHere();
2813      break;
2814  }
2815
2816  // JSR 292
2817  // Restore the SP after MethodHandle call sites.
2818  if (target->is_method_handle_invoke()) {
2819    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2820  }
2821
2822  if (x->type()->is_float() || x->type()->is_double()) {
2823    // Force rounding of results from non-strictfp when in strictfp
2824    // scope (or when we don't know the strictness of the callee, to
2825    // be safe.)
2826    if (method()->is_strict()) {
2827      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2828        result_register = round_item(result_register);
2829      }
2830    }
2831  }
2832
2833  if (result_register->is_valid()) {
2834    LIR_Opr result = rlock_result(x);
2835    __ move(result_register, result);
2836  }
2837}
2838
2839
2840void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2841  assert(x->number_of_arguments() == 1, "wrong type");
2842  LIRItem value       (x->argument_at(0), this);
2843  LIR_Opr reg = rlock_result(x);
2844  value.load_item();
2845  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2846  __ move(tmp, reg);
2847}
2848
2849
2850
2851// Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2852void LIRGenerator::do_IfOp(IfOp* x) {
2853#ifdef ASSERT
2854  {
2855    ValueTag xtag = x->x()->type()->tag();
2856    ValueTag ttag = x->tval()->type()->tag();
2857    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2858    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2859    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2860  }
2861#endif
2862
2863  LIRItem left(x->x(), this);
2864  LIRItem right(x->y(), this);
2865  left.load_item();
2866  if (can_inline_as_constant(right.value())) {
2867    right.dont_load_item();
2868  } else {
2869    right.load_item();
2870  }
2871
2872  LIRItem t_val(x->tval(), this);
2873  LIRItem f_val(x->fval(), this);
2874  t_val.dont_load_item();
2875  f_val.dont_load_item();
2876  LIR_Opr reg = rlock_result(x);
2877
2878  __ cmp(lir_cond(x->cond()), left.result(), right.result());
2879  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2880}
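// e.g. a side-effect-free Java conditional expression (a < b) ? t : f
// lowers to roughly:
//
//   cmp   lir_cond_less, a, b
//   cmove lir_cond_less, t, f, result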
2881
2882void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
2883    assert(x->number_of_arguments() == expected_arguments, "wrong type");
2884    LIR_Opr reg = result_register_for(x->type());
2885    __ call_runtime_leaf(routine, getThreadTemp(),
2886                         reg, new LIR_OprList());
2887    LIR_Opr result = rlock_result(x);
2888    __ move(reg, result);
2889}
2890
2891#ifdef TRACE_HAVE_INTRINSICS
2892void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
2893    LIR_Opr thread = getThreadPointer();
2894    LIR_Opr osthread = new_pointer_register();
2895    __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
2896    size_t thread_id_size = OSThread::thread_id_size();
2897    if (thread_id_size == (size_t) BytesPerLong) {
2898      LIR_Opr id = new_register(T_LONG);
2899      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
2900      __ convert(Bytecodes::_l2i, id, rlock_result(x));
2901    } else if (thread_id_size == (size_t) BytesPerInt) {
2902      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
2903    } else {
2904      ShouldNotReachHere();
2905    }
2906}
2907
2908void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2909    CodeEmitInfo* info = state_for(x);
2910    CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2911    assert(info != NULL, "must have info");
2912    LIRItem arg(x->argument_at(1), this);
2913    arg.load_item();
2914    LIR_Opr klass = new_register(T_OBJECT);
2915    __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info);
2916    LIR_Opr id = new_register(T_LONG);
2917    ByteSize offset = TRACE_ID_OFFSET;
2918    LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2919    __ move(trace_id_addr, id);
2920    __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2921    __ store(id, trace_id_addr);
2922    __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
2923    __ move(id, rlock_result(x));
2924}
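// The trace id is tagged in its low bits: bit 0 is OR'ed in before the
// id is stored back (marking the klass id as used), and both low tag
// bits are masked off (~0x3) before the id is returned to the caller.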
2925#endif
2926
2927void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2928  switch (x->id()) {
2929  case vmIntrinsics::_intBitsToFloat      :
2930  case vmIntrinsics::_doubleToRawLongBits :
2931  case vmIntrinsics::_longBitsToDouble    :
2932  case vmIntrinsics::_floatToRawIntBits   : {
2933    do_FPIntrinsics(x);
2934    break;
2935  }
2936
2937#ifdef TRACE_HAVE_INTRINSICS
2938  case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
2939  case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
2940  case vmIntrinsics::_counterTime:
2941    do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
2942    break;
2943#endif
2944
2945  case vmIntrinsics::_currentTimeMillis:
2946    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
2947    break;
2948
2949  case vmIntrinsics::_nanoTime:
2950    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
2951    break;
2952
2953  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
2954  case vmIntrinsics::_getClass:       do_getClass(x);      break;
2955  case vmIntrinsics::_currentThread:  do_currentThread(x); break;
2956
2957  case vmIntrinsics::_dlog:           // fall through
2958  case vmIntrinsics::_dlog10:         // fall through
2959  case vmIntrinsics::_dabs:           // fall through
2960  case vmIntrinsics::_dsqrt:          // fall through
2961  case vmIntrinsics::_dtan:           // fall through
2962  case vmIntrinsics::_dsin :          // fall through
2963  case vmIntrinsics::_dcos :          do_MathIntrinsic(x); break;
2964  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
2965
2966  // java.nio.Buffer.checkIndex
2967  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
2968
2969  case vmIntrinsics::_compareAndSwapObject:
2970    do_CompareAndSwap(x, objectType);
2971    break;
2972  case vmIntrinsics::_compareAndSwapInt:
2973    do_CompareAndSwap(x, intType);
2974    break;
2975  case vmIntrinsics::_compareAndSwapLong:
2976    do_CompareAndSwap(x, longType);
2977    break;
2978
2979    // sun.misc.AtomicLongCSImpl.attemptUpdate
2980  case vmIntrinsics::_attemptUpdate:
2981    do_AttemptUpdate(x);
2982    break;
2983
2984  case vmIntrinsics::_Reference_get:
2985    do_Reference_get(x);
2986    break;
2987
2988  default: ShouldNotReachHere(); break;
2989  }
2990}
2991
2992void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2993  // Need recv in a temporary register so it interferes with the other temporaries
2994  LIR_Opr recv = LIR_OprFact::illegalOpr;
2995  LIR_Opr mdo = new_register(T_OBJECT);
2996  // tmp is used to hold the counters on SPARC
2997  LIR_Opr tmp = new_pointer_register();
2998  if (x->recv() != NULL) {
2999    LIRItem value(x->recv(), this);
3000    value.load_item();
3001    recv = new_register(T_OBJECT);
3002    __ move(value.result(), recv);
3003  }
3004  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
3005}
3006
3007void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3008  // We can safely ignore accessors here, since c2 will inline them anyway;
3009  // accessors are also always mature.
3010  if (!x->inlinee()->is_accessor()) {
3011    CodeEmitInfo* info = state_for(x, x->state(), true);
3012    // Notify the runtime very infrequently only to take care of counter overflows
3013    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3014  }
3015}
3016
3017void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3018  int freq_log;
3019  int level = compilation()->env()->comp_level();
3020  if (level == CompLevel_limited_profile) {
3021    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3022  } else if (level == CompLevel_full_profile) {
3023    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3024  } else {
3025    ShouldNotReachHere();
3026  }
3027  // Increment the appropriate invocation/backedge counter and notify the runtime.
3028  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3029}
3030
3031void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3032                                                ciMethod *method, int frequency,
3033                                                int bci, bool backedge, bool notify) {
3034  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3035  int level = _compilation->env()->comp_level();
3036  assert(level > CompLevel_simple, "Shouldn't be here");

  int offset = -1;
  LIR_Opr counter_holder = new_register(T_OBJECT);
  LIR_Opr meth;
  if (level == CompLevel_limited_profile) {
    offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
                                 methodOopDesc::invocation_counter_offset());
    __ oop2reg(method->constant_encoding(), counter_holder);
    meth = counter_holder;
  } else if (level == CompLevel_full_profile) {
    offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
                                 methodDataOopDesc::invocation_counter_offset());
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    __ oop2reg(md->constant_encoding(), counter_holder);
    meth = new_register(T_OBJECT);
    __ oop2reg(method->constant_encoding(), meth);
  } else {
    ShouldNotReachHere();
  }
  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
  __ store(result, counter);
  if (notify) {
    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
    __ logical_and(result, mask, result);
    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
    // The bci for the info can point at the cmp; for ifs we want the bci of the if itself
    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
    __ branch(lir_cond_equal, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
}
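
// Roughly, the LIR emitted above for a notifying counter update is:
//
//   load   [counter_holder + offset]            -> result
//   add    result, count_increment              -> result
//   store  result                               -> [counter_holder + offset]
//   and    result, (frequency << count_shift)   -> result
//   cmp    result, 0
//   beq    CounterOverflowStub                  // taken when the masked bits are zero
//   <overflow->continuation()>
//
// so the runtime is entered only when the counter crosses a 2^n boundary.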

void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());

  if (x->pass_thread()) {
    signature->append(T_ADDRESS);
    args->append(getThreadPointer());
  }

  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value a = x->argument_at(i);
    LIRItem* item = new LIRItem(a, this);
    item->load_item();
    args->append(item->result());
    signature->append(as_BasicType(a->type()));
  }

  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(result, rlock_result(x));
  }
}
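
// When pass_thread() is set, the current thread pointer is prepended as a
// T_ADDRESS argument ahead of the Java-visible arguments, matching runtime
// entry points whose first C parameter is the JavaThread*.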

LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}
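
// A hypothetical use of the two-argument overload (the surrounding names are
// for illustration only): a long remainder lowered to a leaf runtime call
// could be emitted as
//
//   LIR_Opr result = call_runtime(x->x(), x->y(),
//                                 CAST_FROM_FN_PTR(address, SharedRuntime::lrem),
//                                 x->type(), NULL);
//   set_result(x, result);
//
// with the NULL CodeEmitInfo selecting the leaf (no safepoint) call path below.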


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      // if (!can_store_as_constant(arg)) {
      //   LIR_Opr tmp = new_register(arg->type());
      //   __ move(arg, tmp);
      //   arg = tmp;
      // }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
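
// The detour through phys_reg is intentional: the callee's return value lands
// in the fixed ABI register chosen by result_register_for(), and is copied
// straight into a fresh virtual register so that later LIR does not tie up
// the physical register across register allocation.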


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
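
// Unlike the LIR_OprList variant above, this overload works with unloaded
// LIRItems: register arguments are forced directly into their calling
// convention locations via load_item_force(), and stack arguments are
// materialized with load_for_store() before the move, which can avoid a
// separate temporary per argument.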

void LIRGenerator::do_MemBar(MemBar* x) {
  if (os::is_MP()) {
    LIR_Code code = x->code();
    switch(code) {
      case lir_membar_acquire   : __ membar_acquire(); break;
      case lir_membar_release   : __ membar_release(); break;
      case lir_membar           : __ membar(); break;
      case lir_membar_loadload  : __ membar_loadload(); break;
      case lir_membar_storestore: __ membar_storestore(); break;
      case lir_membar_loadstore : __ membar_loadstore(); break;
      case lir_membar_storeload : __ membar_storeload(); break;
      default                   : ShouldNotReachHere(); break;
    }
  }
}
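
// Note that all of the barriers above are conditional on os::is_MP(): on a
// uniprocessor no fence instructions are emitted at all, since a single CPU
// always observes its own program order. lir_membar is the full fence;
// acquire/release give one-directional ordering, and the four load/store
// variants give finer-grained ordering where the platform supports it.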