c1_LIR.hpp revision 0:a61af66fc99e
1/*
2 * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25class BlockBegin;
26class BlockList;
27class LIR_Assembler;
28class CodeEmitInfo;
29class CodeStub;
30class CodeStubList;
31class ArrayCopyStub;
32class LIR_Op;
33class ciType;
34class ValueType;
35class LIR_OpVisitState;
36class FpuStackSim;
37
38//---------------------------------------------------------------------
39//                 LIR Operands
40//  LIR_OprDesc
41//    LIR_OprPtr
42//      LIR_Const
43//      LIR_Address
44//---------------------------------------------------------------------
45class LIR_OprDesc;
46class LIR_OprPtr;
47class LIR_Const;
48class LIR_Address;
49class LIR_OprVisitor;
50
51
52typedef LIR_OprDesc* LIR_Opr;
53typedef int          RegNr;
54
55define_array(LIR_OprArray, LIR_Opr)
56define_stack(LIR_OprList, LIR_OprArray)
57
58define_array(LIR_OprRefArray, LIR_Opr*)
59define_stack(LIR_OprRefList, LIR_OprRefArray)
60
61define_array(CodeEmitInfoArray, CodeEmitInfo*)
62define_stack(CodeEmitInfoList, CodeEmitInfoArray)
63
64define_array(LIR_OpArray, LIR_Op*)
65define_stack(LIR_OpList, LIR_OpArray)
66
67// define LIR_OprPtr early so LIR_OprDesc can refer to it
68class LIR_OprPtr: public CompilationResourceObj {
69 public:
70  bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
71  bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
72
73  virtual LIR_Const*  as_constant()              { return NULL; }
74  virtual LIR_Address* as_address()              { return NULL; }
75  virtual BasicType type() const                 = 0;
76  virtual void print_value_on(outputStream* out) const = 0;
77};
78
79
80
81// LIR constants
82class LIR_Const: public LIR_OprPtr {
83 private:
84  JavaValue _value;
85
86  void type_check(BasicType t) const   { assert(type() == t, "type check"); }
87  void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
88
89 public:
90  LIR_Const(jint i)                              { _value.set_type(T_INT);     _value.set_jint(i); }
91  LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
92  LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
93  LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
94  LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
95  LIR_Const(void* p) {
96#ifdef _LP64
97    assert(sizeof(jlong) >= sizeof(p), "too small");
98    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
99#else
100    assert(sizeof(jint) >= sizeof(p), "too small");
101    _value.set_type(T_INT);     _value.set_jint((jint)p);
102#endif
103  }
104
105  virtual BasicType type()       const { return _value.get_type(); }
106  virtual LIR_Const* as_constant()     { return this; }
107
108  jint      as_jint()    const         { type_check(T_INT   ); return _value.get_jint(); }
109  jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
110  jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
111  jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
112  jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
113  jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
114  jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
115
116#ifdef _LP64
117  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
118#else
119  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
120#endif
121
122
123  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
124  jint      as_jint_lo_bits() const    {
125    if (type() == T_DOUBLE) {
126      return low(jlong_cast(_value.get_jdouble()));
127    } else {
128      return as_jint_lo();
129    }
130  }
131  jint      as_jint_hi_bits() const    {
132    if (type() == T_DOUBLE) {
133      return high(jlong_cast(_value.get_jdouble()));
134    } else {
135      return as_jint_hi();
136    }
137  }
138
139  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
140
141
142  bool is_zero_float() {
143    jfloat f = as_jfloat();
144    jfloat ok = 0.0f;
145    return jint_cast(f) == jint_cast(ok);
146  }
147
148  bool is_one_float() {
149    jfloat f = as_jfloat();
150    return !g_isnan(f) && g_isfinite(f) && f == 1.0;
151  }
152
153  bool is_zero_double() {
154    jdouble d = as_jdouble();
155    jdouble ok = 0.0;
156    return jlong_cast(d) == jlong_cast(ok);
157  }
158
159  bool is_one_double() {
160    jdouble d = as_jdouble();
161    return !g_isnan(d) && g_isfinite(d) && d == 1.0;
162  }
163};
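
// Illustrative sketch (not part of the original header): a LIR_Const is normally
// created through LIR_OprFact (defined further below) and read back through the
// typed accessors above; the type_check() asserts catch mismatched reads in
// debug builds. Names here are hypothetical.
//
//   LIR_Opr c = LIR_OprFact::intConst(42);        // wraps new LIR_Const(42)
//   assert(c->is_constant(), "constants are pointer operands");
//   jint v = c->as_constant_ptr()->as_jint();     // returns 42
//   // c->as_constant_ptr()->as_jlong() would assert: the type is T_INT, not T_LONG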
164
165
166//---------------------LIR Operand descriptor------------------------------------
167//
168// The class LIR_OprDesc represents a LIR instruction operand;
169// it can be a register (ALU/FPU), a stack location, or a constant.
170// Constants and addresses are represented as resource-area-allocated
171// structures (see above), while registers and stack locations are
172// encoded directly in the 'this' pointer itself
173// (see the value() function below).
174
175class LIR_OprDesc: public CompilationResourceObj {
176 public:
177  // value structure:
178  //     data       opr-type opr-kind
179  // +--------------+-------+-------+
180  // [max...........|7 6 5 4|3 2 1 0]
181  //                             ^
182  //                    is_pointer bit
183  //
184// if the lowest bit is cleared, the value is a pointer to a structure
185// 4 bits are needed to represent the operand types
186
187 private:
188  friend class LIR_OprFact;
189
190  // Conversion
191  intptr_t value() const                         { return (intptr_t) this; }
192
193  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
194    return (value() & mask) == masked_value;
195  }
196
197  enum OprKind {
198      pointer_value      = 0
199    , stack_value        = 1
200    , cpu_register       = 3
201    , fpu_register       = 5
202    , illegal_value      = 7
203  };
204
205  enum OprBits {
206      pointer_bits   = 1
207    , kind_bits      = 3
208    , type_bits      = 4
209    , size_bits      = 2
210    , destroys_bits  = 1
211    , virtual_bits   = 1
212    , is_xmm_bits    = 1
213    , last_use_bits  = 1
214    , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
215    , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
216                       is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
217    , data_bits      = BitsPerInt - non_data_bits
218    , reg_bits       = data_bits / 2      // for two registers in one value encoding
219  };
220
221  enum OprShift {
222      kind_shift     = 0
223    , type_shift     = kind_shift     + kind_bits
224    , size_shift     = type_shift     + type_bits
225    , destroys_shift = size_shift     + size_bits
226    , last_use_shift = destroys_shift + destroys_bits
227    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
228    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
229    , is_xmm_shift   = virtual_shift + virtual_bits
230    , data_shift     = is_xmm_shift + is_xmm_bits
231    , reg1_shift = data_shift
232    , reg2_shift = data_shift + reg_bits
233
234  };
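
  // Resulting bit layout, derived from the OprBits/OprShift enums above
  // (with BitsPerInt == 32, data_bits is 18 and reg_bits is 9):
  //
  //   bits  0.. 2  kind                 (pointer/stack/cpu/fpu/illegal)
  //   bits  3.. 6  type                 (OprType, see below)
  //   bits  7.. 8  size                 (single/double)
  //   bit   9      destroys
  //   bit  10      last_use
  //   bit  11      is_fpu_stack_offset
  //   bit  12      virtual
  //   bit  13      is_xmm
  //   bits 14..    data                 (register number(s) or stack index)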
235
236  enum OprSize {
237      single_size = 0 << size_shift
238    , double_size = 1 << size_shift
239  };
240
241  enum OprMask {
242      kind_mask      = right_n_bits(kind_bits)
243    , type_mask      = right_n_bits(type_bits) << type_shift
244    , size_mask      = right_n_bits(size_bits) << size_shift
245    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
246    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
247    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
248    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
249    , pointer_mask   = right_n_bits(pointer_bits)
250    , lower_reg_mask = right_n_bits(reg_bits)
251    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
252  };
253
254  uintptr_t data() const                         { return value() >> data_shift; }
255  int lo_reg_half() const                        { return data() & lower_reg_mask; }
256  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
257  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
258  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
259
260  static char type_char(BasicType t);
261
262 public:
263  enum {
264    vreg_base = ConcreteRegisterImpl::number_of_registers,
265    vreg_max = (1 << data_bits) - 1
266  };
267
268  static inline LIR_Opr illegalOpr();
269
270  enum OprType {
271      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
272    , int_type      = 1 << type_shift
273    , long_type     = 2 << type_shift
274    , object_type   = 3 << type_shift
275    , pointer_type  = 4 << type_shift
276    , float_type    = 5 << type_shift
277    , double_type   = 6 << type_shift
278  };
279  friend OprType as_OprType(BasicType t);
280  friend BasicType as_BasicType(OprType t);
281
282  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
283  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
284
285  static OprSize size_for(BasicType t) {
286    switch (t) {
287      case T_LONG:
288      case T_DOUBLE:
289        return double_size;
290        break;
291
292      case T_FLOAT:
293      case T_BOOLEAN:
294      case T_CHAR:
295      case T_BYTE:
296      case T_SHORT:
297      case T_INT:
298      case T_OBJECT:
299      case T_ARRAY:
300        return single_size;
301        break;
302
303      default:
304        ShouldNotReachHere();
305      }
306  }
307
308
309  void validate_type() const PRODUCT_RETURN;
310
311  BasicType type() const {
312    if (is_pointer()) {
313      return pointer()->type();
314    }
315    return as_BasicType(type_field());
316  }
317
318
319  ValueType* value_type() const                  { return as_ValueType(type()); }
320
321  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
322
323  bool is_equal(LIR_Opr opr) const         { return this == opr; }
324  // checks whether the types are the same
325  bool is_same_type(LIR_Opr opr) const     {
326    assert(type_field() != unknown_type &&
327           opr->type_field() != unknown_type, "shouldn't see unknown_type");
328    return type_field() == opr->type_field();
329  }
330  bool is_same_register(LIR_Opr opr) {
331    return (is_register() && opr->is_register() &&
332            kind_field() == opr->kind_field() &&
333            (value() & no_type_mask) == (opr->value() & no_type_mask));
334  }
335
336  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
337  bool is_illegal() const      { return kind_field() == illegal_value; }
338  bool is_valid() const        { return kind_field() != illegal_value; }
339
340  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
341  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
342
343  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
344  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
345
346  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
347  bool is_oop() const;
348
349  // semantics for fpu- and xmm-registers:
350  // * the fpu predicates also return true for xmm registers
351  //   (e.g. is_single_fpu() and is_single_xmm() are both true for a single xmm register)
352  // * so always check is_???_xmm() before is_???_fpu() to
353  //   distinguish between fpu- and xmm-registers
354
355  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
356  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
357  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
358
359  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
360  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
361  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
362  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
363  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
364
365  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
366  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
367  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
368  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
369  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
370
371  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
372  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
373  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
374
375  // fast accessor functions for special bits that do not work for pointers
376  // (in these functions, the check for is_pointer() is omitted)
377  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
378  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
379  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
380  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
381  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
382
383  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
384  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
385  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
386  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
387
388
389  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
390  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
391  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
392  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
393  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
394  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
395  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
396  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
397  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
398  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
399  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
400  int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
401
402  LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
403  LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
404  LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
405
406  Register as_register()    const;
407  Register as_register_lo() const;
408  Register as_register_hi() const;
409
410  Register as_pointer_register() {
411#ifdef _LP64
412    if (is_double_cpu()) {
413      assert(as_register_lo() == as_register_hi(), "should be a single register");
414      return as_register_lo();
415    }
416#endif
417    return as_register();
418  }
419
420#ifdef IA32
421  XMMRegister as_xmm_float_reg() const;
422  XMMRegister as_xmm_double_reg() const;
423  // for compatibility with RInfo
424  int fpu () const                                  { return lo_reg_half(); }
425#endif
426
427#ifdef SPARC
428  FloatRegister as_float_reg   () const;
429  FloatRegister as_double_reg  () const;
430#endif
431
432  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
433  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
434  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
435  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
436  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
437
438  void print() const PRODUCT_RETURN;
439  void print(outputStream* out) const PRODUCT_RETURN;
440};
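
// Illustrative sketch (not part of the original header): typical client code first
// classifies an operand and then uses the matching accessor; using the wrong
// accessor trips the type-check asserts in debug builds. The function and variable
// names below are hypothetical.
//
//   void example(LIR_Opr opr) {
//     if (opr->is_constant()) {
//       jint v = opr->as_jint();                  // forwards to the underlying LIR_Const
//     } else if (opr->is_address()) {
//       LIR_Address* a = opr->as_address_ptr();   // base/index/disp form
//     } else if (opr->is_virtual()) {
//       int vreg = opr->vreg_number();            // virtual register, before allocation
//     } else if (opr->is_single_cpu()) {
//       Register r = opr->as_register();          // fixed (physical) cpu register
//     } else if (opr->is_single_stack()) {
//       int slot = opr->single_stack_ix();        // spill slot index
//     }
//   }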
441
442
443inline LIR_OprDesc::OprType as_OprType(BasicType type) {
444  switch (type) {
445  case T_INT:      return LIR_OprDesc::int_type;
446  case T_LONG:     return LIR_OprDesc::long_type;
447  case T_FLOAT:    return LIR_OprDesc::float_type;
448  case T_DOUBLE:   return LIR_OprDesc::double_type;
449  case T_OBJECT:
450  case T_ARRAY:    return LIR_OprDesc::object_type;
451  case T_ILLEGAL:  // fall through
452  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
453  }
454}
455
456inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
457  switch (t) {
458  case LIR_OprDesc::int_type:     return T_INT;
459  case LIR_OprDesc::long_type:    return T_LONG;
460  case LIR_OprDesc::float_type:   return T_FLOAT;
461  case LIR_OprDesc::double_type:  return T_DOUBLE;
462  case LIR_OprDesc::object_type:  return T_OBJECT;
463  case LIR_OprDesc::unknown_type: // fall through
464  default: ShouldNotReachHere();  return T_ILLEGAL;
465  }
466}
467
468
469// LIR_Address
470class LIR_Address: public LIR_OprPtr {
471 friend class LIR_OpVisitState;
472
473 public:
474  // NOTE: currently these must be the log2 of the scale factor (and
475  // must also be equivalent to the ScaleFactor enum in
476  // assembler_i486.hpp)
477  enum Scale {
478    times_1  =  0,
479    times_2  =  1,
480    times_4  =  2,
481    times_8  =  3
482  };
483
484 private:
485  LIR_Opr   _base;
486  LIR_Opr   _index;
487  Scale     _scale;
488  intx      _disp;
489  BasicType _type;
490
491 public:
492  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
493       _base(base)
494     , _index(index)
495     , _scale(times_1)
496     , _type(type)
497     , _disp(0) { verify(); }
498
499  LIR_Address(LIR_Opr base, int disp, BasicType type):
500       _base(base)
501     , _index(LIR_OprDesc::illegalOpr())
502     , _scale(times_1)
503     , _type(type)
504     , _disp(disp) { verify(); }
505
506#ifdef IA32
507  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
508       _base(base)
509     , _index(index)
510     , _scale(scale)
511     , _type(type)
512     , _disp(disp) { verify(); }
513#endif
514
515  LIR_Opr base()  const                          { return _base;  }
516  LIR_Opr index() const                          { return _index; }
517  Scale   scale() const                          { return _scale; }
518  intx    disp()  const                          { return _disp;  }
519
520  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
521
522  virtual LIR_Address* as_address()              { return this;   }
523  virtual BasicType type() const                 { return _type; }
524  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
525
526  void verify() const PRODUCT_RETURN;
527
528  static Scale scale(BasicType type);
529};
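
// Illustrative sketch (not part of the original header): building address operands
// with the constructors above, assuming 'base' and 'index' are register operands
// already in scope; the scaled form is only declared under IA32 in this header.
//
//   LIR_Address* a1 = new LIR_Address(base, index, T_INT);   // base + index
//   LIR_Address* a2 = new LIR_Address(base, 16, T_INT);      // base + 16
// #ifdef IA32
//   LIR_Address* a3 = new LIR_Address(base, index, LIR_Address::times_4, 16, T_INT);
// #endif
//   LIR_Opr opr = LIR_OprFact::address(a1);                  // wrap as an operand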
530
531
532// operand factory
533class LIR_OprFact: public AllStatic {
534 public:
535
536  static LIR_Opr illegalOpr;
537
538  static LIR_Opr single_cpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
539  static LIR_Opr single_cpu_oop(int reg)        { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
540  static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
541
542  static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); }
543
544#ifdef SPARC
545  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
546#endif
547#ifdef IA32
548  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | (reg  << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
549  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::is_xmm_mask); }
550  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | (reg  << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); }
551#endif
552
553
554  static LIR_Opr virtual_register(int index, BasicType type) {
555    LIR_Opr res;
556    switch (type) {
557      case T_OBJECT: // fall through
558      case T_ARRAY:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
559      case T_INT:    res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
560      case T_LONG:   res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
561      case T_FLOAT:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
562      case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
563
564      default:       ShouldNotReachHere(); res = illegalOpr;
565    }
566
567#ifdef ASSERT
568    res->validate_type();
569    assert(res->vreg_number() == index, "conversion check");
570    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
571    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
572
573    // old-style calculation; check if old and new method are equal
574    LIR_OprDesc::OprType t = as_OprType(type);
575    LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | t |
576                               ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
577                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
578    assert(res == old_res, "old and new method not equal");
579#endif
580
581    return res;
582  }
583
584  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
585  // the index is platform independent; a double stack slot using indices 2 and 3 always
586  // has index 2.
587  static LIR_Opr stack(int index, BasicType type) {
588    LIR_Opr res;
589    switch (type) {
590      case T_OBJECT: // fall through
591      case T_ARRAY:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
592      case T_INT:    res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
593      case T_LONG:   res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
594      case T_FLOAT:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
595      case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
596
597      default:       ShouldNotReachHere(); res = illegalOpr;
598    }
599
600#ifdef ASSERT
601    assert(index >= 0, "index must be positive");
602    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
603
604    LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::stack_value | as_OprType(type) | LIR_OprDesc::size_for(type));
605    assert(res == old_res, "old and new method not equal");
606#endif
607
608    return res;
609  }
610
611  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
612  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
613  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
614  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
615  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
616  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
617  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
618  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
619  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
620
621  static LIR_Opr value_type(ValueType* type);
622  static LIR_Opr dummy_value_type(ValueType* type);
623};
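
// Illustrative sketch (not part of the original header): the factory allocates no
// memory for registers and stack slots -- it just packs the bits described in
// LIR_OprDesc into a fake pointer -- while constants go through new LIR_Const.
//
//   LIR_Opr v = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base, T_INT);
//   LIR_Opr s = LIR_OprFact::stack(2, T_LONG);      // double-word stack slot 2/3
//   LIR_Opr c = LIR_OprFact::intConst(1);           // resource-allocated LIR_Const
//   assert(v->is_virtual() && s->is_double_stack() && c->is_constant(), "sanity");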
624
625
626//-------------------------------------------------------------------------------
627//                   LIR Instructions
628//-------------------------------------------------------------------------------
629//
630// Note:
631//  - every instruction has a result operand
632//  - every instruction has a CodeEmitInfo operand (can be revisited later)
633//  - every instruction has a LIR_OpCode operand
634//  - LIR_OpN means an instruction that has N input operands
635//
636// class hierarchy:
637//
638class  LIR_Op;
639class    LIR_Op0;
640class      LIR_OpLabel;
641class    LIR_Op1;
642class      LIR_OpBranch;
643class      LIR_OpConvert;
644class      LIR_OpAllocObj;
645class      LIR_OpRoundFP;
646class    LIR_Op2;
647class    LIR_OpDelay;
648class    LIR_Op3;
649class      LIR_OpAllocArray;
650class    LIR_OpCall;
651class      LIR_OpJavaCall;
652class      LIR_OpRTCall;
653class    LIR_OpArrayCopy;
654class    LIR_OpLock;
655class    LIR_OpTypeCheck;
656class    LIR_OpCompareAndSwap;
657class    LIR_OpProfileCall;
658
659
660// LIR operation codes
661enum LIR_Code {
662    lir_none
663  , begin_op0
664      , lir_word_align
665      , lir_label
666      , lir_nop
667      , lir_backwardbranch_target
668      , lir_std_entry
669      , lir_osr_entry
670      , lir_build_frame
671      , lir_fpop_raw
672      , lir_24bit_FPU
673      , lir_reset_FPU
674      , lir_breakpoint
675      , lir_rtcall
676      , lir_membar
677      , lir_membar_acquire
678      , lir_membar_release
679      , lir_get_thread
680  , end_op0
681  , begin_op1
682      , lir_fxch
683      , lir_fld
684      , lir_ffree
685      , lir_push
686      , lir_pop
687      , lir_null_check
688      , lir_return
689      , lir_leal
690      , lir_neg
691      , lir_branch
692      , lir_cond_float_branch
693      , lir_move
694      , lir_prefetchr
695      , lir_prefetchw
696      , lir_convert
697      , lir_alloc_object
698      , lir_monaddr
699      , lir_roundfp
700      , lir_safepoint
701  , end_op1
702  , begin_op2
703      , lir_cmp
704      , lir_cmp_l2i
705      , lir_ucmp_fd2i
706      , lir_cmp_fd2i
707      , lir_cmove
708      , lir_add
709      , lir_sub
710      , lir_mul
711      , lir_mul_strictfp
712      , lir_div
713      , lir_div_strictfp
714      , lir_rem
715      , lir_sqrt
716      , lir_abs
717      , lir_sin
718      , lir_cos
719      , lir_tan
720      , lir_log
721      , lir_log10
722      , lir_logic_and
723      , lir_logic_or
724      , lir_logic_xor
725      , lir_shl
726      , lir_shr
727      , lir_ushr
728      , lir_alloc_array
729      , lir_throw
730      , lir_unwind
731      , lir_compare_to
732  , end_op2
733  , begin_op3
734      , lir_idiv
735      , lir_irem
736  , end_op3
737  , begin_opJavaCall
738      , lir_static_call
739      , lir_optvirtual_call
740      , lir_icvirtual_call
741      , lir_virtual_call
742  , end_opJavaCall
743  , begin_opArrayCopy
744      , lir_arraycopy
745  , end_opArrayCopy
746  , begin_opLock
747    , lir_lock
748    , lir_unlock
749  , end_opLock
750  , begin_delay_slot
751    , lir_delay_slot
752  , end_delay_slot
753  , begin_opTypeCheck
754    , lir_instanceof
755    , lir_checkcast
756    , lir_store_check
757  , end_opTypeCheck
758  , begin_opCompareAndSwap
759    , lir_cas_long
760    , lir_cas_obj
761    , lir_cas_int
762  , end_opCompareAndSwap
763  , begin_opMDOProfile
764    , lir_profile_call
765  , end_opMDOProfile
766};
767
768
769enum LIR_Condition {
770    lir_cond_equal
771  , lir_cond_notEqual
772  , lir_cond_less
773  , lir_cond_lessEqual
774  , lir_cond_greaterEqual
775  , lir_cond_greater
776  , lir_cond_belowEqual
777  , lir_cond_aboveEqual
778  , lir_cond_always
779  , lir_cond_unknown = -1
780};
781
782
783enum LIR_PatchCode {
784  lir_patch_none,
785  lir_patch_low,
786  lir_patch_high,
787  lir_patch_normal
788};
789
790
791enum LIR_MoveKind {
792  lir_move_normal,
793  lir_move_volatile,
794  lir_move_unaligned,
795  lir_move_max_flag
796};
797
798
799// --------------------------------------------------
800// LIR_Op
801// --------------------------------------------------
802class LIR_Op: public CompilationResourceObj {
803 friend class LIR_OpVisitState;
804
805#ifdef ASSERT
806 private:
807  const char *  _file;
808  int           _line;
809#endif
810
811 protected:
812  LIR_Opr       _result;
813  unsigned short _code;
814  unsigned short _flags;
815  CodeEmitInfo* _info;
816  int           _id;     // value id for register allocation
817  int           _fpu_pop_count;
818  Instruction*  _source; // for debugging
819
820  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
821
822 protected:
823  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
824
825 public:
826  LIR_Op()
827    : _result(LIR_OprFact::illegalOpr)
828    , _code(lir_none)
829    , _flags(0)
830    , _info(NULL)
831#ifdef ASSERT
832    , _file(NULL)
833    , _line(0)
834#endif
835    , _fpu_pop_count(0)
836    , _source(NULL)
837    , _id(-1)                             {}
838
839  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
840    : _result(result)
841    , _code(code)
842    , _flags(0)
843    , _info(info)
844#ifdef ASSERT
845    , _file(NULL)
846    , _line(0)
847#endif
848    , _fpu_pop_count(0)
849    , _source(NULL)
850    , _id(-1)                             {}
851
852  CodeEmitInfo* info() const                  { return _info;   }
853  LIR_Code code()      const                  { return (LIR_Code)_code;   }
854  LIR_Opr result_opr() const                  { return _result; }
855  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
856
857#ifdef ASSERT
858  void set_file_and_line(const char * file, int line) {
859    _file = file;
860    _line = line;
861  }
862#endif
863
864  virtual const char * name() const PRODUCT_RETURN0;
865
866  int id()             const                  { return _id;     }
867  void set_id(int id)                         { _id = id; }
868
869  // FPU stack simulation helpers -- only used on Intel
870  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
871  int  fpu_pop_count() const                  { return _fpu_pop_count; }
872  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
873
874  Instruction* source() const                 { return _source; }
875  void set_source(Instruction* ins)           { _source = ins; }
876
877  virtual void emit_code(LIR_Assembler* masm) = 0;
878  virtual void print_instr(outputStream* out) const   = 0;
879  virtual void print_on(outputStream* st) const PRODUCT_RETURN;
880
881  virtual LIR_OpCall* as_OpCall() { return NULL; }
882  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
883  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
884  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
885  virtual LIR_OpLock* as_OpLock() { return NULL; }
886  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
887  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
888  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
889  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
890  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
891  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
892  virtual LIR_Op0* as_Op0() { return NULL; }
893  virtual LIR_Op1* as_Op1() { return NULL; }
894  virtual LIR_Op2* as_Op2() { return NULL; }
895  virtual LIR_Op3* as_Op3() { return NULL; }
896  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
897  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
898  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
899  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
900
901  virtual void verify() const {}
902};
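
// Illustrative sketch (not part of the original header): the as_OpXxx() virtuals
// above give a cheap, checked downcast without RTTI; a pass over the LIR typically
// switches on code() or probes for the subclass it cares about.
//
//   void visit(LIR_Op* op) {
//     if (LIR_OpBranch* branch = op->as_OpBranch()) {
//       // branch-specific processing, e.g. branch->block()
//     } else if (op->as_OpJavaCall() != NULL) {
//       // call-specific processing
//     }
//   }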
903
904// for calls
905class LIR_OpCall: public LIR_Op {
906 friend class LIR_OpVisitState;
907
908 protected:
909  address      _addr;
910  LIR_OprList* _arguments;
911 protected:
912  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
913             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
914    : LIR_Op(code, result, info)
915    , _arguments(arguments)
916    , _addr(addr) {}
917
918 public:
919  address addr() const                           { return _addr; }
920  const LIR_OprList* arguments() const           { return _arguments; }
921  virtual LIR_OpCall* as_OpCall()                { return this; }
922};
923
924
925// --------------------------------------------------
926// LIR_OpJavaCall
927// --------------------------------------------------
928class LIR_OpJavaCall: public LIR_OpCall {
929 friend class LIR_OpVisitState;
930
931 private:
932  ciMethod*       _method;
933  LIR_Opr         _receiver;
934
935 public:
936  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
937                 LIR_Opr receiver, LIR_Opr result,
938                 address addr, LIR_OprList* arguments,
939                 CodeEmitInfo* info)
940  : LIR_OpCall(code, addr, result, arguments, info)
941  , _receiver(receiver)
942  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
943
944  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
945                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
946                 LIR_OprList* arguments, CodeEmitInfo* info)
947  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
948  , _receiver(receiver)
949  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
950
951  LIR_Opr receiver() const                       { return _receiver; }
952  ciMethod* method() const                       { return _method;   }
953
954  intptr_t vtable_offset() const {
955    assert(_code == lir_virtual_call, "only have vtable for real vcall");
956    return (intptr_t) addr();
957  }
958
959  virtual void emit_code(LIR_Assembler* masm);
960  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
961  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
962};
963
964// --------------------------------------------------
965// LIR_OpLabel
966// --------------------------------------------------
967// Location where a branch can continue
968class LIR_OpLabel: public LIR_Op {
969 friend class LIR_OpVisitState;
970
971 private:
972  Label* _label;
973 public:
974  LIR_OpLabel(Label* lbl)
975   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
976   , _label(lbl)                                 {}
977  Label* label() const                           { return _label; }
978
979  virtual void emit_code(LIR_Assembler* masm);
980  virtual LIR_OpLabel* as_OpLabel() { return this; }
981  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
982};
983
984// LIR_OpArrayCopy
985class LIR_OpArrayCopy: public LIR_Op {
986 friend class LIR_OpVisitState;
987
988 private:
989  ArrayCopyStub*  _stub;
990  LIR_Opr   _src;
991  LIR_Opr   _src_pos;
992  LIR_Opr   _dst;
993  LIR_Opr   _dst_pos;
994  LIR_Opr   _length;
995  LIR_Opr   _tmp;
996  ciArrayKlass* _expected_type;
997  int       _flags;
998
999public:
1000  enum Flags {
1001    src_null_check         = 1 << 0,
1002    dst_null_check         = 1 << 1,
1003    src_pos_positive_check = 1 << 2,
1004    dst_pos_positive_check = 1 << 3,
1005    length_positive_check  = 1 << 4,
1006    src_range_check        = 1 << 5,
1007    dst_range_check        = 1 << 6,
1008    type_check             = 1 << 7,
1009    all_flags              = (1 << 8) - 1
1010  };
1011
1012  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1013                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1014
1015  LIR_Opr src() const                            { return _src; }
1016  LIR_Opr src_pos() const                        { return _src_pos; }
1017  LIR_Opr dst() const                            { return _dst; }
1018  LIR_Opr dst_pos() const                        { return _dst_pos; }
1019  LIR_Opr length() const                         { return _length; }
1020  LIR_Opr tmp() const                            { return _tmp; }
1021  int flags() const                              { return _flags; }
1022  ciArrayKlass* expected_type() const            { return _expected_type; }
1023  ArrayCopyStub* stub() const                    { return _stub; }
1024
1025  virtual void emit_code(LIR_Assembler* masm);
1026  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1027  void print_instr(outputStream* out) const PRODUCT_RETURN;
1028};
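
// Illustrative sketch (not part of the original header): the flags word is a bitmask
// of the checks that still have to be performed at runtime; code creating an
// arraycopy clears the bits it has already proven unnecessary.
//
//   int flags = LIR_OpArrayCopy::all_flags;
//   flags &= ~LIR_OpArrayCopy::src_null_check;   // source statically known non-null
//   flags &= ~LIR_OpArrayCopy::type_check;       // element types already known to match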
1029
1030
1031// --------------------------------------------------
1032// LIR_Op0
1033// --------------------------------------------------
1034class LIR_Op0: public LIR_Op {
1035 friend class LIR_OpVisitState;
1036
1037 public:
1038  LIR_Op0(LIR_Code code)
1039   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1040  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1041   : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1042
1043  virtual void emit_code(LIR_Assembler* masm);
1044  virtual LIR_Op0* as_Op0() { return this; }
1045  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1046};
1047
1048
1049// --------------------------------------------------
1050// LIR_Op1
1051// --------------------------------------------------
1052
1053class LIR_Op1: public LIR_Op {
1054 friend class LIR_OpVisitState;
1055
1056 protected:
1057  LIR_Opr         _opr;   // input operand
1058  BasicType       _type;  // operand type
1059  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1060
1061  static void print_patch_code(outputStream* out, LIR_PatchCode code);
1062
1063  void set_kind(LIR_MoveKind kind) {
1064    assert(code() == lir_move, "must be");
1065    _flags = kind;
1066  }
1067
1068 public:
1069  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1070    : LIR_Op(code, result, info)
1071    , _opr(opr)
1072    , _patch(patch)
1073    , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1074
1075  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1076    : LIR_Op(code, result, info)
1077    , _opr(opr)
1078    , _patch(patch)
1079    , _type(type)                      {
1080    assert(code == lir_move, "must be");
1081    set_kind(kind);
1082  }
1083
1084  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1085    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1086    , _opr(opr)
1087    , _patch(lir_patch_none)
1088    , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1089
1090  LIR_Opr in_opr()           const               { return _opr;   }
1091  LIR_PatchCode patch_code() const               { return _patch; }
1092  BasicType type()           const               { return _type;  }
1093
1094  LIR_MoveKind move_kind() const {
1095    assert(code() == lir_move, "must be");
1096    return (LIR_MoveKind)_flags;
1097  }
1098
1099  virtual void emit_code(LIR_Assembler* masm);
1100  virtual LIR_Op1* as_Op1() { return this; }
1101  virtual const char * name() const PRODUCT_RETURN0;
1102
1103  void set_in_opr(LIR_Opr opr) { _opr = opr; }
1104
1105  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1106  virtual void verify() const;
1107};
1108
1109
1110// for runtime calls
1111class LIR_OpRTCall: public LIR_OpCall {
1112 friend class LIR_OpVisitState;
1113
1114 private:
1115  LIR_Opr _tmp;
1116 public:
1117  LIR_OpRTCall(address addr, LIR_Opr tmp,
1118               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1119    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1120    , _tmp(tmp) {}
1121
1122  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1123  virtual void emit_code(LIR_Assembler* masm);
1124  virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1125
1126  LIR_Opr tmp() const                            { return _tmp; }
1127
1128  virtual void verify() const;
1129};
1130
1131
1132class LIR_OpBranch: public LIR_Op {
1133 friend class LIR_OpVisitState;
1134
1135 private:
1136  LIR_Condition _cond;
1137  BasicType     _type;
1138  Label*        _label;
1139  BlockBegin*   _block;  // if this is a branch to a block, this is the block
1140  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1141  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1142
1143 public:
1144  LIR_OpBranch(LIR_Condition cond, Label* lbl)
1145    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1146    , _cond(cond)
1147    , _label(lbl)
1148    , _block(NULL)
1149    , _ublock(NULL)
1150    , _stub(NULL) { }
1151
1152  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1153  LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1154
1155  // for unordered comparisons
1156  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1157
1158  LIR_Condition cond()        const              { return _cond;        }
1159  BasicType     type()        const              { return _type;        }
1160  Label*        label()       const              { return _label;       }
1161  BlockBegin*   block()       const              { return _block;       }
1162  BlockBegin*   ublock()      const              { return _ublock;      }
1163  CodeStub*     stub()        const              { return _stub;       }
1164
1165  void          change_block(BlockBegin* b);
1166  void          change_ublock(BlockBegin* b);
1167  void          negate_cond();
1168
1169  virtual void emit_code(LIR_Assembler* masm);
1170  virtual LIR_OpBranch* as_OpBranch() { return this; }
1171  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1172};
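
// Illustrative sketch (not part of the original header): a branch targets either a
// raw Label, a BlockBegin, or a CodeStub, and the float form carries an extra block
// for the unordered case. 'label', 'block' and 'unordered_block' are assumed to be
// in scope.
//
//   LIR_OpBranch* done  = new LIR_OpBranch(lir_cond_always, label);          // goto label
//   LIR_OpBranch* taken = new LIR_OpBranch(lir_cond_less, T_INT, block);     // to a block
//   LIR_OpBranch* fcmp  = new LIR_OpBranch(lir_cond_less, T_FLOAT, block, unordered_block);
//   taken->negate_cond();   // flips the condition to its logical negation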
1173
1174
1175class ConversionStub;
1176
1177class LIR_OpConvert: public LIR_Op1 {
1178 friend class LIR_OpVisitState;
1179
1180 private:
1181   Bytecodes::Code _bytecode;
1182   ConversionStub* _stub;
1183
1184 public:
1185   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1186     : LIR_Op1(lir_convert, opr, result)
1187     , _stub(stub)
1188     , _bytecode(code)                           {}
1189
1190  Bytecodes::Code bytecode() const               { return _bytecode; }
1191  ConversionStub* stub() const                   { return _stub; }
1192
1193  virtual void emit_code(LIR_Assembler* masm);
1194  virtual LIR_OpConvert* as_OpConvert() { return this; }
1195  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1196
1197  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1198};
1199
1200
1201// LIR_OpAllocObj
1202class LIR_OpAllocObj : public LIR_Op1 {
1203 friend class LIR_OpVisitState;
1204
1205 private:
1206  LIR_Opr _tmp1;
1207  LIR_Opr _tmp2;
1208  LIR_Opr _tmp3;
1209  LIR_Opr _tmp4;
1210  int     _hdr_size;
1211  int     _obj_size;
1212  CodeStub* _stub;
1213  bool    _init_check;
1214
1215 public:
1216  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1217                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1218                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1219    : LIR_Op1(lir_alloc_object, klass, result)
1220    , _tmp1(t1)
1221    , _tmp2(t2)
1222    , _tmp3(t3)
1223    , _tmp4(t4)
1224    , _hdr_size(hdr_size)
1225    , _obj_size(obj_size)
1226    , _init_check(init_check)
1227    , _stub(stub)                                { }
1228
1229  LIR_Opr klass()        const                   { return in_opr();     }
1230  LIR_Opr obj()          const                   { return result_opr(); }
1231  LIR_Opr tmp1()         const                   { return _tmp1;        }
1232  LIR_Opr tmp2()         const                   { return _tmp2;        }
1233  LIR_Opr tmp3()         const                   { return _tmp3;        }
1234  LIR_Opr tmp4()         const                   { return _tmp4;        }
1235  int     header_size()  const                   { return _hdr_size;    }
1236  int     object_size()  const                   { return _obj_size;    }
1237  bool    init_check()   const                   { return _init_check;  }
1238  CodeStub* stub()       const                   { return _stub;        }
1239
1240  virtual void emit_code(LIR_Assembler* masm);
1241  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1242  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1243};
1244
1245
1246// LIR_OpRoundFP
1247class LIR_OpRoundFP : public LIR_Op1 {
1248 friend class LIR_OpVisitState;
1249
1250 private:
1251  LIR_Opr _tmp;
1252
1253 public:
1254  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1255    : LIR_Op1(lir_roundfp, reg, result)
1256    , _tmp(stack_loc_temp) {}
1257
1258  LIR_Opr tmp() const                            { return _tmp; }
1259  virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1260  void print_instr(outputStream* out) const PRODUCT_RETURN;
1261};
1262
1263// LIR_OpTypeCheck
1264class LIR_OpTypeCheck: public LIR_Op {
1265 friend class LIR_OpVisitState;
1266
1267 private:
1268  LIR_Opr       _object;
1269  LIR_Opr       _array;
1270  ciKlass*      _klass;
1271  LIR_Opr       _tmp1;
1272  LIR_Opr       _tmp2;
1273  LIR_Opr       _tmp3;
1274  bool          _fast_check;
1275  CodeEmitInfo* _info_for_patch;
1276  CodeEmitInfo* _info_for_exception;
1277  CodeStub*     _stub;
1278  // Helpers for Tier1UpdateMethodData
1279  ciMethod*     _profiled_method;
1280  int           _profiled_bci;
1281
1282public:
1283  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1284                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1285                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1286                  ciMethod* profiled_method, int profiled_bci);
1287  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1288                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
1289                  ciMethod* profiled_method, int profiled_bci);
1290
1291  LIR_Opr object() const                         { return _object;         }
1292  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1293  LIR_Opr tmp1() const                           { return _tmp1;           }
1294  LIR_Opr tmp2() const                           { return _tmp2;           }
1295  LIR_Opr tmp3() const                           { return _tmp3;           }
1296  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1297  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1298  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1299  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1300  CodeStub* stub() const                         { return _stub;           }
1301
1302  // methodDataOop profiling
1303  ciMethod* profiled_method()                    { return _profiled_method; }
1304  int       profiled_bci()                       { return _profiled_bci; }
1305
1306  virtual void emit_code(LIR_Assembler* masm);
1307  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1308  void print_instr(outputStream* out) const PRODUCT_RETURN;
1309};
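
// Illustrative sketch (not part of the original header): one node class serves the
// three type-check codes, and the asserting accessors document which fields apply
// to which code. The handler function is hypothetical.
//
//   void handle(LIR_OpTypeCheck* op) {
//     if (op->code() == lir_store_check) {
//       LIR_Opr array = op->array();     // only valid for store checks
//     } else {
//       ciKlass* k = op->klass();        // instanceof / checkcast only
//       bool fast  = op->fast_check();
//     }
//   }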
1310
1311// LIR_Op2
1312class LIR_Op2: public LIR_Op {
1313 friend class LIR_OpVisitState;
1314
1315  int  _fpu_stack_size; // for sin/cos implementation on Intel
1316
1317 protected:
1318  LIR_Opr   _opr1;
1319  LIR_Opr   _opr2;
1320  BasicType _type;
1321  LIR_Opr   _tmp;
1322  LIR_Condition _condition;
1323
1324  void verify() const;
1325
1326 public:
1327  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1328    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1329    , _opr1(opr1)
1330    , _opr2(opr2)
1331    , _type(T_ILLEGAL)
1332    , _condition(condition)
1333    , _fpu_stack_size(0)
1334    , _tmp(LIR_OprFact::illegalOpr) {
1335    assert(code == lir_cmp, "code check");
1336  }
1337
1338  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
1339    : LIR_Op(code, result, NULL)
1340    , _opr1(opr1)
1341    , _opr2(opr2)
1342    , _type(T_ILLEGAL)
1343    , _condition(condition)
1344    , _fpu_stack_size(0)
1345    , _tmp(LIR_OprFact::illegalOpr) {
1346    assert(code == lir_cmove, "code check");
1347  }
1348
1349  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1350          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1351    : LIR_Op(code, result, info)
1352    , _opr1(opr1)
1353    , _opr2(opr2)
1354    , _type(type)
1355    , _condition(lir_cond_unknown)
1356    , _fpu_stack_size(0)
1357    , _tmp(LIR_OprFact::illegalOpr) {
1358    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1359  }
1360
1361  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp)
1362    : LIR_Op(code, result, NULL)
1363    , _opr1(opr1)
1364    , _opr2(opr2)
1365    , _type(T_ILLEGAL)
1366    , _condition(lir_cond_unknown)
1367    , _fpu_stack_size(0)
1368    , _tmp(tmp) {
1369    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1370  }
1371
1372  LIR_Opr in_opr1() const                        { return _opr1; }
1373  LIR_Opr in_opr2() const                        { return _opr2; }
1374  BasicType type()  const                        { return _type; }
1375  LIR_Opr tmp_opr() const                        { return _tmp; }
1376  LIR_Condition condition() const  {
1377    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1378  }
1379
1380  void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1381  int  fpu_stack_size() const                    { return _fpu_stack_size; }
1382
1383  void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1384  void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1385
1386  virtual void emit_code(LIR_Assembler* masm);
1387  virtual LIR_Op2* as_Op2() { return this; }
1388  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1389};
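
// Illustrative sketch (not part of the original header): the constructor chosen
// decides which fields are meaningful -- a lir_cmp carries only the condition and
// inputs, a lir_cmove additionally produces a result selected by that condition,
// and arithmetic ops leave the condition unused. Operand names are hypothetical.
//
//   LIR_Op2* cmp   = new LIR_Op2(lir_cmp,   lir_cond_less, left, right);
//   LIR_Op2* cmove = new LIR_Op2(lir_cmove, lir_cond_less, t_val, f_val, result);
//   LIR_Op2* add   = new LIR_Op2(lir_add,   left, right, result);   // condition unused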
1390
1391class LIR_OpAllocArray : public LIR_Op {
1392 friend class LIR_OpVisitState;
1393
1394 private:
1395  LIR_Opr   _klass;
1396  LIR_Opr   _len;
1397  LIR_Opr   _tmp1;
1398  LIR_Opr   _tmp2;
1399  LIR_Opr   _tmp3;
1400  LIR_Opr   _tmp4;
1401  BasicType _type;
1402  CodeStub* _stub;
1403
1404 public:
1405  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1406    : LIR_Op(lir_alloc_array, result, NULL)
1407    , _klass(klass)
1408    , _len(len)
1409    , _tmp1(t1)
1410    , _tmp2(t2)
1411    , _tmp3(t3)
1412    , _tmp4(t4)
1413    , _type(type)
1414    , _stub(stub) {}
1415
1416  LIR_Opr   klass()   const                      { return _klass;       }
1417  LIR_Opr   len()     const                      { return _len;         }
1418  LIR_Opr   obj()     const                      { return result_opr(); }
1419  LIR_Opr   tmp1()    const                      { return _tmp1;        }
1420  LIR_Opr   tmp2()    const                      { return _tmp2;        }
1421  LIR_Opr   tmp3()    const                      { return _tmp3;        }
1422  LIR_Opr   tmp4()    const                      { return _tmp4;        }
1423  BasicType type()    const                      { return _type;        }
1424  CodeStub* stub()    const                      { return _stub;        }
1425
1426  virtual void emit_code(LIR_Assembler* masm);
1427  virtual LIR_OpAllocArray* as_OpAllocArray() { return this; }
1428  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1429};
1430
1431
1432class LIR_Op3: public LIR_Op {
1433 friend class LIR_OpVisitState;
1434
1435 private:
1436  LIR_Opr _opr1;
1437  LIR_Opr _opr2;
1438  LIR_Opr _opr3;
1439 public:
1440  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1441    : LIR_Op(code, result, info)
1442    , _opr1(opr1)
1443    , _opr2(opr2)
1444    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1445  LIR_Opr in_opr1() const                        { return _opr1; }
1446  LIR_Opr in_opr2() const                        { return _opr2; }
1447  LIR_Opr in_opr3() const                        { return _opr3; }
1448
1449  virtual void emit_code(LIR_Assembler* masm);
1450  virtual LIR_Op3* as_Op3() { return this; }
1451  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1452};
1453
1454
1455//--------------------------------
1456class LabelObj: public CompilationResourceObj {
1457 private:
1458  Label _label;
1459 public:
1460  LabelObj()                                     {}
1461  Label* label()                                 { return &_label; }
1462};
1463
1464
1465class LIR_OpLock: public LIR_Op {
1466 friend class LIR_OpVisitState;
1467
1468 private:
1469  LIR_Opr _hdr;
1470  LIR_Opr _obj;
1471  LIR_Opr _lock;
1472  LIR_Opr _scratch;
1473  CodeStub* _stub;
1474 public:
1475  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1476    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1477    , _hdr(hdr)
1478    , _obj(obj)
1479    , _lock(lock)
1480    , _scratch(scratch)
1481    , _stub(stub)                      {}
1482
1483  LIR_Opr hdr_opr() const                        { return _hdr; }
1484  LIR_Opr obj_opr() const                        { return _obj; }
1485  LIR_Opr lock_opr() const                       { return _lock; }
1486  LIR_Opr scratch_opr() const                    { return _scratch; }
1487  CodeStub* stub() const                         { return _stub; }
1488
1489  virtual void emit_code(LIR_Assembler* masm);
1490  virtual LIR_OpLock* as_OpLock() { return this; }
1491  void print_instr(outputStream* out) const PRODUCT_RETURN;
1492};
1493
1494
1495class LIR_OpDelay: public LIR_Op {
1496 friend class LIR_OpVisitState;
1497
1498 private:
1499  LIR_Op* _op;
1500
1501 public:
1502  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1503    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1504    _op(op) {
1505    assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1506  }
1507  virtual void emit_code(LIR_Assembler* masm);
1508  virtual LIR_OpDelay* as_OpDelay() { return this; }
1509  void print_instr(outputStream* out) const PRODUCT_RETURN;
1510  LIR_Op* delay_op() const { return _op; }
1511  CodeEmitInfo* call_info() const { return info(); }
1512};
1513
1514
1515// LIR_OpCompareAndSwap
1516class LIR_OpCompareAndSwap : public LIR_Op {
1517 friend class LIR_OpVisitState;
1518
1519 private:
1520  LIR_Opr _addr;
1521  LIR_Opr _cmp_value;
1522  LIR_Opr _new_value;
1523  LIR_Opr _tmp1;
1524  LIR_Opr _tmp2;
1525
1526 public:
1527  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2)
1528    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1529    , _addr(addr)
1530    , _cmp_value(cmp_value)
1531    , _new_value(new_value)
1532    , _tmp1(t1)
1533    , _tmp2(t2)                                  { }
1534
1535  LIR_Opr addr()        const                    { return _addr;  }
1536  LIR_Opr cmp_value()   const                    { return _cmp_value; }
1537  LIR_Opr new_value()   const                    { return _new_value; }
1538  LIR_Opr tmp1()        const                    { return _tmp1;      }
1539  LIR_Opr tmp2()        const                    { return _tmp2;      }
1540
1541  virtual void emit_code(LIR_Assembler* masm);
1542  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return this; }
1543  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1544};
1545
1546// LIR_OpProfileCall
1547class LIR_OpProfileCall : public LIR_Op {
1548 friend class LIR_OpVisitState;
1549
1550 private:
1551  ciMethod* _profiled_method;
1552  int _profiled_bci;
1553  LIR_Opr _mdo;
1554  LIR_Opr _recv;
1555  LIR_Opr _tmp1;
1556  ciKlass* _known_holder;
1557
1558 public:
1559  // Destroys recv
1560  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1561    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1562    , _profiled_method(profiled_method)
1563    , _profiled_bci(profiled_bci)
1564    , _mdo(mdo)
1565    , _recv(recv)
1566    , _tmp1(t1)
1567    , _known_holder(known_holder)                { }
1568
1569  ciMethod* profiled_method() const              { return _profiled_method;  }
1570  int       profiled_bci()    const              { return _profiled_bci;     }
1571  LIR_Opr   mdo()             const              { return _mdo;              }
1572  LIR_Opr   recv()            const              { return _recv;             }
1573  LIR_Opr   tmp1()            const              { return _tmp1;             }
1574  ciKlass*  known_holder()    const              { return _known_holder;     }
1575
1576  virtual void emit_code(LIR_Assembler* masm);
1577  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1578  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1579};
1580
1581
1582class LIR_InsertionBuffer;
1583
1584//--------------------------------LIR_List---------------------------------------------------
1585// Maintains a list of LIR instructions (one instance of LIR_List per basic block).
1586// The LIR instructions are created and appended through the instruction methods of LIR_List itself.
1587//
1588// Notes:
1589// - all offsets are (should be) in bytes
1590// - local positions are specified with an offset, with offset 0 being local 0
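// A minimal usage sketch (assuming a LIR_List* "lir" and LIR_Opr virtual
// registers "x", "y" and "sum" obtained from the LIR generator):
//
//   lir->add(x, y, sum);         // sum = x + y
//   lir->return_op(sum);         // return sum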
1591
1592class LIR_List: public CompilationResourceObj {
1593 private:
1594  LIR_OpList  _operations;
1595
1596  Compilation*  _compilation;
1597#ifndef PRODUCT
1598  BlockBegin*   _block;
1599#endif
1600#ifdef ASSERT
1601  const char *  _file;
1602  int           _line;
1603#endif
1604
1605  void append(LIR_Op* op) {
1606    if (op->source() == NULL)
1607      op->set_source(_compilation->current_instruction());
1608#ifndef PRODUCT
1609    if (PrintIRWithLIR) {
1610      _compilation->maybe_print_current_instruction();
1611      op->print(); tty->cr();
1612    }
1613#endif // PRODUCT
1614
1615    _operations.append(op);
1616
1617#ifdef ASSERT
1618    op->verify();
1619    op->set_file_and_line(_file, _line);
1620    _file = NULL;
1621    _line = 0;
1622#endif
1623  }
1624
1625 public:
1626  LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1627
1628#ifdef ASSERT
1629  void set_file_and_line(const char * file, int line);
1630#endif
1631
1632  //---------- accessors ---------------
1633  LIR_OpList* instructions_list()                { return &_operations; }
1634  int         length() const                     { return _operations.length(); }
1635  LIR_Op*     at(int i) const                    { return _operations.at(i); }
1636
1637  NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1638
1639  // insert the LIR_Ops of the buffer at the right places in this LIR_List
1640  void append(LIR_InsertionBuffer* buffer);
1641
1642  //---------- mutators ---------------
1643  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1644  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1645
1646  //---------- printing -------------
1647  void print_instructions() PRODUCT_RETURN;
1648
1649
1650  //---------- instructions -------------
1651  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1652                        address dest, LIR_OprList* arguments,
1653                        CodeEmitInfo* info) {
1654    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1655  }
1656  void call_static(ciMethod* method, LIR_Opr result,
1657                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1658    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1659  }
1660  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1661                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1662    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1663  }
1664  void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1665                    intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1666    append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1667  }
1668
1669  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1670  void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1671  void membar()                                  { append(new LIR_Op0(lir_membar)); }
1672  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1673  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1674
1675  void nop()                                     { append(new LIR_Op0(lir_nop)); }
1676  void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1677
1678  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1679  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1680
1681  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1682
1683  void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1684  void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1685
1686  // result is a stack location for the old backend and a virtual register for UseLinearScan
1687  // stack_loc_temp is an illegal register for the old backend
1688  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1689  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1690  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1691  void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1692  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1693  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1694  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1695
1696  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
1697
1698  void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
1699  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
1700
1701  void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
1702
1703  void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
1704
1705  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
1706
1707  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
1708  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
1709  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
1710
1711  void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
1712  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
1713  void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
1714
1715  void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1716    append(new LIR_Op2(lir_compare_to,  left, right, dst));
1717  }
1718
1719  void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
1720  void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
1721
1722  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
1723    append(new LIR_Op2(lir_cmp, condition, left, right, info));
1724  }
1725  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
1726    cmp(condition, left, LIR_OprFact::intConst(right), info);
1727  }
1728
1729  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
1730  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
1731
1732  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
1733    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
1734  }
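  // A sketch of a conditional select built from cmp and cmove above, assuming
  // a LIR_List* "lir" and LIR_Opr operands "a", "b" and "res":
  //
  //   lir->cmp(lir_cond_greater, a, b);
  //   lir->cmove(lir_cond_greater, a, b, res);   // res = (a > b) ? a : b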
1735
1736  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1737  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1738  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
1739
1740  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
1741  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
1742  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, tmp, to)); }
1743  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, tmp, to)); }
1744  void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
1745  void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
1746  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
1747
1748  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
1749  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
1750  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
1751  void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
1752  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
1753  void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
1754  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
1755
1756  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1757  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1758
1759  void load(LIR_Address* addr, LIR_Opr dst, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1760
1761  void prefetch(LIR_Address* addr, bool is_store);
1762
1763  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1764  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1765  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1766  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1767  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1768
1769  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1770  void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1771  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1772  void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1773
1774  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
1775  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
1776
1777  // jump is an unconditional branch
1778  void jump(BlockBegin* block) {
1779    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
1780  }
1781  void jump(CodeStub* stub) {
1782    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
1783  }
1784  void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
1785  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
1786    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1787    append(new LIR_OpBranch(cond, type, block));
1788  }
1789  void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
1790    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1791    append(new LIR_OpBranch(cond, type, stub));
1792  }
1793  void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
1794    assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
1795    append(new LIR_OpBranch(cond, type, block, unordered));
1796  }
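  // A sketch of the two branching patterns above, assuming a LIR_List* "lir",
  // LIR_Opr operands "left" and "right", and a BlockBegin* "target":
  //
  //   lir->cmp(lir_cond_equal, left, right);
  //   lir->branch(lir_cond_equal, T_INT, target);     // branch to another basic block
  //
  //   LabelObj* done = new LabelObj();
  //   lir->cmp(lir_cond_equal, left, right);
  //   lir->branch(lir_cond_equal, done->label());     // skip ahead when left == right
  //   /* ... ops executed only when left != right ... */
  //   lir->branch_destination(done->label());         // bind the label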
1797
1798  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1799  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1800  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
1801
1802  void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1803  void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1804  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
1805
1806  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
1807  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
1808
1809  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
1810    append(new LIR_OpRTCall(routine, tmp, result, arguments));
1811  }
1812
1813  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
1814                    LIR_OprList* arguments, CodeEmitInfo* info) {
1815    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
1816  }
1817
1818  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
1819  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub);
1820  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
1821
1822  void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
1823  void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
1824  void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
1825
1826  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
1827
1828  void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
1829
1830  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
1831                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1832                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1833                  ciMethod* profiled_method, int profiled_bci);
1834  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
1835  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1836
1837  // methodDataOop profiling
1838  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
1839};
1840
1841void print_LIR(BlockList* blocks);
1842
1843class LIR_InsertionBuffer : public CompilationResourceObj {
1844 private:
1845  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
1846
1847  // list of insertion points. index and count are stored alternately:
1848  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
1849  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
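  // (e.g. two ops to be inserted at index 3 and one op at index 7 would be
  //  recorded as { 3, 2, 7, 1 }; illustrative values only)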
1850  intStack    _index_and_count;
1851
1852  // the LIR_Ops to be inserted
1853  LIR_OpList  _ops;
1854
1855  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
1856  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
1857  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
1858
1859#ifdef ASSERT
1860  void verify();
1861#endif
1862 public:
1863  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
1864
1865  // must be called before using the insertion buffer
1866  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
1867  bool initialized() const  { return _lir != NULL; }
1868  // called automatically when the buffer is appended to the LIR_List
1869  void finish()             { _lir = NULL; }
1870
1871  // accessors
1872  LIR_List*  lir_list() const             { return _lir; }
1873  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
1874  int index_at(int i) const               { return _index_and_count.at((i << 1));     }
1875  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
1876
1877  int number_of_ops() const               { return _ops.length(); }
1878  LIR_Op* op_at(int i) const              { return _ops.at(i); }
1879
1880  // append an instruction to the buffer
1881  void append(int index, LIR_Op* op);
1882
1883  // convenience: insert a move instruction at the given index
1884  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1885};
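// A minimal usage sketch, assuming an existing LIR_List* "lir" and LIR_Opr
// operands "src" and "dst":
//
//   LIR_InsertionBuffer buffer;
//   buffer.init(lir);            // bind the buffer to the list
//   buffer.move(2, src, dst);    // schedule a move to be inserted at index 2
//   lir->append(&buffer);        // insert the scheduled ops; finish() runs automatically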
1886
1887
1888//
1889// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
1890// Visiting a LIR_Op with a LIR_OpVisitState records information about
1891// the input, output and temporary operands used by the op.  It also
1892// records whether the op has call semantics and collects all the
1893// CodeEmitInfos used by the op.
1894//
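// A typical client loop, assuming a LIR_Op* "op" to be inspected:
//
//   LIR_OpVisitState state;
//   state.visit(op);                                        // collect operands and infos
//   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
//     LIR_Opr opr = state.opr_at(LIR_OpVisitState::inputMode, i);
//     // ... inspect or replace the input operand ...
//   }
//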
1895
1896
1897class LIR_OpVisitState: public StackObj {
1898 public:
1899  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
1900
1901  enum {
1902    maxNumberOfOperands = 14,
1903    maxNumberOfInfos = 4
1904  };
1905
1906 private:
1907  LIR_Op*          _op;
1908
1909  // optimization: the operands and infos are not stored in a variable-length
1910  //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
1911  int              _oprs_len[numModes];
1912  LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
1913  int _info_len;
1914  CodeEmitInfo*    _info_new[maxNumberOfInfos];
1915
1916  bool             _has_call;
1917  bool             _has_slow_case;
1918
1919
1920  // only include register operands
1921  // addresses are decomposed into the base and index registers
1922  // constants and stack operands are ignored
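  // (e.g. for an address of the form "base + index * scale + disp", only the
  //  base and index registers are appended, and always as input operands)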
1923  void append(LIR_Opr& opr, OprMode mode) {
1924    assert(opr->is_valid(), "should not call this otherwise");
1925    assert(mode >= 0 && mode < numModes, "bad mode");
1926
1927    if (opr->is_register()) {
1928      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
1929      _oprs_new[mode][_oprs_len[mode]++] = &opr;
1930
1931    } else if (opr->is_pointer()) {
1932      LIR_Address* address = opr->as_address_ptr();
1933      if (address != NULL) {
1934        // special handling for addresses: add base and index register of the address
1935        // both are always input operands!
1936        if (address->_base->is_valid()) {
1937          assert(address->_base->is_register(), "must be");
1938          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
1939          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
1940        }
1941        if (address->_index->is_valid()) {
1942          assert(address->_index->is_register(), "must be");
1943          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
1944          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
1945        }
1946
1947      } else {
1948        assert(opr->is_constant(), "constant operands are not processed");
1949      }
1950    } else {
1951      assert(opr->is_stack(), "stack operands are not processed");
1952    }
1953  }
1954
1955  void append(CodeEmitInfo* info) {
1956    assert(info != NULL, "should not call this otherwise");
1957    assert(_info_len < maxNumberOfInfos, "array overflow");
1958    _info_new[_info_len++] = info;
1959  }
1960
1961 public:
1962  LIR_OpVisitState()         { reset(); }
1963
1964  LIR_Op* op() const         { return _op; }
1965  void set_op(LIR_Op* op)    { reset(); _op = op; }
1966
1967  bool has_call() const      { return _has_call; }
1968  bool has_slow_case() const { return _has_slow_case; }
1969
1970  void reset() {
1971    _op = NULL;
1972    _has_call = false;
1973    _has_slow_case = false;
1974
1975    _oprs_len[inputMode] = 0;
1976    _oprs_len[tempMode] = 0;
1977    _oprs_len[outputMode] = 0;
1978    _info_len = 0;
1979  }
1980
1981
1982  int opr_count(OprMode mode) const {
1983    assert(mode >= 0 && mode < numModes, "bad mode");
1984    return _oprs_len[mode];
1985  }
1986
1987  LIR_Opr opr_at(OprMode mode, int index) const {
1988    assert(mode >= 0 && mode < numModes, "bad mode");
1989    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
1990    return *_oprs_new[mode][index];
1991  }
1992
1993  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
1994    assert(mode >= 0 && mode < numModes, "bad mode");
1995    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
1996    *_oprs_new[mode][index] = opr;
1997  }
1998
1999  int info_count() const {
2000    return _info_len;
2001  }
2002
2003  CodeEmitInfo* info_at(int index) const {
2004    assert(index < _info_len, "index out of bounds");
2005    return _info_new[index];
2006  }
2007
2008  XHandlers* all_xhandler();
2009
2010  // collects all register operands of the instruction
2011  void visit(LIR_Op* op);
2012
2013#ifdef ASSERT
2014  // check that an operation has no operands
2015  bool no_operands(LIR_Op* op);
2016#endif
2017
2018  // LIR_Op visitor functions use these to fill in the state
2019  void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2020  void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2021  void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2022  void do_info(CodeEmitInfo* info)        { append(info); }
2023
2024  void do_stub(CodeStub* stub);
2025  void do_call()                          { _has_call = true; }
2026  void do_slow_case()                     { _has_slow_case = true; }
2027  void do_slow_case(CodeEmitInfo* info) {
2028    _has_slow_case = true;
2029    append(info);
2030  }
2031};
2032
2033
2034inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2035