c1_LIR.hpp revision 1601:126ea7725993
1/*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25class BlockBegin;
26class BlockList;
27class LIR_Assembler;
28class CodeEmitInfo;
29class CodeStub;
30class CodeStubList;
31class ArrayCopyStub;
32class LIR_Op;
33class ciType;
34class ValueType;
35class LIR_OpVisitState;
36class FpuStackSim;
37
38//---------------------------------------------------------------------
39//                 LIR Operands
40//  LIR_OprDesc
41//    LIR_OprPtr
42//      LIR_Const
43//      LIR_Address
44//---------------------------------------------------------------------
45class LIR_OprDesc;
46class LIR_OprPtr;
47class LIR_Const;
48class LIR_Address;
49class LIR_OprVisitor;
50
51
52typedef LIR_OprDesc* LIR_Opr;
53typedef int          RegNr;
54
55define_array(LIR_OprArray, LIR_Opr)
56define_stack(LIR_OprList, LIR_OprArray)
57
58define_array(LIR_OprRefArray, LIR_Opr*)
59define_stack(LIR_OprRefList, LIR_OprRefArray)
60
61define_array(CodeEmitInfoArray, CodeEmitInfo*)
62define_stack(CodeEmitInfoList, CodeEmitInfoArray)
63
64define_array(LIR_OpArray, LIR_Op*)
65define_stack(LIR_OpList, LIR_OpArray)
66
67// define LIR_OprPtr early so LIR_OprDesc can refer to it
68class LIR_OprPtr: public CompilationResourceObj {
69 public:
70  bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
71  bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
72
73  virtual LIR_Const*  as_constant()              { return NULL; }
74  virtual LIR_Address* as_address()              { return NULL; }
75  virtual BasicType type() const                 = 0;
76  virtual void print_value_on(outputStream* out) const = 0;
77};
78
79
80
81// LIR constants
82class LIR_Const: public LIR_OprPtr {
83 private:
84  JavaValue _value;
85
86  void type_check(BasicType t) const   { assert(type() == t, "type check"); }
87  void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
88  void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
89
90 public:
91  LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
92  LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
93  LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
94  LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
95  LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
96  LIR_Const(void* p) {
97#ifdef _LP64
98    assert(sizeof(jlong) >= sizeof(p), "too small");
99    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
100#else
101    assert(sizeof(jint) >= sizeof(p), "too small");
102    _value.set_type(T_INT);     _value.set_jint((jint)p);
103#endif
104  }
105
106  virtual BasicType type()       const { return _value.get_type(); }
107  virtual LIR_Const* as_constant()     { return this; }
108
109  jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
110  jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
111  jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
112  jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
113  jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
114  jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
115  jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
116
117#ifdef _LP64
118  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
119#else
120  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
121#endif
122
123
124  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
125  jint      as_jint_lo_bits() const    {
126    if (type() == T_DOUBLE) {
127      return low(jlong_cast(_value.get_jdouble()));
128    } else {
129      return as_jint_lo();
130    }
131  }
132  jint      as_jint_hi_bits() const    {
133    if (type() == T_DOUBLE) {
134      return high(jlong_cast(_value.get_jdouble()));
135    } else {
136      return as_jint_hi();
137    }
138  }
139  jlong      as_jlong_bits() const    {
140    if (type() == T_DOUBLE) {
141      return jlong_cast(_value.get_jdouble());
142    } else {
143      return as_jlong();
144    }
145  }
146
147  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
148
149
150  bool is_zero_float() {
151    jfloat f = as_jfloat();
152    jfloat ok = 0.0f;
153    return jint_cast(f) == jint_cast(ok);
154  }
155
156  bool is_one_float() {
157    jfloat f = as_jfloat();
158    return !g_isnan(f) && g_isfinite(f) && f == 1.0;
159  }
160
161  bool is_zero_double() {
162    jdouble d = as_jdouble();
163    jdouble ok = 0.0;
164    return jlong_cast(d) == jlong_cast(ok);
165  }
166
167  bool is_one_double() {
168    jdouble d = as_jdouble();
169    return !g_isnan(d) && g_isfinite(d) && d == 1.0;
170  }
171};
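
// Usage sketch (illustrative only, not part of the original header): a
// LIR_Const tags its JavaValue with a BasicType in the constructor and
// hands the pieces back on request, e.g.
//
//   LIR_Const c((jlong)0x123456789LL);
//   assert(c.type() == T_LONG, "tagged by the constructor");
//   assert(c.as_jint_lo() == 0x23456789 && c.as_jint_hi() == 0x1, "split into 32-bit halves");
//
//   LIR_Const one(1.0f);
//   assert(one.is_one_float(), "exact comparison against 1.0");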
172
173
174//---------------------LIR Operand descriptor------------------------------------
175//
176// The class LIR_OprDesc represents a LIR instruction operand;
177// it can be a register (ALU/FPU), a stack location or a constant.
178// Constants and addresses are represented as resource-area allocated
179// structures (see above).
180// Registers and stack locations are encoded directly in the pointer
181// value itself (see the value() function below).
182
183class LIR_OprDesc: public CompilationResourceObj {
184 public:
185  // value structure:
186  //     data       opr-type opr-kind
187  // +--------------+-------+-------+
188  // [max...........|7 6 5 4|3 2 1 0]
189  //                             ^
190  //                    is_pointer bit
191  //
192  // if the lowest bit is cleared, the value is a pointer to a structure
193  // (LIR_Const or LIR_Address); we need 4 bits to represent the operand types
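  //
  // Worked example (illustrative; the exact field layout is given by the
  // OprBits/OprShift enums below): LIR_OprFact::single_cpu(3) builds the value
  //   (3 << reg1_shift) | int_type | cpu_register | single_size
  // and casts it to LIR_Opr, so no object is allocated; kind_field(),
  // size_field() and data() recover the fields by masking/shifting this
  // pointer value, e.g. data() == 3 is the register number.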
194
195 private:
196  friend class LIR_OprFact;
197
198  // Conversion
199  intptr_t value() const                         { return (intptr_t) this; }
200
201  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
202    return (value() & mask) == masked_value;
203  }
204
205  enum OprKind {
206      pointer_value      = 0
207    , stack_value        = 1
208    , cpu_register       = 3
209    , fpu_register       = 5
210    , illegal_value      = 7
211  };
212
213  enum OprBits {
214      pointer_bits   = 1
215    , kind_bits      = 3
216    , type_bits      = 4
217    , size_bits      = 2
218    , destroys_bits  = 1
219    , virtual_bits   = 1
220    , is_xmm_bits    = 1
221    , last_use_bits  = 1
222    , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
223    , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
224                       is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
225    , data_bits      = BitsPerInt - non_data_bits
226    , reg_bits       = data_bits / 2      // for two registers in one value encoding
227  };
228
229  enum OprShift {
230      kind_shift     = 0
231    , type_shift     = kind_shift     + kind_bits
232    , size_shift     = type_shift     + type_bits
233    , destroys_shift = size_shift     + size_bits
234    , last_use_shift = destroys_shift + destroys_bits
235    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
236    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
237    , is_xmm_shift   = virtual_shift + virtual_bits
238    , data_shift     = is_xmm_shift + is_xmm_bits
239    , reg1_shift = data_shift
240    , reg2_shift = data_shift + reg_bits
241
242  };
243
244  enum OprSize {
245      single_size = 0 << size_shift
246    , double_size = 1 << size_shift
247  };
248
249  enum OprMask {
250      kind_mask      = right_n_bits(kind_bits)
251    , type_mask      = right_n_bits(type_bits) << type_shift
252    , size_mask      = right_n_bits(size_bits) << size_shift
253    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
254    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
255    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
256    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
257    , pointer_mask   = right_n_bits(pointer_bits)
258    , lower_reg_mask = right_n_bits(reg_bits)
259    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
260  };
261
262  uintptr_t data() const                         { return value() >> data_shift; }
263  int lo_reg_half() const                        { return data() & lower_reg_mask; }
264  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
265  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
266  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
267
268  static char type_char(BasicType t);
269
270 public:
271  enum {
272    vreg_base = ConcreteRegisterImpl::number_of_registers,
273    vreg_max = (1 << data_bits) - 1
274  };
275
276  static inline LIR_Opr illegalOpr();
277
278  enum OprType {
279      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
280    , int_type      = 1 << type_shift
281    , long_type     = 2 << type_shift
282    , object_type   = 3 << type_shift
283    , pointer_type  = 4 << type_shift
284    , float_type    = 5 << type_shift
285    , double_type   = 6 << type_shift
286  };
287  friend OprType as_OprType(BasicType t);
288  friend BasicType as_BasicType(OprType t);
289
290  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
291  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
292
293  static OprSize size_for(BasicType t) {
294    switch (t) {
295      case T_LONG:
296      case T_DOUBLE:
297        return double_size;
298        break;
299
300      case T_FLOAT:
301      case T_BOOLEAN:
302      case T_CHAR:
303      case T_BYTE:
304      case T_SHORT:
305      case T_INT:
306      case T_OBJECT:
307      case T_ARRAY:
308        return single_size;
309        break;
310
311      default:
312        ShouldNotReachHere();
313        return single_size;
314      }
315  }
316
317
318  void validate_type() const PRODUCT_RETURN;
319
320  BasicType type() const {
321    if (is_pointer()) {
322      return pointer()->type();
323    }
324    return as_BasicType(type_field());
325  }
326
327
328  ValueType* value_type() const                  { return as_ValueType(type()); }
329
330  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
331
332  bool is_equal(LIR_Opr opr) const         { return this == opr; }
333  // checks whether the types are the same
334  bool is_same_type(LIR_Opr opr) const     {
335    assert(type_field() != unknown_type &&
336           opr->type_field() != unknown_type, "shouldn't see unknown_type");
337    return type_field() == opr->type_field();
338  }
339  bool is_same_register(LIR_Opr opr) {
340    return (is_register() && opr->is_register() &&
341            kind_field() == opr->kind_field() &&
342            (value() & no_type_mask) == (opr->value() & no_type_mask));
343  }
344
345  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
346  bool is_illegal() const      { return kind_field() == illegal_value; }
347  bool is_valid() const        { return kind_field() != illegal_value; }
348
349  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
350  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
351
352  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
353  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
354
355  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
356  bool is_oop() const;
357
358  // semantics for fpu- and xmm-registers:
359  // * the fpu predicates (is_single_fpu, is_double_fpu, ...) also return true
360  //   for xmm registers (so is_single_fpu and is_single_xmm are both true)
361  // * so you must always check for is_???_xmm prior to is_???_fpu to
362  //   distinguish between fpu- and xmm-registers
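  //
  // For example (illustrative; double_xmm is x86-specific):
  //
  //   LIR_Opr opr = LIR_OprFact::double_xmm(0);
  //   assert(opr->is_double_fpu(), "xmm operands also satisfy the fpu predicates");
  //   if      (opr->is_double_xmm()) { /* handle as xmm register */ }
  //   else if (opr->is_double_fpu()) { /* handle as fpu stack register */ }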
363
364  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
365  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
366  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
367
368  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
369  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
370  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
371  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
372  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
373
374  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
375  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
376  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
377  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
378  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
379
380  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
381  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
382  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
383
384  // fast accessor functions for special bits that do not work for pointers
385  // (in these functions, the check for is_pointer() is omitted)
386  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
387  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
388  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
389  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
390  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
391
392  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
393  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
394  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
395  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
396
397
398  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
399  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
400  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
401  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
402  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
403  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
404  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
405  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
406  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
407  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
408  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
409  int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
410
411  LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
412  LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
413  LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
414
415  Register as_register()    const;
416  Register as_register_lo() const;
417  Register as_register_hi() const;
418
419  Register as_pointer_register() {
420#ifdef _LP64
421    if (is_double_cpu()) {
422      assert(as_register_lo() == as_register_hi(), "should be a single register");
423      return as_register_lo();
424    }
425#endif
426    return as_register();
427  }
428
429#ifdef X86
430  XMMRegister as_xmm_float_reg() const;
431  XMMRegister as_xmm_double_reg() const;
432  // for compatibility with RInfo
433  int fpu () const                                  { return lo_reg_half(); }
434#endif // X86
435#if defined(SPARC) || defined(ARM) || defined(PPC)
436  FloatRegister as_float_reg   () const;
437  FloatRegister as_double_reg  () const;
438#endif
439
440  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
441  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
442  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
443  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
444  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
445
446  void print() const PRODUCT_RETURN;
447  void print(outputStream* out) const PRODUCT_RETURN;
448};
449
450
451inline LIR_OprDesc::OprType as_OprType(BasicType type) {
452  switch (type) {
453  case T_INT:      return LIR_OprDesc::int_type;
454  case T_LONG:     return LIR_OprDesc::long_type;
455  case T_FLOAT:    return LIR_OprDesc::float_type;
456  case T_DOUBLE:   return LIR_OprDesc::double_type;
457  case T_OBJECT:
458  case T_ARRAY:    return LIR_OprDesc::object_type;
459  case T_ILLEGAL:  // fall through
460  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
461  }
462}
463
464inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
465  switch (t) {
466  case LIR_OprDesc::int_type:     return T_INT;
467  case LIR_OprDesc::long_type:    return T_LONG;
468  case LIR_OprDesc::float_type:   return T_FLOAT;
469  case LIR_OprDesc::double_type:  return T_DOUBLE;
470  case LIR_OprDesc::object_type:  return T_OBJECT;
471  case LIR_OprDesc::unknown_type: // fall through
472  default: ShouldNotReachHere();  return T_ILLEGAL;
473  }
474}
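
// Illustrative round trips (note that T_ARRAY intentionally maps onto T_OBJECT):
//
//   assert(as_BasicType(as_OprType(T_INT))   == T_INT,    "exact round trip");
//   assert(as_BasicType(as_OprType(T_ARRAY)) == T_OBJECT, "arrays share object_type");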
475
476
477// LIR_Address
478class LIR_Address: public LIR_OprPtr {
479 friend class LIR_OpVisitState;
480
481 public:
482  // NOTE: currently these must be the log2 of the scale factor (and
483  // must also be equivalent to the ScaleFactor enum in
484  // assembler_i486.hpp)
485  enum Scale {
486    times_1  =  0,
487    times_2  =  1,
488    times_4  =  2,
489    times_8  =  3
490  };
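
  // Example (illustrative; uses the X86/ARM constructor declared below, with
  // base_opr, index_opr and disp assumed to be supplied by the caller):
  //
  //   new LIR_Address(base_opr, index_opr, LIR_Address::times_4, disp, T_INT)
  //
  // denotes base + (index << 2) + disp, i.e. a scale factor of 4 for
  // int-sized elements.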
491
492 private:
493  LIR_Opr   _base;
494  LIR_Opr   _index;
495  Scale     _scale;
496  intx      _disp;
497  BasicType _type;
498
499 public:
500  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
501       _base(base)
502     , _index(index)
503     , _scale(times_1)
504     , _type(type)
505     , _disp(0) { verify(); }
506
507  LIR_Address(LIR_Opr base, intx disp, BasicType type):
508       _base(base)
509     , _index(LIR_OprDesc::illegalOpr())
510     , _scale(times_1)
511     , _type(type)
512     , _disp(disp) { verify(); }
513
514  LIR_Address(LIR_Opr base, BasicType type):
515       _base(base)
516     , _index(LIR_OprDesc::illegalOpr())
517     , _scale(times_1)
518     , _type(type)
519     , _disp(0) { verify(); }
520
521#if defined(X86) || defined(ARM)
522  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
523       _base(base)
524     , _index(index)
525     , _scale(scale)
526     , _type(type)
527     , _disp(disp) { verify(); }
528#endif // X86 || ARM
529
530  LIR_Opr base()  const                          { return _base;  }
531  LIR_Opr index() const                          { return _index; }
532  Scale   scale() const                          { return _scale; }
533  intx    disp()  const                          { return _disp;  }
534
535  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
536
537  virtual LIR_Address* as_address()              { return this;   }
538  virtual BasicType type() const                 { return _type; }
539  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
540
541  void verify() const PRODUCT_RETURN;
542
543  static Scale scale(BasicType type);
544};
545
546
547// operand factory
548class LIR_OprFact: public AllStatic {
549 public:
550
551  static LIR_Opr illegalOpr;
552
553  static LIR_Opr single_cpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
554  static LIR_Opr single_cpu_oop(int reg)        { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
555  static LIR_Opr double_cpu(int reg1, int reg2) {
556    LP64_ONLY(assert(reg1 == reg2, "must be identical"));
557    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
558                               (reg2 << LIR_OprDesc::reg2_shift) |
559                               LIR_OprDesc::long_type            |
560                               LIR_OprDesc::cpu_register         |
561                               LIR_OprDesc::double_size);
562  }
563
564  static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
565                                                                             LIR_OprDesc::float_type           |
566                                                                             LIR_OprDesc::fpu_register         |
567                                                                             LIR_OprDesc::single_size); }
568#if defined(ARM)
569  static LIR_Opr double_fpu(int reg1, int reg2)    { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
570  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
571  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
572#endif
573#ifdef SPARC
574  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
575                                                                             (reg2 << LIR_OprDesc::reg2_shift) |
576                                                                             LIR_OprDesc::double_type          |
577                                                                             LIR_OprDesc::fpu_register         |
578                                                                             LIR_OprDesc::double_size); }
579#endif
580#ifdef X86
581  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
582                                                                             (reg  << LIR_OprDesc::reg2_shift) |
583                                                                             LIR_OprDesc::double_type          |
584                                                                             LIR_OprDesc::fpu_register         |
585                                                                             LIR_OprDesc::double_size); }
586
587  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
588                                                                             LIR_OprDesc::float_type           |
589                                                                             LIR_OprDesc::fpu_register         |
590                                                                             LIR_OprDesc::single_size          |
591                                                                             LIR_OprDesc::is_xmm_mask); }
592  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
593                                                                             (reg  << LIR_OprDesc::reg2_shift) |
594                                                                             LIR_OprDesc::double_type          |
595                                                                             LIR_OprDesc::fpu_register         |
596                                                                             LIR_OprDesc::double_size          |
597                                                                             LIR_OprDesc::is_xmm_mask); }
598#endif // X86
599#ifdef PPC
600  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
601                                                                             (reg  << LIR_OprDesc::reg2_shift) |
602                                                                             LIR_OprDesc::double_type          |
603                                                                             LIR_OprDesc::fpu_register         |
604                                                                             LIR_OprDesc::double_size); }
605  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift)        |
606                                                                             LIR_OprDesc::float_type           |
607                                                                             LIR_OprDesc::cpu_register         |
608                                                                             LIR_OprDesc::single_size); }
609  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift)        |
610                                                                             (reg1 << LIR_OprDesc::reg2_shift) |
611                                                                             LIR_OprDesc::double_type          |
612                                                                             LIR_OprDesc::cpu_register         |
613                                                                             LIR_OprDesc::double_size); }
614#endif // PPC
615
616  static LIR_Opr virtual_register(int index, BasicType type) {
617    LIR_Opr res;
618    switch (type) {
619      case T_OBJECT: // fall through
620      case T_ARRAY:
621        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
622                                            LIR_OprDesc::object_type  |
623                                            LIR_OprDesc::cpu_register |
624                                            LIR_OprDesc::single_size  |
625                                            LIR_OprDesc::virtual_mask);
626        break;
627
628      case T_INT:
629        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
630                                  LIR_OprDesc::int_type              |
631                                  LIR_OprDesc::cpu_register          |
632                                  LIR_OprDesc::single_size           |
633                                  LIR_OprDesc::virtual_mask);
634        break;
635
636      case T_LONG:
637        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
638                                  LIR_OprDesc::long_type             |
639                                  LIR_OprDesc::cpu_register          |
640                                  LIR_OprDesc::double_size           |
641                                  LIR_OprDesc::virtual_mask);
642        break;
643
644#ifdef __SOFTFP__
645      case T_FLOAT:
646        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
647                                  LIR_OprDesc::float_type  |
648                                  LIR_OprDesc::cpu_register |
649                                  LIR_OprDesc::single_size |
650                                  LIR_OprDesc::virtual_mask);
651        break;
652      case T_DOUBLE:
653        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
654                                  LIR_OprDesc::double_type |
655                                  LIR_OprDesc::cpu_register |
656                                  LIR_OprDesc::double_size |
657                                  LIR_OprDesc::virtual_mask);
658        break;
659#else // __SOFTFP__
660      case T_FLOAT:
661        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
662                                  LIR_OprDesc::float_type           |
663                                  LIR_OprDesc::fpu_register         |
664                                  LIR_OprDesc::single_size          |
665                                  LIR_OprDesc::virtual_mask);
666        break;
667
668      case T_DOUBLE:
669        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
670                                  LIR_OprDesc::double_type          |
671                                  LIR_OprDesc::fpu_register         |
672                                  LIR_OprDesc::double_size          |
673                                  LIR_OprDesc::virtual_mask);
674        break;
675#endif // __SOFTFP__
676      default:       ShouldNotReachHere(); res = illegalOpr;
677    }
678
679#ifdef ASSERT
680    res->validate_type();
681    assert(res->vreg_number() == index, "conversion check");
682    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
683    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
684
685    // old-style calculation; check if old and new method are equal
686    LIR_OprDesc::OprType t = as_OprType(type);
687#ifdef __SOFTFP__
688    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
689                               t |
690                               LIR_OprDesc::cpu_register |
691                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
692#else // __SOFTFP__
693    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
694                                          ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
695                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
696    assert(res == old_res, "old and new method not equal");
697#endif // __SOFTFP__
698#endif // ASSERT
699
700    return res;
701  }
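
  // Usage sketch (illustrative): virtual register numbers start at
  // LIR_OprDesc::vreg_base, so a fresh int vreg could look like
  //
  //   LIR_Opr vreg = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base + 7, T_INT);
  //   assert(vreg->is_virtual_cpu(), "cpu register kind with the virtual bit set");
  //   assert(vreg->vreg_number() == LIR_OprDesc::vreg_base + 7, "index round-trips through data()");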
702
703  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
704  // the index is platform independent; a double-word stack value occupying indices 2 and 3
705  // always has index 2.
706  static LIR_Opr stack(int index, BasicType type) {
707    LIR_Opr res;
708    switch (type) {
709      case T_OBJECT: // fall through
710      case T_ARRAY:
711        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
712                                  LIR_OprDesc::object_type           |
713                                  LIR_OprDesc::stack_value           |
714                                  LIR_OprDesc::single_size);
715        break;
716
717      case T_INT:
718        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
719                                  LIR_OprDesc::int_type              |
720                                  LIR_OprDesc::stack_value           |
721                                  LIR_OprDesc::single_size);
722        break;
723
724      case T_LONG:
725        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
726                                  LIR_OprDesc::long_type             |
727                                  LIR_OprDesc::stack_value           |
728                                  LIR_OprDesc::double_size);
729        break;
730
731      case T_FLOAT:
732        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
733                                  LIR_OprDesc::float_type            |
734                                  LIR_OprDesc::stack_value           |
735                                  LIR_OprDesc::single_size);
736        break;
737      case T_DOUBLE:
738        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
739                                  LIR_OprDesc::double_type           |
740                                  LIR_OprDesc::stack_value           |
741                                  LIR_OprDesc::double_size);
742        break;
743
744      default:       ShouldNotReachHere(); res = illegalOpr;
745    }
746
747#ifdef ASSERT
748    assert(index >= 0, "index must be positive");
749    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
750
751    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
752                                          LIR_OprDesc::stack_value           |
753                                          as_OprType(type)                   |
754                                          LIR_OprDesc::size_for(type));
755    assert(res == old_res, "old and new method not equal");
756#endif
757
758    return res;
759  }
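
  // Usage sketch (illustrative): a long occupying stack slots 2 and 3 is a
  // single double-word stack operand with index 2:
  //
  //   LIR_Opr slot = LIR_OprFact::stack(2, T_LONG);
  //   assert(slot->is_double_stack(), "stack kind, double size");
  //   assert(slot->double_stack_ix() == 2, "platform-independent slot index");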
760
761  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
762  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
763  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
764  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
765  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
766  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
767  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
768  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
769  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
770  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
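
  // Usage sketch (illustrative): constants are resource-allocated LIR_Const
  // objects returned as operands; their cleared low bit marks them as pointer
  // operands:
  //
  //   LIR_Opr c = LIR_OprFact::intConst(42);
  //   assert(c->is_constant(), "pointer operand wrapping a LIR_Const");
  //   assert(c->as_jint() == 42, "forwarded to the underlying LIR_Const");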
771
772  static LIR_Opr value_type(ValueType* type);
773  static LIR_Opr dummy_value_type(ValueType* type);
774};
775
776
777//-------------------------------------------------------------------------------
778//                   LIR Instructions
779//-------------------------------------------------------------------------------
780//
781// Note:
782//  - every instruction has a result operand
783//  - every instruction has a CodeEmitInfo operand (can be revisited later)
784//  - every instruction has a LIR_OpCode operand
785//  - LIR_OpN means an instruction that has N input operands
786//
787// class hierarchy:
788//
789class  LIR_Op;
790class    LIR_Op0;
791class      LIR_OpLabel;
792class    LIR_Op1;
793class      LIR_OpBranch;
794class      LIR_OpConvert;
795class      LIR_OpAllocObj;
796class      LIR_OpRoundFP;
797class    LIR_Op2;
798class    LIR_OpDelay;
799class    LIR_Op3;
800class      LIR_OpAllocArray;
801class    LIR_OpCall;
802class      LIR_OpJavaCall;
803class      LIR_OpRTCall;
804class    LIR_OpArrayCopy;
805class    LIR_OpLock;
806class    LIR_OpTypeCheck;
807class    LIR_OpCompareAndSwap;
808class    LIR_OpProfileCall;
809
810
811// LIR operation codes
812enum LIR_Code {
813    lir_none
814  , begin_op0
815      , lir_word_align
816      , lir_label
817      , lir_nop
818      , lir_backwardbranch_target
819      , lir_std_entry
820      , lir_osr_entry
821      , lir_build_frame
822      , lir_fpop_raw
823      , lir_24bit_FPU
824      , lir_reset_FPU
825      , lir_breakpoint
826      , lir_rtcall
827      , lir_membar
828      , lir_membar_acquire
829      , lir_membar_release
830      , lir_get_thread
831  , end_op0
832  , begin_op1
833      , lir_fxch
834      , lir_fld
835      , lir_ffree
836      , lir_push
837      , lir_pop
838      , lir_null_check
839      , lir_return
840      , lir_leal
841      , lir_neg
842      , lir_branch
843      , lir_cond_float_branch
844      , lir_move
845      , lir_prefetchr
846      , lir_prefetchw
847      , lir_convert
848      , lir_alloc_object
849      , lir_monaddr
850      , lir_roundfp
851      , lir_safepoint
852      , lir_unwind
853  , end_op1
854  , begin_op2
855      , lir_cmp
856      , lir_cmp_l2i
857      , lir_ucmp_fd2i
858      , lir_cmp_fd2i
859      , lir_cmove
860      , lir_add
861      , lir_sub
862      , lir_mul
863      , lir_mul_strictfp
864      , lir_div
865      , lir_div_strictfp
866      , lir_rem
867      , lir_sqrt
868      , lir_abs
869      , lir_sin
870      , lir_cos
871      , lir_tan
872      , lir_log
873      , lir_log10
874      , lir_logic_and
875      , lir_logic_or
876      , lir_logic_xor
877      , lir_shl
878      , lir_shr
879      , lir_ushr
880      , lir_alloc_array
881      , lir_throw
882      , lir_compare_to
883  , end_op2
884  , begin_op3
885      , lir_idiv
886      , lir_irem
887  , end_op3
888  , begin_opJavaCall
889      , lir_static_call
890      , lir_optvirtual_call
891      , lir_icvirtual_call
892      , lir_virtual_call
893      , lir_dynamic_call
894  , end_opJavaCall
895  , begin_opArrayCopy
896      , lir_arraycopy
897  , end_opArrayCopy
898  , begin_opLock
899    , lir_lock
900    , lir_unlock
901  , end_opLock
902  , begin_delay_slot
903    , lir_delay_slot
904  , end_delay_slot
905  , begin_opTypeCheck
906    , lir_instanceof
907    , lir_checkcast
908    , lir_store_check
909  , end_opTypeCheck
910  , begin_opCompareAndSwap
911    , lir_cas_long
912    , lir_cas_obj
913    , lir_cas_int
914  , end_opCompareAndSwap
915  , begin_opMDOProfile
916    , lir_profile_call
917  , end_opMDOProfile
918};
919
920
921enum LIR_Condition {
922    lir_cond_equal
923  , lir_cond_notEqual
924  , lir_cond_less
925  , lir_cond_lessEqual
926  , lir_cond_greaterEqual
927  , lir_cond_greater
928  , lir_cond_belowEqual
929  , lir_cond_aboveEqual
930  , lir_cond_always
931  , lir_cond_unknown = -1
932};
933
934
935enum LIR_PatchCode {
936  lir_patch_none,
937  lir_patch_low,
938  lir_patch_high,
939  lir_patch_normal
940};
941
942
943enum LIR_MoveKind {
944  lir_move_normal,
945  lir_move_volatile,
946  lir_move_unaligned,
947  lir_move_max_flag
948};
949
950
951// --------------------------------------------------
952// LIR_Op
953// --------------------------------------------------
954class LIR_Op: public CompilationResourceObj {
955 friend class LIR_OpVisitState;
956
957#ifdef ASSERT
958 private:
959  const char *  _file;
960  int           _line;
961#endif
962
963 protected:
964  LIR_Opr       _result;
965  unsigned short _code;
966  unsigned short _flags;
967  CodeEmitInfo* _info;
968  int           _id;     // value id for register allocation
969  int           _fpu_pop_count;
970  Instruction*  _source; // for debugging
971
972  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
973
974 protected:
975  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
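  // e.g. is_in_range(lir_label, begin_op0, end_op0) is true because the
  // begin_*/end_* sentinels in LIR_Code bracket each group of opcodes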
976
977 public:
978  LIR_Op()
979    : _result(LIR_OprFact::illegalOpr)
980    , _code(lir_none)
981    , _flags(0)
982    , _info(NULL)
983#ifdef ASSERT
984    , _file(NULL)
985    , _line(0)
986#endif
987    , _fpu_pop_count(0)
988    , _source(NULL)
989    , _id(-1)                             {}
990
991  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
992    : _result(result)
993    , _code(code)
994    , _flags(0)
995    , _info(info)
996#ifdef ASSERT
997    , _file(NULL)
998    , _line(0)
999#endif
1000    , _fpu_pop_count(0)
1001    , _source(NULL)
1002    , _id(-1)                             {}
1003
1004  CodeEmitInfo* info() const                  { return _info;   }
1005  LIR_Code code()      const                  { return (LIR_Code)_code;   }
1006  LIR_Opr result_opr() const                  { return _result; }
1007  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1008
1009#ifdef ASSERT
1010  void set_file_and_line(const char * file, int line) {
1011    _file = file;
1012    _line = line;
1013  }
1014#endif
1015
1016  virtual const char * name() const PRODUCT_RETURN0;
1017
1018  int id()             const                  { return _id;     }
1019  void set_id(int id)                         { _id = id; }
1020
1021  // FPU stack simulation helpers -- only used on Intel
1022  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1023  int  fpu_pop_count() const                  { return _fpu_pop_count; }
1024  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1025
1026  Instruction* source() const                 { return _source; }
1027  void set_source(Instruction* ins)           { _source = ins; }
1028
1029  virtual void emit_code(LIR_Assembler* masm) = 0;
1030  virtual void print_instr(outputStream* out) const   = 0;
1031  virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1032
1033  virtual LIR_OpCall* as_OpCall() { return NULL; }
1034  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1035  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1036  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1037  virtual LIR_OpLock* as_OpLock() { return NULL; }
1038  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1039  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1040  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1041  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1042  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1043  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1044  virtual LIR_Op0* as_Op0() { return NULL; }
1045  virtual LIR_Op1* as_Op1() { return NULL; }
1046  virtual LIR_Op2* as_Op2() { return NULL; }
1047  virtual LIR_Op3* as_Op3() { return NULL; }
1048  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1049  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1050  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1051  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1052
1053  virtual void verify() const {}
1054};
1055
1056// for calls
1057class LIR_OpCall: public LIR_Op {
1058 friend class LIR_OpVisitState;
1059
1060 protected:
1061  address      _addr;
1062  LIR_OprList* _arguments;
1063 protected:
1064  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1065             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1066    : LIR_Op(code, result, info)
1067    , _arguments(arguments)
1068    , _addr(addr) {}
1069
1070 public:
1071  address addr() const                           { return _addr; }
1072  const LIR_OprList* arguments() const           { return _arguments; }
1073  virtual LIR_OpCall* as_OpCall()                { return this; }
1074};
1075
1076
1077// --------------------------------------------------
1078// LIR_OpJavaCall
1079// --------------------------------------------------
1080class LIR_OpJavaCall: public LIR_OpCall {
1081 friend class LIR_OpVisitState;
1082
1083 private:
1084  ciMethod* _method;
1085  LIR_Opr   _receiver;
1086  LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1087
1088 public:
1089  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1090                 LIR_Opr receiver, LIR_Opr result,
1091                 address addr, LIR_OprList* arguments,
1092                 CodeEmitInfo* info)
1093  : LIR_OpCall(code, addr, result, arguments, info)
1094  , _receiver(receiver)
1095  , _method(method)
1096  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1097  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1098
1099  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1100                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1101                 LIR_OprList* arguments, CodeEmitInfo* info)
1102  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1103  , _receiver(receiver)
1104  , _method(method)
1105  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1106  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1107
1108  LIR_Opr receiver() const                       { return _receiver; }
1109  ciMethod* method() const                       { return _method;   }
1110
1111  // JSR 292 support.
1112  bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1113  bool is_method_handle_invoke() const {
1114    return
1115      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
1116      ||
1117      (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
1118       methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
1119  }
1120
1121  intptr_t vtable_offset() const {
1122    assert(_code == lir_virtual_call, "only have vtable for real vcall");
1123    return (intptr_t) addr();
1124  }
1125
1126  virtual void emit_code(LIR_Assembler* masm);
1127  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1128  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1129};
1130
1131// --------------------------------------------------
1132// LIR_OpLabel
1133// --------------------------------------------------
1134// Location where a branch can continue
1135class LIR_OpLabel: public LIR_Op {
1136 friend class LIR_OpVisitState;
1137
1138 private:
1139  Label* _label;
1140 public:
1141  LIR_OpLabel(Label* lbl)
1142   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1143   , _label(lbl)                                 {}
1144  Label* label() const                           { return _label; }
1145
1146  virtual void emit_code(LIR_Assembler* masm);
1147  virtual LIR_OpLabel* as_OpLabel() { return this; }
1148  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1149};
1150
1151// LIR_OpArrayCopy
1152class LIR_OpArrayCopy: public LIR_Op {
1153 friend class LIR_OpVisitState;
1154
1155 private:
1156  ArrayCopyStub*  _stub;
1157  LIR_Opr   _src;
1158  LIR_Opr   _src_pos;
1159  LIR_Opr   _dst;
1160  LIR_Opr   _dst_pos;
1161  LIR_Opr   _length;
1162  LIR_Opr   _tmp;
1163  ciArrayKlass* _expected_type;
1164  int       _flags;
1165
1166public:
1167  enum Flags {
1168    src_null_check         = 1 << 0,
1169    dst_null_check         = 1 << 1,
1170    src_pos_positive_check = 1 << 2,
1171    dst_pos_positive_check = 1 << 3,
1172    length_positive_check  = 1 << 4,
1173    src_range_check        = 1 << 5,
1174    dst_range_check        = 1 << 6,
1175    type_check             = 1 << 7,
1176    all_flags              = (1 << 8) - 1
1177  };
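
  // Usage sketch (illustrative): the flags form a bit set describing which
  // runtime checks the generated copy still needs to perform, e.g.
  //
  //   int f = LIR_OpArrayCopy::src_null_check | LIR_OpArrayCopy::dst_null_check;
  //
  // while all_flags requests every check.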
1178
1179  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1180                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1181
1182  LIR_Opr src() const                            { return _src; }
1183  LIR_Opr src_pos() const                        { return _src_pos; }
1184  LIR_Opr dst() const                            { return _dst; }
1185  LIR_Opr dst_pos() const                        { return _dst_pos; }
1186  LIR_Opr length() const                         { return _length; }
1187  LIR_Opr tmp() const                            { return _tmp; }
1188  int flags() const                              { return _flags; }
1189  ciArrayKlass* expected_type() const            { return _expected_type; }
1190  ArrayCopyStub* stub() const                    { return _stub; }
1191
1192  virtual void emit_code(LIR_Assembler* masm);
1193  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1194  void print_instr(outputStream* out) const PRODUCT_RETURN;
1195};
1196
1197
1198// --------------------------------------------------
1199// LIR_Op0
1200// --------------------------------------------------
1201class LIR_Op0: public LIR_Op {
1202 friend class LIR_OpVisitState;
1203
1204 public:
1205  LIR_Op0(LIR_Code code)
1206   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1207  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1208   : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1209
1210  virtual void emit_code(LIR_Assembler* masm);
1211  virtual LIR_Op0* as_Op0() { return this; }
1212  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1213};
1214
1215
1216// --------------------------------------------------
1217// LIR_Op1
1218// --------------------------------------------------
1219
1220class LIR_Op1: public LIR_Op {
1221 friend class LIR_OpVisitState;
1222
1223 protected:
1224  LIR_Opr         _opr;   // input operand
1225  BasicType       _type;  // operand type
1226  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1227
1228  static void print_patch_code(outputStream* out, LIR_PatchCode code);
1229
1230  void set_kind(LIR_MoveKind kind) {
1231    assert(code() == lir_move, "must be");
1232    _flags = kind;
1233  }
1234
1235 public:
1236  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1237    : LIR_Op(code, result, info)
1238    , _opr(opr)
1239    , _patch(patch)
1240    , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1241
1242  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1243    : LIR_Op(code, result, info)
1244    , _opr(opr)
1245    , _patch(patch)
1246    , _type(type)                      {
1247    assert(code == lir_move, "must be");
1248    set_kind(kind);
1249  }
1250
1251  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1252    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1253    , _opr(opr)
1254    , _patch(lir_patch_none)
1255    , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1256
1257  LIR_Opr in_opr()           const               { return _opr;   }
1258  LIR_PatchCode patch_code() const               { return _patch; }
1259  BasicType type()           const               { return _type;  }
1260
1261  LIR_MoveKind move_kind() const {
1262    assert(code() == lir_move, "must be");
1263    return (LIR_MoveKind)_flags;
1264  }
1265
1266  virtual void emit_code(LIR_Assembler* masm);
1267  virtual LIR_Op1* as_Op1() { return this; }
1268  virtual const char * name() const PRODUCT_RETURN0;
1269
1270  void set_in_opr(LIR_Opr opr) { _opr = opr; }
1271
1272  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1273  virtual void verify() const;
1274};
1275
1276
1277// for runtime calls
1278class LIR_OpRTCall: public LIR_OpCall {
1279 friend class LIR_OpVisitState;
1280
1281 private:
1282  LIR_Opr _tmp;
1283 public:
1284  LIR_OpRTCall(address addr, LIR_Opr tmp,
1285               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1286    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1287    , _tmp(tmp) {}
1288
1289  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1290  virtual void emit_code(LIR_Assembler* masm);
1291  virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1292
1293  LIR_Opr tmp() const                            { return _tmp; }
1294
1295  virtual void verify() const;
1296};
1297
1298
1299class LIR_OpBranch: public LIR_Op {
1300 friend class LIR_OpVisitState;
1301
1302 private:
1303  LIR_Condition _cond;
1304  BasicType     _type;
1305  Label*        _label;
1306  BlockBegin*   _block;  // if this is a branch to a block, this is the block
1307  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1308  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1309
1310 public:
1311  LIR_OpBranch(LIR_Condition cond, Label* lbl)
1312    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1313    , _cond(cond)
1314    , _label(lbl)
1315    , _block(NULL)
1316    , _ublock(NULL)
1317    , _stub(NULL) { }
1318
1319  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1320  LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1321
1322  // for unordered comparisons
1323  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1324
1325  LIR_Condition cond()        const              { return _cond;        }
1326  BasicType     type()        const              { return _type;        }
1327  Label*        label()       const              { return _label;       }
1328  BlockBegin*   block()       const              { return _block;       }
1329  BlockBegin*   ublock()      const              { return _ublock;      }
1330  CodeStub*     stub()        const              { return _stub;       }
1331
1332  void          change_block(BlockBegin* b);
1333  void          change_ublock(BlockBegin* b);
1334  void          negate_cond();
1335
1336  virtual void emit_code(LIR_Assembler* masm);
1337  virtual LIR_OpBranch* as_OpBranch() { return this; }
1338  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1339};
1340
1341
1342class ConversionStub;
1343
1344class LIR_OpConvert: public LIR_Op1 {
1345 friend class LIR_OpVisitState;
1346
1347 private:
1348   Bytecodes::Code _bytecode;
1349   ConversionStub* _stub;
1350#ifdef PPC
1351  LIR_Opr _tmp1;
1352  LIR_Opr _tmp2;
1353#endif
1354
1355 public:
1356   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1357     : LIR_Op1(lir_convert, opr, result)
     , _bytecode(code)
     , _stub(stub)
#ifdef PPC
     , _tmp1(LIR_OprDesc::illegalOpr())
     , _tmp2(LIR_OprDesc::illegalOpr())
#endif
     {}
1364
1365#ifdef PPC
1366   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
1367                 ,LIR_Opr tmp1, LIR_Opr tmp2)
1368     : LIR_Op1(lir_convert, opr, result)
     , _bytecode(code)
     , _stub(stub)
     , _tmp1(tmp1)
     , _tmp2(tmp2)                               {}
1373#endif
1374
1375  Bytecodes::Code bytecode() const               { return _bytecode; }
1376  ConversionStub* stub() const                   { return _stub; }
1377#ifdef PPC
1378  LIR_Opr tmp1() const                           { return _tmp1; }
1379  LIR_Opr tmp2() const                           { return _tmp2; }
1380#endif
1381
1382  virtual void emit_code(LIR_Assembler* masm);
1383  virtual LIR_OpConvert* as_OpConvert() { return this; }
1384  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1385
1386  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1387};
1388
1389
1390// LIR_OpAllocObj
1391class LIR_OpAllocObj : public LIR_Op1 {
1392 friend class LIR_OpVisitState;
1393
1394 private:
1395  LIR_Opr _tmp1;
1396  LIR_Opr _tmp2;
1397  LIR_Opr _tmp3;
1398  LIR_Opr _tmp4;
1399  int     _hdr_size;
1400  int     _obj_size;
1401  CodeStub* _stub;
1402  bool    _init_check;
1403
1404 public:
1405  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1406                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1407                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1408    : LIR_Op1(lir_alloc_object, klass, result)
1409    , _tmp1(t1)
1410    , _tmp2(t2)
1411    , _tmp3(t3)
1412    , _tmp4(t4)
1413    , _hdr_size(hdr_size)
1414    , _obj_size(obj_size)
    , _stub(stub)
    , _init_check(init_check)                    { }
1417
1418  LIR_Opr klass()        const                   { return in_opr();     }
1419  LIR_Opr obj()          const                   { return result_opr(); }
1420  LIR_Opr tmp1()         const                   { return _tmp1;        }
1421  LIR_Opr tmp2()         const                   { return _tmp2;        }
1422  LIR_Opr tmp3()         const                   { return _tmp3;        }
1423  LIR_Opr tmp4()         const                   { return _tmp4;        }
1424  int     header_size()  const                   { return _hdr_size;    }
1425  int     object_size()  const                   { return _obj_size;    }
1426  bool    init_check()   const                   { return _init_check;  }
1427  CodeStub* stub()       const                   { return _stub;        }
1428
1429  virtual void emit_code(LIR_Assembler* masm);
1430  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1431  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1432};
1433
1434
1435// LIR_OpRoundFP
1436class LIR_OpRoundFP : public LIR_Op1 {
1437 friend class LIR_OpVisitState;
1438
1439 private:
1440  LIR_Opr _tmp;
1441
1442 public:
1443  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1444    : LIR_Op1(lir_roundfp, reg, result)
1445    , _tmp(stack_loc_temp) {}
1446
1447  LIR_Opr tmp() const                            { return _tmp; }
1448  virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1449  void print_instr(outputStream* out) const PRODUCT_RETURN;
1450};
1451
1452// LIR_OpTypeCheck
1453class LIR_OpTypeCheck: public LIR_Op {
1454 friend class LIR_OpVisitState;
1455
1456 private:
1457  LIR_Opr       _object;
1458  LIR_Opr       _array;
1459  ciKlass*      _klass;
1460  LIR_Opr       _tmp1;
1461  LIR_Opr       _tmp2;
1462  LIR_Opr       _tmp3;
1463  bool          _fast_check;
1464  CodeEmitInfo* _info_for_patch;
1465  CodeEmitInfo* _info_for_exception;
1466  CodeStub*     _stub;
1467  // Helpers for Tier1UpdateMethodData
1468  ciMethod*     _profiled_method;
1469  int           _profiled_bci;
1470
1471public:
1472  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1473                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1474                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
1475                  ciMethod* profiled_method, int profiled_bci);
1476  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1477                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
1478                  ciMethod* profiled_method, int profiled_bci);
1479
1480  LIR_Opr object() const                         { return _object;         }
1481  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1482  LIR_Opr tmp1() const                           { return _tmp1;           }
1483  LIR_Opr tmp2() const                           { return _tmp2;           }
1484  LIR_Opr tmp3() const                           { return _tmp3;           }
1485  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1486  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1487  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1488  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1489  CodeStub* stub() const                         { return _stub;           }
1490
1491  // methodDataOop profiling
1492  ciMethod* profiled_method()                    { return _profiled_method; }
1493  int       profiled_bci()                       { return _profiled_bci; }
1494
1495  virtual void emit_code(LIR_Assembler* masm);
1496  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1497  void print_instr(outputStream* out) const PRODUCT_RETURN;
1498};
1499
1500// LIR_Op2
1501class LIR_Op2: public LIR_Op {
1502 friend class LIR_OpVisitState;
1503
1504  int  _fpu_stack_size; // for sin/cos implementation on Intel
1505
1506 protected:
1507  LIR_Opr   _opr1;
1508  LIR_Opr   _opr2;
1509  BasicType _type;
1510  LIR_Opr   _tmp;
1511  LIR_Condition _condition;
1512
1513  void verify() const;
1514
1515 public:
1516  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1517    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(condition) {
1524    assert(code == lir_cmp, "code check");
1525  }
1526
1527  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
1528    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(condition) {
1535    assert(code == lir_cmove, "code check");
1536  }
1537
1538  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1539          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1540    : LIR_Op(code, result, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp(LIR_OprFact::illegalOpr)
    , _condition(lir_cond_unknown) {
1547    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1548  }
1549
1550  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp)
1551    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp(tmp)
    , _condition(lir_cond_unknown) {
1558    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1559  }
1560
1561  LIR_Opr in_opr1() const                        { return _opr1; }
1562  LIR_Opr in_opr2() const                        { return _opr2; }
1563  BasicType type()  const                        { return _type; }
1564  LIR_Opr tmp_opr() const                        { return _tmp; }
1565  LIR_Condition condition() const  {
1566    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1567  }
1568  void set_condition(LIR_Condition condition) {
1569    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1570  }
1571
1572  void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1573  int  fpu_stack_size() const                    { return _fpu_stack_size; }
1574
1575  void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1576  void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1577
1578  virtual void emit_code(LIR_Assembler* masm);
1579  virtual LIR_Op2* as_Op2() { return this; }
1580  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1581};
1582
1583class LIR_OpAllocArray : public LIR_Op {
1584 friend class LIR_OpVisitState;
1585
1586 private:
1587  LIR_Opr   _klass;
1588  LIR_Opr   _len;
1589  LIR_Opr   _tmp1;
1590  LIR_Opr   _tmp2;
1591  LIR_Opr   _tmp3;
1592  LIR_Opr   _tmp4;
1593  BasicType _type;
1594  CodeStub* _stub;
1595
1596 public:
1597  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1598    : LIR_Op(lir_alloc_array, result, NULL)
1599    , _klass(klass)
1600    , _len(len)
1601    , _tmp1(t1)
1602    , _tmp2(t2)
1603    , _tmp3(t3)
1604    , _tmp4(t4)
1605    , _type(type)
1606    , _stub(stub) {}
1607
1608  LIR_Opr   klass()   const                      { return _klass;       }
1609  LIR_Opr   len()     const                      { return _len;         }
1610  LIR_Opr   obj()     const                      { return result_opr(); }
1611  LIR_Opr   tmp1()    const                      { return _tmp1;        }
1612  LIR_Opr   tmp2()    const                      { return _tmp2;        }
1613  LIR_Opr   tmp3()    const                      { return _tmp3;        }
1614  LIR_Opr   tmp4()    const                      { return _tmp4;        }
1615  BasicType type()    const                      { return _type;        }
1616  CodeStub* stub()    const                      { return _stub;        }
1617
1618  virtual void emit_code(LIR_Assembler* masm);
1619  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1620  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1621};
1622
1623
1624class LIR_Op3: public LIR_Op {
1625 friend class LIR_OpVisitState;
1626
1627 private:
1628  LIR_Opr _opr1;
1629  LIR_Opr _opr2;
1630  LIR_Opr _opr3;
1631 public:
1632  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1633    : LIR_Op(code, result, info)
1634    , _opr1(opr1)
1635    , _opr2(opr2)
1636    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1637  LIR_Opr in_opr1() const                        { return _opr1; }
1638  LIR_Opr in_opr2() const                        { return _opr2; }
1639  LIR_Opr in_opr3() const                        { return _opr3; }
1640
1641  virtual void emit_code(LIR_Assembler* masm);
1642  virtual LIR_Op3* as_Op3() { return this; }
1643  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1644};
1645
1646
1647//--------------------------------
1648class LabelObj: public CompilationResourceObj {
1649 private:
1650  Label _label;
1651 public:
1652  LabelObj()                                     {}
1653  Label* label()                                 { return &_label; }
1654};
1655
1656
1657class LIR_OpLock: public LIR_Op {
1658 friend class LIR_OpVisitState;
1659
1660 private:
1661  LIR_Opr _hdr;
1662  LIR_Opr _obj;
1663  LIR_Opr _lock;
1664  LIR_Opr _scratch;
1665  CodeStub* _stub;
1666 public:
1667  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1668    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1669    , _hdr(hdr)
1670    , _obj(obj)
1671    , _lock(lock)
1672    , _scratch(scratch)
1673    , _stub(stub)                      {}
1674
1675  LIR_Opr hdr_opr() const                        { return _hdr; }
1676  LIR_Opr obj_opr() const                        { return _obj; }
1677  LIR_Opr lock_opr() const                       { return _lock; }
1678  LIR_Opr scratch_opr() const                    { return _scratch; }
1679  CodeStub* stub() const                         { return _stub; }
1680
1681  virtual void emit_code(LIR_Assembler* masm);
1682  virtual LIR_OpLock* as_OpLock() { return this; }
1683  void print_instr(outputStream* out) const PRODUCT_RETURN;
1684};
1685
1686
1687class LIR_OpDelay: public LIR_Op {
1688 friend class LIR_OpVisitState;
1689
1690 private:
1691  LIR_Op* _op;
1692
1693 public:
1694  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1695    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1696    _op(op) {
1697    assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1698  }
1699  virtual void emit_code(LIR_Assembler* masm);
1700  virtual LIR_OpDelay* as_OpDelay() { return this; }
1701  void print_instr(outputStream* out) const PRODUCT_RETURN;
1702  LIR_Op* delay_op() const { return _op; }
1703  CodeEmitInfo* call_info() const { return info(); }
1704};
1705
1706
1707// LIR_OpCompareAndSwap
1708class LIR_OpCompareAndSwap : public LIR_Op {
1709 friend class LIR_OpVisitState;
1710
1711 private:
1712  LIR_Opr _addr;
1713  LIR_Opr _cmp_value;
1714  LIR_Opr _new_value;
1715  LIR_Opr _tmp1;
1716  LIR_Opr _tmp2;
1717
1718 public:
1719  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1720                       LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
    : LIR_Op(code, result, NULL)  // no info
1722    , _addr(addr)
1723    , _cmp_value(cmp_value)
1724    , _new_value(new_value)
1725    , _tmp1(t1)
1726    , _tmp2(t2)                                  { }
1727
1728  LIR_Opr addr()        const                    { return _addr;  }
1729  LIR_Opr cmp_value()   const                    { return _cmp_value; }
1730  LIR_Opr new_value()   const                    { return _new_value; }
1731  LIR_Opr tmp1()        const                    { return _tmp1;      }
1732  LIR_Opr tmp2()        const                    { return _tmp2;      }
1733
1734  virtual void emit_code(LIR_Assembler* masm);
1735  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1736  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1737};
1738
1739// LIR_OpProfileCall
1740class LIR_OpProfileCall : public LIR_Op {
1741 friend class LIR_OpVisitState;
1742
1743 private:
1744  ciMethod* _profiled_method;
1745  int _profiled_bci;
1746  LIR_Opr _mdo;
1747  LIR_Opr _recv;
1748  LIR_Opr _tmp1;
1749  ciKlass* _known_holder;
1750
1751 public:
1752  // Destroys recv
1753  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1754    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1755    , _profiled_method(profiled_method)
1756    , _profiled_bci(profiled_bci)
1757    , _mdo(mdo)
1758    , _recv(recv)
1759    , _tmp1(t1)
1760    , _known_holder(known_holder)                { }
1761
1762  ciMethod* profiled_method() const              { return _profiled_method;  }
1763  int       profiled_bci()    const              { return _profiled_bci;     }
1764  LIR_Opr   mdo()             const              { return _mdo;              }
1765  LIR_Opr   recv()            const              { return _recv;             }
1766  LIR_Opr   tmp1()            const              { return _tmp1;             }
1767  ciKlass*  known_holder()    const              { return _known_holder;     }
1768
1769  virtual void emit_code(LIR_Assembler* masm);
1770  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1771  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1772};
1773
1774
1775class LIR_InsertionBuffer;
1776
1777//--------------------------------LIR_List---------------------------------------------------
1778// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
// LIR instructions are created and appended through the instruction factory methods of LIR_List itself.
1780//
1781// Notes:
// - all offsets are (should be) in bytes
1783// - local positions are specified with an offset, with offset 0 being local 0
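//
// Illustrative sketch (not part of this interface): a client typically fills
// one LIR_List per basic block through the instruction factory methods below;
// "compilation", "block", "left", "right", "target", "src", "dst" and "cont"
// are hypothetical placeholders.
//
//   LIR_List* lir = new LIR_List(compilation, block);
//   lir->cmp(lir_cond_less, left, right);
//   lir->branch(lir_cond_less, T_INT, target);   // conditional branch to a block
//   lir->move(src, dst);                         // register/stack move
//   lir->jump(cont);                             // unconditional branch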
1784
1785class LIR_List: public CompilationResourceObj {
1786 private:
1787  LIR_OpList  _operations;
1788
1789  Compilation*  _compilation;
1790#ifndef PRODUCT
1791  BlockBegin*   _block;
1792#endif
1793#ifdef ASSERT
1794  const char *  _file;
1795  int           _line;
1796#endif
1797
1798  void append(LIR_Op* op) {
1799    if (op->source() == NULL)
1800      op->set_source(_compilation->current_instruction());
1801#ifndef PRODUCT
1802    if (PrintIRWithLIR) {
1803      _compilation->maybe_print_current_instruction();
1804      op->print(); tty->cr();
1805    }
1806#endif // PRODUCT
1807
1808    _operations.append(op);
1809
1810#ifdef ASSERT
1811    op->verify();
1812    op->set_file_and_line(_file, _line);
1813    _file = NULL;
1814    _line = 0;
1815#endif
1816  }
1817
1818 public:
1819  LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1820
1821#ifdef ASSERT
1822  void set_file_and_line(const char * file, int line);
1823#endif
1824
1825  //---------- accessors ---------------
1826  LIR_OpList* instructions_list()                { return &_operations; }
1827  int         length() const                     { return _operations.length(); }
1828  LIR_Op*     at(int i) const                    { return _operations.at(i); }
1829
1830  NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1831
1832  // insert LIR_Ops in buffer to right places in LIR_List
1833  void append(LIR_InsertionBuffer* buffer);
1834
1835  //---------- mutators ---------------
1836  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1837  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1838
1839  //---------- printing -------------
1840  void print_instructions() PRODUCT_RETURN;
1841
1842
1843  //---------- instructions -------------
1844  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1845                        address dest, LIR_OprList* arguments,
1846                        CodeEmitInfo* info) {
1847    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1848  }
1849  void call_static(ciMethod* method, LIR_Opr result,
1850                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1851    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1852  }
1853  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1854                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1855    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1856  }
1857  void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1858                    intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1859    append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1860  }
1861  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1862                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1863    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
1864  }
1865
1866  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1867  void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1868  void membar()                                  { append(new LIR_Op0(lir_membar)); }
1869  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1870  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1871
1872  void nop()                                     { append(new LIR_Op0(lir_nop)); }
1873  void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1874
1875  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1876  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1877
1878  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1879
1880  void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1881  void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1882
  // result is a stack location for the old backend and a vreg for UseLinearScan
  // stack_loc_temp is an illegal register for the old backend
1885  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1886  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1887  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1888  void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1889  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1890  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1891  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1892
1893  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
1894
1895  void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
1896  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
1897
1898  void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
1899
1900  void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
1901
1902#ifdef PPC
1903  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
1904#endif
1905  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
1906
1907  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
1908  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
1909  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
1910
1911  void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
1912  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1913    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
1914  }
1915  void unwind_exception(LIR_Opr exceptionOop) {
1916    append(new LIR_Op1(lir_unwind, exceptionOop));
1917  }
1918
1919  void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1920    append(new LIR_Op2(lir_compare_to,  left, right, dst));
1921  }
1922
1923  void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
1924  void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
1925
1926  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
1927    append(new LIR_Op2(lir_cmp, condition, left, right, info));
1928  }
1929  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
1930    cmp(condition, left, LIR_OprFact::intConst(right), info);
1931  }
1932
1933  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
1934  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
1935
1936  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
1937    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
1938  }
1939
1940  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1941                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1942  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1943               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1944  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1945               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
1946
1947  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
1948  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
1949  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
1950  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
1951  void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
1952  void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
1953  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
1954
1955  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
1956  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
1957  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
1958  void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
1959  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
1960  void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
1961  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
1962
1963  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1964  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1965
1966  void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1967
1968  void prefetch(LIR_Address* addr, bool is_store);
1969
1970  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1971  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1972  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
1973  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
1974  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
1975
1976  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1977  void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1978  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1979  void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
1980
1981  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
1983
1984  // jump is an unconditional branch
1985  void jump(BlockBegin* block) {
1986    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
1987  }
1988  void jump(CodeStub* stub) {
1989    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
1990  }
1991  void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
1992  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
1993    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1994    append(new LIR_OpBranch(cond, type, block));
1995  }
1996  void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
1997    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
1998    append(new LIR_OpBranch(cond, type, stub));
1999  }
2000  void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2001    assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2002    append(new LIR_OpBranch(cond, type, block, unordered));
2003  }
2004
2005  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2006  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2007  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2008
2009  void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2010  void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2011  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2012
2013  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2014  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2015
2016  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2017    append(new LIR_OpRTCall(routine, tmp, result, arguments));
2018  }
2019
2020  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2021                    LIR_OprList* arguments, CodeEmitInfo* info) {
2022    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2023  }
2024
2025  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2026  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2027  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2028
2029  void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
2030  void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
2031  void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2032
2033  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2034
2035  void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
2036
2037  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2038                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2039                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2040                  ciMethod* profiled_method, int profiled_bci);
2041  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
2042  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
2043
2044  // methodDataOop profiling
2045  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
2046};
2047
2048void print_LIR(BlockList* blocks);
2049
2050class LIR_InsertionBuffer : public CompilationResourceObj {
2051 private:
2052  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2053
2054  // list of insertion points. index and count are stored alternately:
2055  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2056  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
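  // e.g. (illustrative, hypothetical numbers): inserting two ops at lir index 3
  // and one op at lir index 7 yields [3, 2, 7, 1], i.e. index_at(0) == 3,
  // count_at(0) == 2, index_at(1) == 7 and count_at(1) == 1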
2057  intStack    _index_and_count;
2058
2059  // the LIR_Ops to be inserted
2060  LIR_OpList  _ops;
2061
2062  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2063  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2064  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2065
2066#ifdef ASSERT
2067  void verify();
2068#endif
2069 public:
2070  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2071
2072  // must be called before using the insertion buffer
2073  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2074  bool initialized() const  { return _lir != NULL; }
2075  // called automatically when the buffer is appended to the LIR_List
2076  void finish()             { _lir = NULL; }
2077
2078  // accessors
2079  LIR_List*  lir_list() const             { return _lir; }
2080  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2081  int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2082  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2083
2084  int number_of_ops() const               { return _ops.length(); }
2085  LIR_Op* op_at(int i) const              { return _ops.at(i); }
2086
2087  // append an instruction to the buffer
2088  void append(int index, LIR_Op* op);
2089
2090  // instruction
2091  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2092};
2093
2094
2095//
2096// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2097// Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2098// information about the input, output and temporaries used by the
2099// op to be recorded.  It also records whether the op has call semantics
// and collects all the CodeEmitInfos used by the op.
2101//
2102
2103
2104class LIR_OpVisitState: public StackObj {
2105 public:
2106  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2107
2108  enum {
2109    maxNumberOfOperands = 16,
2110    maxNumberOfInfos = 4
2111  };
2112
2113 private:
2114  LIR_Op*          _op;
2115
2116  // optimization: the operands and infos are not stored in a variable-length
2117  //               list, but in a fixed-size array to save time of size checks and resizing
2118  int              _oprs_len[numModes];
2119  LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
  int              _info_len;
2121  CodeEmitInfo*    _info_new[maxNumberOfInfos];
2122
2123  bool             _has_call;
2124  bool             _has_slow_case;
2125
2126
2127  // only include register operands
  // addresses are decomposed into their base and index registers
2129  // constants and stack operands are ignored
2130  void append(LIR_Opr& opr, OprMode mode) {
2131    assert(opr->is_valid(), "should not call this otherwise");
2132    assert(mode >= 0 && mode < numModes, "bad mode");
2133
2134    if (opr->is_register()) {
      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2136      _oprs_new[mode][_oprs_len[mode]++] = &opr;
2137
2138    } else if (opr->is_pointer()) {
2139      LIR_Address* address = opr->as_address_ptr();
2140      if (address != NULL) {
2141        // special handling for addresses: add base and index register of the address
2142        // both are always input operands!
2143        if (address->_base->is_valid()) {
2144          assert(address->_base->is_register(), "must be");
2145          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2146          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
2147        }
2148        if (address->_index->is_valid()) {
2149          assert(address->_index->is_register(), "must be");
2150          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2151          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
2152        }
2153
2154      } else {
2155        assert(opr->is_constant(), "constant operands are not processed");
2156      }
2157    } else {
2158      assert(opr->is_stack(), "stack operands are not processed");
2159    }
2160  }
2161
2162  void append(CodeEmitInfo* info) {
2163    assert(info != NULL, "should not call this otherwise");
2164    assert(_info_len < maxNumberOfInfos, "array overflow");
2165    _info_new[_info_len++] = info;
2166  }
2167
2168 public:
2169  LIR_OpVisitState()         { reset(); }
2170
2171  LIR_Op* op() const         { return _op; }
2172  void set_op(LIR_Op* op)    { reset(); _op = op; }
2173
2174  bool has_call() const      { return _has_call; }
2175  bool has_slow_case() const { return _has_slow_case; }
2176
2177  void reset() {
2178    _op = NULL;
2179    _has_call = false;
2180    _has_slow_case = false;
2181
2182    _oprs_len[inputMode] = 0;
2183    _oprs_len[tempMode] = 0;
2184    _oprs_len[outputMode] = 0;
2185    _info_len = 0;
2186  }
2187
2188
2189  int opr_count(OprMode mode) const {
2190    assert(mode >= 0 && mode < numModes, "bad mode");
2191    return _oprs_len[mode];
2192  }
2193
2194  LIR_Opr opr_at(OprMode mode, int index) const {
2195    assert(mode >= 0 && mode < numModes, "bad mode");
2196    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2197    return *_oprs_new[mode][index];
2198  }
2199
2200  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2201    assert(mode >= 0 && mode < numModes, "bad mode");
2202    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2203    *_oprs_new[mode][index] = opr;
2204  }
2205
2206  int info_count() const {
2207    return _info_len;
2208  }
2209
2210  CodeEmitInfo* info_at(int index) const {
2211    assert(index < _info_len, "index out of bounds");
2212    return _info_new[index];
2213  }
2214
2215  XHandlers* all_xhandler();
2216
2217  // collects all register operands of the instruction
2218  void visit(LIR_Op* op);
2219
#ifdef ASSERT
2221  // check that an operation has no operands
2222  bool no_operands(LIR_Op* op);
2223#endif
2224
2225  // LIR_Op visitor functions use these to fill in the state
2226  void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2227  void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2228  void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2229  void do_info(CodeEmitInfo* info)        { append(info); }
2230
2231  void do_stub(CodeStub* stub);
2232  void do_call()                          { _has_call = true; }
2233  void do_slow_case()                     { _has_slow_case = true; }
2234  void do_slow_case(CodeEmitInfo* info) {
2235    _has_slow_case = true;
2236    append(info);
2237  }
2238};
2239
2240
inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2242