c1_LIR.hpp revision 3602:da91efe96a93
1/*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_VM_C1_C1_LIR_HPP
26#define SHARE_VM_C1_C1_LIR_HPP
27
28#include "c1/c1_ValueType.hpp"
29#include "oops/method.hpp"
30
31class BlockBegin;
32class BlockList;
33class LIR_Assembler;
34class CodeEmitInfo;
35class CodeStub;
36class CodeStubList;
37class ArrayCopyStub;
38class LIR_Op;
39class ciType;
40class ValueType;
41class LIR_OpVisitState;
42class FpuStackSim;
43
44//---------------------------------------------------------------------
45//                 LIR Operands
46//  LIR_OprDesc
47//    LIR_OprPtr
48//      LIR_Const
49//      LIR_Address
50//---------------------------------------------------------------------
51class LIR_OprDesc;
52class LIR_OprPtr;
53class LIR_Const;
54class LIR_Address;
55class LIR_OprVisitor;
56
57
58typedef LIR_OprDesc* LIR_Opr;
59typedef int          RegNr;
60
61define_array(LIR_OprArray, LIR_Opr)
62define_stack(LIR_OprList, LIR_OprArray)
63
64define_array(LIR_OprRefArray, LIR_Opr*)
65define_stack(LIR_OprRefList, LIR_OprRefArray)
66
67define_array(CodeEmitInfoArray, CodeEmitInfo*)
68define_stack(CodeEmitInfoList, CodeEmitInfoArray)
69
70define_array(LIR_OpArray, LIR_Op*)
71define_stack(LIR_OpList, LIR_OpArray)
72
73// define LIR_OprPtr early so LIR_OprDesc can refer to it
74class LIR_OprPtr: public CompilationResourceObj {
75 public:
76  bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
77  bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
78
79  virtual LIR_Const*  as_constant()              { return NULL; }
80  virtual LIR_Address* as_address()              { return NULL; }
81  virtual BasicType type() const                 = 0;
82  virtual void print_value_on(outputStream* out) const = 0;
83};
84
85
86
87// LIR constants
88class LIR_Const: public LIR_OprPtr {
89 private:
90  JavaValue _value;
91
92  void type_check(BasicType t) const   { assert(type() == t, "type check"); }
93  void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
94  void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
95
96 public:
97  LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
98  LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
99  LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
100  LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
101  LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
102  LIR_Const(void* p) {
103#ifdef _LP64
104    assert(sizeof(jlong) >= sizeof(p), "too small");
105    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
106#else
107    assert(sizeof(jint) >= sizeof(p), "too small");
108    _value.set_type(T_INT);     _value.set_jint((jint)p);
109#endif
110  }
111  LIR_Const(Metadata* m) {
112    _value.set_type(T_METADATA);
113#ifdef _LP64
114    _value.set_jlong((jlong)m);
115#else
116    _value.set_jint((jint)m);
117#endif // _LP64
118  }
119
120  virtual BasicType type()       const { return _value.get_type(); }
121  virtual LIR_Const* as_constant()     { return this; }
122
123  jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
124  jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
125  jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
126  jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
127  jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
128  jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
129  jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
130
131#ifdef _LP64
132  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
133  Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
134#else
135  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
136  Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
137#endif
138
139
140  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
141  jint      as_jint_lo_bits() const    {
142    if (type() == T_DOUBLE) {
143      return low(jlong_cast(_value.get_jdouble()));
144    } else {
145      return as_jint_lo();
146    }
147  }
148  jint      as_jint_hi_bits() const    {
149    if (type() == T_DOUBLE) {
150      return high(jlong_cast(_value.get_jdouble()));
151    } else {
152      return as_jint_hi();
153    }
154  }
155  jlong      as_jlong_bits() const    {
156    if (type() == T_DOUBLE) {
157      return jlong_cast(_value.get_jdouble());
158    } else {
159      return as_jlong();
160    }
161  }
162
163  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
164
165
166  bool is_zero_float() {
167    jfloat f = as_jfloat();
168    jfloat ok = 0.0f;
169    return jint_cast(f) == jint_cast(ok);
170  }
171
172  bool is_one_float() {
173    jfloat f = as_jfloat();
174    return !g_isnan(f) && g_isfinite(f) && f == 1.0;
175  }
176
177  bool is_zero_double() {
178    jdouble d = as_jdouble();
179    jdouble ok = 0.0;
180    return jlong_cast(d) == jlong_cast(ok);
181  }
182
183  bool is_one_double() {
184    jdouble d = as_jdouble();
185    return !g_isnan(d) && g_isfinite(d) && d == 1.0;
186  }
187};
188
189
190//---------------------LIR Operand descriptor------------------------------------
191//
192// The class LIR_OprDesc represents a LIR instruction operand;
193// it can be a register (ALU/FPU), a stack location or a constant.
194// Constants and addresses are represented as resource-area-allocated
195// structures (see above), while registers and stack locations are
196// encoded directly in the 'this' pointer itself
197// (see the value() function).
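//
// For illustration (using the factory methods declared further below):
//
//   LIR_Opr c = LIR_OprFact::intConst(7);    // a resource-allocated LIR_Const*, cast to LIR_Opr;
//   c->is_pointer();                         //   true - the low bit of the pointer is clear
//   LIR_Opr r = LIR_OprFact::single_cpu(3);  // no allocation: register number 3 is encoded in
//   r->is_pointer();                         //   the pointer value itself, so this is false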
198
199class LIR_OprDesc: public CompilationResourceObj {
200 public:
201  // value structure:
202  //     data       opr-type opr-kind
203  // +--------------+-------+-------+
204  // [max...........|7 6 5 4|3 2 1 0]
205  //                             ^
206  //                    is_pointer bit
207  //
208  // if the lowest bit is cleared, the value is a structure pointer;
209  // we need 4 bits to represent the operand types
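  //
  // For illustration, the operand created by LIR_OprFact::single_cpu(5) decodes as
  //   kind_field() == cpu_register, type_field() == int_type,
  //   size_field() == single_size and data() == 5 (the register number);
  // its lowest bit is set (cpu_register == 3), so is_pointer() returns false.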
210
211 private:
212  friend class LIR_OprFact;
213
214  // Conversion
215  intptr_t value() const                         { return (intptr_t) this; }
216
217  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
218    return (value() & mask) == masked_value;
219  }
220
221  enum OprKind {
222      pointer_value      = 0
223    , stack_value        = 1
224    , cpu_register       = 3
225    , fpu_register       = 5
226    , illegal_value      = 7
227  };
228
229  enum OprBits {
230      pointer_bits   = 1
231    , kind_bits      = 3
232    , type_bits      = 4
233    , size_bits      = 2
234    , destroys_bits  = 1
235    , virtual_bits   = 1
236    , is_xmm_bits    = 1
237    , last_use_bits  = 1
238    , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
239    , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
240                       is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
241    , data_bits      = BitsPerInt - non_data_bits
242    , reg_bits       = data_bits / 2      // for two registers in one value encoding
243  };
244
245  enum OprShift {
246      kind_shift     = 0
247    , type_shift     = kind_shift     + kind_bits
248    , size_shift     = type_shift     + type_bits
249    , destroys_shift = size_shift     + size_bits
250    , last_use_shift = destroys_shift + destroys_bits
251    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
252    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
253    , is_xmm_shift   = virtual_shift + virtual_bits
254    , data_shift     = is_xmm_shift + is_xmm_bits
255    , reg1_shift = data_shift
256    , reg2_shift = data_shift + reg_bits
257
258  };
259
260  enum OprSize {
261      single_size = 0 << size_shift
262    , double_size = 1 << size_shift
263  };
264
265  enum OprMask {
266      kind_mask      = right_n_bits(kind_bits)
267    , type_mask      = right_n_bits(type_bits) << type_shift
268    , size_mask      = right_n_bits(size_bits) << size_shift
269    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
270    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
271    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
272    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
273    , pointer_mask   = right_n_bits(pointer_bits)
274    , lower_reg_mask = right_n_bits(reg_bits)
275    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
276  };
277
278  uintptr_t data() const                         { return value() >> data_shift; }
279  int lo_reg_half() const                        { return data() & lower_reg_mask; }
280  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
281  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
282  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
283
284  static char type_char(BasicType t);
285
286 public:
287  enum {
288    vreg_base = ConcreteRegisterImpl::number_of_registers,
289    vreg_max = (1 << data_bits) - 1
290  };
291
292  static inline LIR_Opr illegalOpr();
293
294  enum OprType {
295      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
296    , int_type      = 1 << type_shift
297    , long_type     = 2 << type_shift
298    , object_type   = 3 << type_shift
299    , address_type  = 4 << type_shift
300    , float_type    = 5 << type_shift
301    , double_type   = 6 << type_shift
302  };
303  friend OprType as_OprType(BasicType t);
304  friend BasicType as_BasicType(OprType t);
305
306  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
307  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
308
309  static OprSize size_for(BasicType t) {
310    switch (t) {
311      case T_LONG:
312      case T_DOUBLE:
313        return double_size;
314        break;
315
316      case T_FLOAT:
317      case T_BOOLEAN:
318      case T_CHAR:
319      case T_BYTE:
320      case T_SHORT:
321      case T_INT:
322      case T_ADDRESS:
323      case T_OBJECT:
324      case T_ARRAY:
325        return single_size;
326        break;
327
328      default:
329        ShouldNotReachHere();
330        return single_size;
331      }
332  }
333
334
335  void validate_type() const PRODUCT_RETURN;
336
337  BasicType type() const {
338    if (is_pointer()) {
339      return pointer()->type();
340    }
341    return as_BasicType(type_field());
342  }
343
344
345  ValueType* value_type() const                  { return as_ValueType(type()); }
346
347  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
348
349  bool is_equal(LIR_Opr opr) const         { return this == opr; }
350  // checks whether the types are the same
351  bool is_same_type(LIR_Opr opr) const     {
352    assert(type_field() != unknown_type &&
353           opr->type_field() != unknown_type, "shouldn't see unknown_type");
354    return type_field() == opr->type_field();
355  }
356  bool is_same_register(LIR_Opr opr) {
357    return (is_register() && opr->is_register() &&
358            kind_field() == opr->kind_field() &&
359            (value() & no_type_mask) == (opr->value() & no_type_mask));
360  }
361
362  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
363  bool is_illegal() const      { return kind_field() == illegal_value; }
364  bool is_valid() const        { return kind_field() != illegal_value; }
365
366  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
367  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
368
369  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
370  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
371
372  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
373  bool is_oop() const;
374
375  // semantics for fpu and xmm registers:
376  // * the fpu queries (is_float_kind, is_single_fpu, is_double_fpu) also
377  //   return true for xmm registers (so both is_single_fpu and is_single_xmm are true)
378  // * so you must always check for is_???_xmm prior to is_???_fpu to
379  //   distinguish between fpu and xmm registers
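  //
  //   e.g. when mapping an operand to a machine register, test in this order:
  //     if (opr->is_single_xmm())      { /* use opr->xmm_regnr() */ }
  //     else if (opr->is_single_fpu()) { /* use opr->fpu_regnr() */ }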
380
381  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
382  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
383  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
384
385  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
386  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
387  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
388  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
389  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
390
391  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
392  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
393  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
394  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
395  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
396
397  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
398  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
399  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
400
401  // fast accessor functions for special bits that do not work for pointers
402  // (in these functions, the check for is_pointer() is omitted)
403  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
404  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
405  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
406  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
407  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
408
409  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
410  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
411  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
412  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
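  // note: since an operand is encoded in the pointer value itself, make_last_use() and
  // make_fpu_stack_offset() do not modify the receiver; they return a new LIR_Opr with
  // the corresponding bit set, e.g.
  //   LIR_Opr last = opr->make_last_use();   // opr itself is unchanged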
413
414
415  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
416  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
417  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
418  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
419  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
420  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
421  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
422  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
423  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
424  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
425  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
426  int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
427
428  LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
429  LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
430  LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
431
432  Register as_register()    const;
433  Register as_register_lo() const;
434  Register as_register_hi() const;
435
436  Register as_pointer_register() {
437#ifdef _LP64
438    if (is_double_cpu()) {
439      assert(as_register_lo() == as_register_hi(), "should be a single register");
440      return as_register_lo();
441    }
442#endif
443    return as_register();
444  }
445
446#ifdef X86
447  XMMRegister as_xmm_float_reg() const;
448  XMMRegister as_xmm_double_reg() const;
449  // for compatibility with RInfo
450  int fpu () const                                  { return lo_reg_half(); }
451#endif // X86
452#if defined(SPARC) || defined(ARM) || defined(PPC)
453  FloatRegister as_float_reg   () const;
454  FloatRegister as_double_reg  () const;
455#endif
456
457  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
458  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
459  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
460  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
461  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
462
463  void print() const PRODUCT_RETURN;
464  void print(outputStream* out) const PRODUCT_RETURN;
465};
466
467
468inline LIR_OprDesc::OprType as_OprType(BasicType type) {
469  switch (type) {
470  case T_INT:      return LIR_OprDesc::int_type;
471  case T_LONG:     return LIR_OprDesc::long_type;
472  case T_FLOAT:    return LIR_OprDesc::float_type;
473  case T_DOUBLE:   return LIR_OprDesc::double_type;
474  case T_OBJECT:
475  case T_ARRAY:    return LIR_OprDesc::object_type;
476  case T_ADDRESS:  return LIR_OprDesc::address_type;
477  case T_ILLEGAL:  // fall through
478  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
479  }
480}
481
482inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
483  switch (t) {
484  case LIR_OprDesc::int_type:     return T_INT;
485  case LIR_OprDesc::long_type:    return T_LONG;
486  case LIR_OprDesc::float_type:   return T_FLOAT;
487  case LIR_OprDesc::double_type:  return T_DOUBLE;
488  case LIR_OprDesc::object_type:  return T_OBJECT;
489  case LIR_OprDesc::address_type: return T_ADDRESS;
490  case LIR_OprDesc::unknown_type: // fall through
491  default: ShouldNotReachHere();  return T_ILLEGAL;
492  }
493}
494
495
496// LIR_Address
497class LIR_Address: public LIR_OprPtr {
498 friend class LIR_OpVisitState;
499
500 public:
501  // NOTE: currently these must be the log2 of the scale factor (and
502  // must also be equivalent to the ScaleFactor enum in
503  // assembler_x86.hpp)
504  enum Scale {
505    times_1  =  0,
506    times_2  =  1,
507    times_4  =  2,
508    times_8  =  3
509  };
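  // An LIR_Address therefore denotes the memory location
  //   base + (index << scale) + disp
  // e.g. with scale == times_4 each index step advances the address by 4 bytes.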
510
511 private:
512  LIR_Opr   _base;
513  LIR_Opr   _index;
514  Scale     _scale;
515  intx      _disp;
516  BasicType _type;
517
518 public:
519  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
520       _base(base)
521     , _index(index)
522     , _scale(times_1)
523     , _type(type)
524     , _disp(0) { verify(); }
525
526  LIR_Address(LIR_Opr base, intx disp, BasicType type):
527       _base(base)
528     , _index(LIR_OprDesc::illegalOpr())
529     , _scale(times_1)
530     , _type(type)
531     , _disp(disp) { verify(); }
532
533  LIR_Address(LIR_Opr base, BasicType type):
534       _base(base)
535     , _index(LIR_OprDesc::illegalOpr())
536     , _scale(times_1)
537     , _type(type)
538     , _disp(0) { verify(); }
539
540#if defined(X86) || defined(ARM)
541  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
542       _base(base)
543     , _index(index)
544     , _scale(scale)
545     , _type(type)
546     , _disp(disp) { verify(); }
547#endif // X86 || ARM
548
549  LIR_Opr base()  const                          { return _base;  }
550  LIR_Opr index() const                          { return _index; }
551  Scale   scale() const                          { return _scale; }
552  intx    disp()  const                          { return _disp;  }
553
554  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
555
556  virtual LIR_Address* as_address()              { return this;   }
557  virtual BasicType type() const                 { return _type; }
558  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
559
560  void verify() const PRODUCT_RETURN;
561
562  static Scale scale(BasicType type);
563};
564
565
566// operand factory
567class LIR_OprFact: public AllStatic {
568 public:
569
570  static LIR_Opr illegalOpr;
571
572  static LIR_Opr single_cpu(int reg) {
573    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
574                               LIR_OprDesc::int_type             |
575                               LIR_OprDesc::cpu_register         |
576                               LIR_OprDesc::single_size);
577  }
578  static LIR_Opr single_cpu_oop(int reg) {
579    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
580                               LIR_OprDesc::object_type          |
581                               LIR_OprDesc::cpu_register         |
582                               LIR_OprDesc::single_size);
583  }
584  static LIR_Opr single_cpu_address(int reg) {
585    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
586                               LIR_OprDesc::address_type         |
587                               LIR_OprDesc::cpu_register         |
588                               LIR_OprDesc::single_size);
589  }
590  static LIR_Opr double_cpu(int reg1, int reg2) {
591    LP64_ONLY(assert(reg1 == reg2, "must be identical"));
592    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
593                               (reg2 << LIR_OprDesc::reg2_shift) |
594                               LIR_OprDesc::long_type            |
595                               LIR_OprDesc::cpu_register         |
596                               LIR_OprDesc::double_size);
597  }
598
599  static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
600                                                                             LIR_OprDesc::float_type           |
601                                                                             LIR_OprDesc::fpu_register         |
602                                                                             LIR_OprDesc::single_size); }
603#if defined(ARM)
604  static LIR_Opr double_fpu(int reg1, int reg2)    { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
605  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
606  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
607#endif
608#ifdef SPARC
609  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
610                                                                             (reg2 << LIR_OprDesc::reg2_shift) |
611                                                                             LIR_OprDesc::double_type          |
612                                                                             LIR_OprDesc::fpu_register         |
613                                                                             LIR_OprDesc::double_size); }
614#endif
615#ifdef X86
616  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
617                                                                             (reg  << LIR_OprDesc::reg2_shift) |
618                                                                             LIR_OprDesc::double_type          |
619                                                                             LIR_OprDesc::fpu_register         |
620                                                                             LIR_OprDesc::double_size); }
621
622  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
623                                                                             LIR_OprDesc::float_type           |
624                                                                             LIR_OprDesc::fpu_register         |
625                                                                             LIR_OprDesc::single_size          |
626                                                                             LIR_OprDesc::is_xmm_mask); }
627  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
628                                                                             (reg  << LIR_OprDesc::reg2_shift) |
629                                                                             LIR_OprDesc::double_type          |
630                                                                             LIR_OprDesc::fpu_register         |
631                                                                             LIR_OprDesc::double_size          |
632                                                                             LIR_OprDesc::is_xmm_mask); }
633#endif // X86
634#ifdef PPC
635  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
636                                                                             (reg  << LIR_OprDesc::reg2_shift) |
637                                                                             LIR_OprDesc::double_type          |
638                                                                             LIR_OprDesc::fpu_register         |
639                                                                             LIR_OprDesc::double_size); }
640  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift)        |
641                                                                             LIR_OprDesc::float_type           |
642                                                                             LIR_OprDesc::cpu_register         |
643                                                                             LIR_OprDesc::single_size); }
644  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift)        |
645                                                                             (reg1 << LIR_OprDesc::reg2_shift) |
646                                                                             LIR_OprDesc::double_type          |
647                                                                             LIR_OprDesc::cpu_register         |
648                                                                             LIR_OprDesc::double_size); }
649#endif // PPC
650
651  static LIR_Opr virtual_register(int index, BasicType type) {
652    LIR_Opr res;
653    switch (type) {
654      case T_OBJECT: // fall through
655      case T_ARRAY:
656        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
657                                            LIR_OprDesc::object_type  |
658                                            LIR_OprDesc::cpu_register |
659                                            LIR_OprDesc::single_size  |
660                                            LIR_OprDesc::virtual_mask);
661        break;
662
663      case T_INT:
664        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
665                                  LIR_OprDesc::int_type              |
666                                  LIR_OprDesc::cpu_register          |
667                                  LIR_OprDesc::single_size           |
668                                  LIR_OprDesc::virtual_mask);
669        break;
670
671      case T_ADDRESS:
672        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
673                                  LIR_OprDesc::address_type          |
674                                  LIR_OprDesc::cpu_register          |
675                                  LIR_OprDesc::single_size           |
676                                  LIR_OprDesc::virtual_mask);
677        break;
678
679      case T_LONG:
680        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
681                                  LIR_OprDesc::long_type             |
682                                  LIR_OprDesc::cpu_register          |
683                                  LIR_OprDesc::double_size           |
684                                  LIR_OprDesc::virtual_mask);
685        break;
686
687#ifdef __SOFTFP__
688      case T_FLOAT:
689        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
690                                  LIR_OprDesc::float_type  |
691                                  LIR_OprDesc::cpu_register |
692                                  LIR_OprDesc::single_size |
693                                  LIR_OprDesc::virtual_mask);
694        break;
695      case T_DOUBLE:
696        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
697                                  LIR_OprDesc::double_type |
698                                  LIR_OprDesc::cpu_register |
699                                  LIR_OprDesc::double_size |
700                                  LIR_OprDesc::virtual_mask);
701        break;
702#else // __SOFTFP__
703      case T_FLOAT:
704        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
705                                  LIR_OprDesc::float_type           |
706                                  LIR_OprDesc::fpu_register         |
707                                  LIR_OprDesc::single_size          |
708                                  LIR_OprDesc::virtual_mask);
709        break;
710
711      case T_DOUBLE:
712        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
713                                  LIR_OprDesc::double_type          |
714                                  LIR_OprDesc::fpu_register         |
715                                  LIR_OprDesc::double_size          |
716                                  LIR_OprDesc::virtual_mask);
717        break;
718#endif // __SOFTFP__
719      default:       ShouldNotReachHere(); res = illegalOpr;
720    }
721
722#ifdef ASSERT
723    res->validate_type();
724    assert(res->vreg_number() == index, "conversion check");
725    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
726    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
727
728    // old-style calculation; check if old and new method are equal
729    LIR_OprDesc::OprType t = as_OprType(type);
730#ifdef __SOFTFP__
731    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
732                               t |
733                               LIR_OprDesc::cpu_register |
734                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
735#else // __SOFTFP__
736    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
737                                          ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
738                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
739    assert(res == old_res, "old and new method not equal");
740#endif // __SOFTFP__
741#endif // ASSERT
742
743    return res;
744  }
745
746  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters, as
747  // the index is platform independent; a double stack using indices 2 and 3 always has
748  // index 2.
749  static LIR_Opr stack(int index, BasicType type) {
750    LIR_Opr res;
751    switch (type) {
752      case T_OBJECT: // fall through
753      case T_ARRAY:
754        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
755                                  LIR_OprDesc::object_type           |
756                                  LIR_OprDesc::stack_value           |
757                                  LIR_OprDesc::single_size);
758        break;
759
760      case T_INT:
761        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
762                                  LIR_OprDesc::int_type              |
763                                  LIR_OprDesc::stack_value           |
764                                  LIR_OprDesc::single_size);
765        break;
766
767      case T_ADDRESS:
768        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
769                                  LIR_OprDesc::address_type          |
770                                  LIR_OprDesc::stack_value           |
771                                  LIR_OprDesc::single_size);
772        break;
773
774      case T_LONG:
775        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
776                                  LIR_OprDesc::long_type             |
777                                  LIR_OprDesc::stack_value           |
778                                  LIR_OprDesc::double_size);
779        break;
780
781      case T_FLOAT:
782        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
783                                  LIR_OprDesc::float_type            |
784                                  LIR_OprDesc::stack_value           |
785                                  LIR_OprDesc::single_size);
786        break;
787      case T_DOUBLE:
788        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
789                                  LIR_OprDesc::double_type           |
790                                  LIR_OprDesc::stack_value           |
791                                  LIR_OprDesc::double_size);
792        break;
793
794      default:       ShouldNotReachHere(); res = illegalOpr;
795    }
796
797#ifdef ASSERT
798    assert(index >= 0, "index must be positive");
799    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
800
801    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
802                                          LIR_OprDesc::stack_value           |
803                                          as_OprType(type)                   |
804                                          LIR_OprDesc::size_for(type));
805    assert(res == old_res, "old and new method not equal");
806#endif
807
808    return res;
809  }
810
811  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
812  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
813  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
814  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
815  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
816  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
817  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
818  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
819  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
820  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
821  static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
822
823  static LIR_Opr value_type(ValueType* type);
824  static LIR_Opr dummy_value_type(ValueType* type);
825};
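
// Typical usage of the operand factory (illustrative):
//   LIR_Opr r    = LIR_OprFact::single_cpu(3);                             // fixed cpu register 3
//   LIR_Opr v    = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base, T_INT);
//   LIR_Opr slot = LIR_OprFact::stack(2, T_LONG);                          // stack indices 2 and 3
//   LIR_Opr c    = LIR_OprFact::intConst(42);                              // resource-allocated LIR_Const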
826
827
828//-------------------------------------------------------------------------------
829//                   LIR Instructions
830//-------------------------------------------------------------------------------
831//
832// Note:
833//  - every instruction has a result operand
834//  - every instruction has a CodeEmitInfo operand (can be revisited later)
835//  - every instruction has a LIR_OpCode operand
836//  - LIR_OpN means an instruction that has N input operands (see the example below)
837//
838// class hierarchy:
839//
840class  LIR_Op;
841class    LIR_Op0;
842class      LIR_OpLabel;
843class    LIR_Op1;
844class      LIR_OpBranch;
845class      LIR_OpConvert;
846class      LIR_OpAllocObj;
847class      LIR_OpRoundFP;
848class    LIR_Op2;
849class    LIR_OpDelay;
850class    LIR_Op3;
851class      LIR_OpAllocArray;
852class    LIR_OpCall;
853class      LIR_OpJavaCall;
854class      LIR_OpRTCall;
855class    LIR_OpArrayCopy;
856class    LIR_OpLock;
857class    LIR_OpTypeCheck;
858class    LIR_OpCompareAndSwap;
859class    LIR_OpProfileCall;
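//
// For example, lir_move is implemented as a LIR_Op1 (one input operand plus the
// result), while lir_add is a LIR_Op2 (two input operands plus the result).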
860
861
862// LIR operation codes
863enum LIR_Code {
864    lir_none
865  , begin_op0
866      , lir_word_align
867      , lir_label
868      , lir_nop
869      , lir_backwardbranch_target
870      , lir_std_entry
871      , lir_osr_entry
872      , lir_build_frame
873      , lir_fpop_raw
874      , lir_24bit_FPU
875      , lir_reset_FPU
876      , lir_breakpoint
877      , lir_rtcall
878      , lir_membar
879      , lir_membar_acquire
880      , lir_membar_release
881      , lir_membar_loadload
882      , lir_membar_storestore
883      , lir_membar_loadstore
884      , lir_membar_storeload
885      , lir_get_thread
886  , end_op0
887  , begin_op1
888      , lir_fxch
889      , lir_fld
890      , lir_ffree
891      , lir_push
892      , lir_pop
893      , lir_null_check
894      , lir_return
895      , lir_leal
896      , lir_neg
897      , lir_branch
898      , lir_cond_float_branch
899      , lir_move
900      , lir_prefetchr
901      , lir_prefetchw
902      , lir_convert
903      , lir_alloc_object
904      , lir_monaddr
905      , lir_roundfp
906      , lir_safepoint
907      , lir_pack64
908      , lir_unpack64
909      , lir_unwind
910  , end_op1
911  , begin_op2
912      , lir_cmp
913      , lir_cmp_l2i
914      , lir_ucmp_fd2i
915      , lir_cmp_fd2i
916      , lir_cmove
917      , lir_add
918      , lir_sub
919      , lir_mul
920      , lir_mul_strictfp
921      , lir_div
922      , lir_div_strictfp
923      , lir_rem
924      , lir_sqrt
925      , lir_abs
926      , lir_sin
927      , lir_cos
928      , lir_tan
929      , lir_log
930      , lir_log10
931      , lir_exp
932      , lir_pow
933      , lir_logic_and
934      , lir_logic_or
935      , lir_logic_xor
936      , lir_shl
937      , lir_shr
938      , lir_ushr
939      , lir_alloc_array
940      , lir_throw
941      , lir_compare_to
942  , end_op2
943  , begin_op3
944      , lir_idiv
945      , lir_irem
946  , end_op3
947  , begin_opJavaCall
948      , lir_static_call
949      , lir_optvirtual_call
950      , lir_icvirtual_call
951      , lir_virtual_call
952      , lir_dynamic_call
953  , end_opJavaCall
954  , begin_opArrayCopy
955      , lir_arraycopy
956  , end_opArrayCopy
957  , begin_opLock
958    , lir_lock
959    , lir_unlock
960  , end_opLock
961  , begin_delay_slot
962    , lir_delay_slot
963  , end_delay_slot
964  , begin_opTypeCheck
965    , lir_instanceof
966    , lir_checkcast
967    , lir_store_check
968  , end_opTypeCheck
969  , begin_opCompareAndSwap
970    , lir_cas_long
971    , lir_cas_obj
972    , lir_cas_int
973  , end_opCompareAndSwap
974  , begin_opMDOProfile
975    , lir_profile_call
976  , end_opMDOProfile
977};
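
// The begin_* / end_* markers bracket each opcode group so that an opcode can be
// classified with a simple range check, e.g.
//   is_in_range(code, begin_op1, end_op1)   // true for every LIR_Op1 opcode
// (see LIR_Op::is_in_range() below).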
978
979
980enum LIR_Condition {
981    lir_cond_equal
982  , lir_cond_notEqual
983  , lir_cond_less
984  , lir_cond_lessEqual
985  , lir_cond_greaterEqual
986  , lir_cond_greater
987  , lir_cond_belowEqual
988  , lir_cond_aboveEqual
989  , lir_cond_always
990  , lir_cond_unknown = -1
991};
992
993
994enum LIR_PatchCode {
995  lir_patch_none,
996  lir_patch_low,
997  lir_patch_high,
998  lir_patch_normal
999};
1000
1001
1002enum LIR_MoveKind {
1003  lir_move_normal,
1004  lir_move_volatile,
1005  lir_move_unaligned,
1006  lir_move_wide,
1007  lir_move_max_flag
1008};
1009
1010
1011// --------------------------------------------------
1012// LIR_Op
1013// --------------------------------------------------
1014class LIR_Op: public CompilationResourceObj {
1015 friend class LIR_OpVisitState;
1016
1017#ifdef ASSERT
1018 private:
1019  const char *  _file;
1020  int           _line;
1021#endif
1022
1023 protected:
1024  LIR_Opr       _result;
1025  unsigned short _code;
1026  unsigned short _flags;
1027  CodeEmitInfo* _info;
1028  int           _id;     // value id for register allocation
1029  int           _fpu_pop_count;
1030  Instruction*  _source; // for debugging
1031
1032  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1033
1034 protected:
1035  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1036
1037 public:
1038  LIR_Op()
1039    : _result(LIR_OprFact::illegalOpr)
1040    , _code(lir_none)
1041    , _flags(0)
1042    , _info(NULL)
1043#ifdef ASSERT
1044    , _file(NULL)
1045    , _line(0)
1046#endif
1047    , _fpu_pop_count(0)
1048    , _source(NULL)
1049    , _id(-1)                             {}
1050
1051  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1052    : _result(result)
1053    , _code(code)
1054    , _flags(0)
1055    , _info(info)
1056#ifdef ASSERT
1057    , _file(NULL)
1058    , _line(0)
1059#endif
1060    , _fpu_pop_count(0)
1061    , _source(NULL)
1062    , _id(-1)                             {}
1063
1064  CodeEmitInfo* info() const                  { return _info;   }
1065  LIR_Code code()      const                  { return (LIR_Code)_code;   }
1066  LIR_Opr result_opr() const                  { return _result; }
1067  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1068
1069#ifdef ASSERT
1070  void set_file_and_line(const char * file, int line) {
1071    _file = file;
1072    _line = line;
1073  }
1074#endif
1075
1076  virtual const char * name() const PRODUCT_RETURN0;
1077
1078  int id()             const                  { return _id;     }
1079  void set_id(int id)                         { _id = id; }
1080
1081  // FPU stack simulation helpers -- only used on Intel
1082  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1083  int  fpu_pop_count() const                  { return _fpu_pop_count; }
1084  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1085
1086  Instruction* source() const                 { return _source; }
1087  void set_source(Instruction* ins)           { _source = ins; }
1088
1089  virtual void emit_code(LIR_Assembler* masm) = 0;
1090  virtual void print_instr(outputStream* out) const   = 0;
1091  virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1092
1093  virtual LIR_OpCall* as_OpCall() { return NULL; }
1094  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1095  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1096  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1097  virtual LIR_OpLock* as_OpLock() { return NULL; }
1098  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1099  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1100  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1101  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1102  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1103  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1104  virtual LIR_Op0* as_Op0() { return NULL; }
1105  virtual LIR_Op1* as_Op1() { return NULL; }
1106  virtual LIR_Op2* as_Op2() { return NULL; }
1107  virtual LIR_Op3* as_Op3() { return NULL; }
1108  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1109  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1110  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1111  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1112
1113  virtual void verify() const {}
1114};
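
// The as_OpXxx() accessors above implement a checked downcast: they return NULL
// unless the receiver really is of the requested kind, so client code typically reads
//   LIR_OpBranch* branch = op->as_OpBranch();
//   if (branch != NULL) { /* branch-specific handling */ }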
1115
1116// for calls
1117class LIR_OpCall: public LIR_Op {
1118 friend class LIR_OpVisitState;
1119
1120 protected:
1121  address      _addr;
1122  LIR_OprList* _arguments;
1123 protected:
1124  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1125             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1126    : LIR_Op(code, result, info)
1127    , _arguments(arguments)
1128    , _addr(addr) {}
1129
1130 public:
1131  address addr() const                           { return _addr; }
1132  const LIR_OprList* arguments() const           { return _arguments; }
1133  virtual LIR_OpCall* as_OpCall()                { return this; }
1134};
1135
1136
1137// --------------------------------------------------
1138// LIR_OpJavaCall
1139// --------------------------------------------------
1140class LIR_OpJavaCall: public LIR_OpCall {
1141 friend class LIR_OpVisitState;
1142
1143 private:
1144  ciMethod* _method;
1145  LIR_Opr   _receiver;
1146  LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1147
1148 public:
1149  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1150                 LIR_Opr receiver, LIR_Opr result,
1151                 address addr, LIR_OprList* arguments,
1152                 CodeEmitInfo* info)
1153  : LIR_OpCall(code, addr, result, arguments, info)
1154  , _receiver(receiver)
1155  , _method(method)
1156  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1157  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1158
1159  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1160                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1161                 LIR_OprList* arguments, CodeEmitInfo* info)
1162  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1163  , _receiver(receiver)
1164  , _method(method)
1165  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1166  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1167
1168  LIR_Opr receiver() const                       { return _receiver; }
1169  ciMethod* method() const                       { return _method;   }
1170
1171  // JSR 292 support.
1172  bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1173  bool is_method_handle_invoke() const {
1174    return
1175      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
1176      ||
1177      method()->is_compiled_lambda_form()  // Java-generated adapter
1178      ||
1179      method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1180  }
1181
1182  intptr_t vtable_offset() const {
1183    assert(_code == lir_virtual_call, "only have vtable for real vcall");
1184    return (intptr_t) addr();
1185  }
1186
1187  virtual void emit_code(LIR_Assembler* masm);
1188  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1189  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1190};
1191
1192// --------------------------------------------------
1193// LIR_OpLabel
1194// --------------------------------------------------
1195// Location where a branch can continue
1196class LIR_OpLabel: public LIR_Op {
1197 friend class LIR_OpVisitState;
1198
1199 private:
1200  Label* _label;
1201 public:
1202  LIR_OpLabel(Label* lbl)
1203   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1204   , _label(lbl)                                 {}
1205  Label* label() const                           { return _label; }
1206
1207  virtual void emit_code(LIR_Assembler* masm);
1208  virtual LIR_OpLabel* as_OpLabel() { return this; }
1209  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1210};
1211
1212// LIR_OpArrayCopy
1213class LIR_OpArrayCopy: public LIR_Op {
1214 friend class LIR_OpVisitState;
1215
1216 private:
1217  ArrayCopyStub*  _stub;
1218  LIR_Opr   _src;
1219  LIR_Opr   _src_pos;
1220  LIR_Opr   _dst;
1221  LIR_Opr   _dst_pos;
1222  LIR_Opr   _length;
1223  LIR_Opr   _tmp;
1224  ciArrayKlass* _expected_type;
1225  int       _flags;
1226
1227public:
1228  enum Flags {
1229    src_null_check         = 1 << 0,
1230    dst_null_check         = 1 << 1,
1231    src_pos_positive_check = 1 << 2,
1232    dst_pos_positive_check = 1 << 3,
1233    length_positive_check  = 1 << 4,
1234    src_range_check        = 1 << 5,
1235    dst_range_check        = 1 << 6,
1236    type_check             = 1 << 7,
1237    overlapping            = 1 << 8,
1238    unaligned              = 1 << 9,
1239    src_objarray           = 1 << 10,
1240    dst_objarray           = 1 << 11,
1241    all_flags              = (1 << 12) - 1
1242  };
1243
1244  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1245                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1246
1247  LIR_Opr src() const                            { return _src; }
1248  LIR_Opr src_pos() const                        { return _src_pos; }
1249  LIR_Opr dst() const                            { return _dst; }
1250  LIR_Opr dst_pos() const                        { return _dst_pos; }
1251  LIR_Opr length() const                         { return _length; }
1252  LIR_Opr tmp() const                            { return _tmp; }
1253  int flags() const                              { return _flags; }
1254  ciArrayKlass* expected_type() const            { return _expected_type; }
1255  ArrayCopyStub* stub() const                    { return _stub; }
1256
1257  virtual void emit_code(LIR_Assembler* masm);
1258  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1259  void print_instr(outputStream* out) const PRODUCT_RETURN;
1260};
1261
1262
1263// --------------------------------------------------
1264// LIR_Op0
1265// --------------------------------------------------
1266class LIR_Op0: public LIR_Op {
1267 friend class LIR_OpVisitState;
1268
1269 public:
1270  LIR_Op0(LIR_Code code)
1271   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1272  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1273   : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1274
1275  virtual void emit_code(LIR_Assembler* masm);
1276  virtual LIR_Op0* as_Op0() { return this; }
1277  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1278};
1279
1280
1281// --------------------------------------------------
1282// LIR_Op1
1283// --------------------------------------------------
1284
1285class LIR_Op1: public LIR_Op {
1286 friend class LIR_OpVisitState;
1287
1288 protected:
1289  LIR_Opr         _opr;   // input operand
1290  BasicType       _type;  // Operand types
1291  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1292
1293  static void print_patch_code(outputStream* out, LIR_PatchCode code);
1294
1295  void set_kind(LIR_MoveKind kind) {
1296    assert(code() == lir_move, "must be");
1297    _flags = kind;
1298  }
1299
1300 public:
1301  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1302    : LIR_Op(code, result, info)
1303    , _opr(opr)
1304    , _patch(patch)
1305    , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1306
1307  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1308    : LIR_Op(code, result, info)
1309    , _opr(opr)
1310    , _patch(patch)
1311    , _type(type)                      {
1312    assert(code == lir_move, "must be");
1313    set_kind(kind);
1314  }
1315
1316  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1317    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1318    , _opr(opr)
1319    , _patch(lir_patch_none)
1320    , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1321
1322  LIR_Opr in_opr()           const               { return _opr;   }
1323  LIR_PatchCode patch_code() const               { return _patch; }
1324  BasicType type()           const               { return _type;  }
1325
1326  LIR_MoveKind move_kind() const {
1327    assert(code() == lir_move, "must be");
1328    return (LIR_MoveKind)_flags;
1329  }
1330
1331  virtual void emit_code(LIR_Assembler* masm);
1332  virtual LIR_Op1* as_Op1() { return this; }
1333  virtual const char * name() const PRODUCT_RETURN0;
1334
1335  void set_in_opr(LIR_Opr opr) { _opr = opr; }
1336
1337  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1338  virtual void verify() const;
1339};
1340
1341
1342// for runtime calls
1343class LIR_OpRTCall: public LIR_OpCall {
1344 friend class LIR_OpVisitState;
1345
1346 private:
1347  LIR_Opr _tmp;
1348 public:
1349  LIR_OpRTCall(address addr, LIR_Opr tmp,
1350               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1351    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1352    , _tmp(tmp) {}
1353
1354  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1355  virtual void emit_code(LIR_Assembler* masm);
1356  virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1357
1358  LIR_Opr tmp() const                            { return _tmp; }
1359
1360  virtual void verify() const;
1361};
1362
1363
1364class LIR_OpBranch: public LIR_Op {
1365 friend class LIR_OpVisitState;
1366
1367 private:
1368  LIR_Condition _cond;
1369  BasicType     _type;
1370  Label*        _label;
1371  BlockBegin*   _block;  // if this is a branch to a block, this is the block
1372  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1373  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1374
1375 public:
1376  LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
1377    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1378    , _cond(cond)
1379    , _type(type)
1380    , _label(lbl)
1381    , _block(NULL)
1382    , _ublock(NULL)
1383    , _stub(NULL) { }
1384
1385  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
1386  LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
1387
1388  // for unordered comparisons
1389  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
1390
1391  LIR_Condition cond()        const              { return _cond;        }
1392  BasicType     type()        const              { return _type;        }
1393  Label*        label()       const              { return _label;       }
1394  BlockBegin*   block()       const              { return _block;       }
1395  BlockBegin*   ublock()      const              { return _ublock;      }
1396  CodeStub*     stub()        const              { return _stub;        }
1397
1398  void          change_block(BlockBegin* b);
1399  void          change_ublock(BlockBegin* b);
1400  void          negate_cond();
1401
1402  virtual void emit_code(LIR_Assembler* masm);
1403  virtual LIR_OpBranch* as_OpBranch() { return this; }
1404  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1405};
1406
1407
1408class ConversionStub;
1409
1410class LIR_OpConvert: public LIR_Op1 {
1411 friend class LIR_OpVisitState;
1412
1413 private:
1414   Bytecodes::Code _bytecode;
1415   ConversionStub* _stub;
1416#ifdef PPC
1417  LIR_Opr _tmp1;
1418  LIR_Opr _tmp2;
1419#endif
1420
1421 public:
1422   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1423     : LIR_Op1(lir_convert, opr, result)
1424     , _stub(stub)
1425#ifdef PPC
1426     , _tmp1(LIR_OprDesc::illegalOpr())
1427     , _tmp2(LIR_OprDesc::illegalOpr())
1428#endif
1429     , _bytecode(code)                           {}
1430
1431#ifdef PPC
1432   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
1433                 ,LIR_Opr tmp1, LIR_Opr tmp2)
1434     : LIR_Op1(lir_convert, opr, result)
1435     , _stub(stub)
1436     , _tmp1(tmp1)
1437     , _tmp2(tmp2)
1438     , _bytecode(code)                           {}
1439#endif
1440
1441  Bytecodes::Code bytecode() const               { return _bytecode; }
1442  ConversionStub* stub() const                   { return _stub; }
1443#ifdef PPC
1444  LIR_Opr tmp1() const                           { return _tmp1; }
1445  LIR_Opr tmp2() const                           { return _tmp2; }
1446#endif
1447
1448  virtual void emit_code(LIR_Assembler* masm);
1449  virtual LIR_OpConvert* as_OpConvert() { return this; }
1450  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1451
1452  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1453};
1454
1455
1456// LIR_OpAllocObj
1457class LIR_OpAllocObj : public LIR_Op1 {
1458 friend class LIR_OpVisitState;
1459
1460 private:
1461  LIR_Opr _tmp1;
1462  LIR_Opr _tmp2;
1463  LIR_Opr _tmp3;
1464  LIR_Opr _tmp4;
1465  int     _hdr_size;
1466  int     _obj_size;
1467  CodeStub* _stub;
1468  bool    _init_check;
1469
1470 public:
1471  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1472                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1473                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1474    : LIR_Op1(lir_alloc_object, klass, result)
1475    , _tmp1(t1)
1476    , _tmp2(t2)
1477    , _tmp3(t3)
1478    , _tmp4(t4)
1479    , _hdr_size(hdr_size)
1480    , _obj_size(obj_size)
1481    , _init_check(init_check)
1482    , _stub(stub)                                { }
1483
1484  LIR_Opr klass()        const                   { return in_opr();     }
1485  LIR_Opr obj()          const                   { return result_opr(); }
1486  LIR_Opr tmp1()         const                   { return _tmp1;        }
1487  LIR_Opr tmp2()         const                   { return _tmp2;        }
1488  LIR_Opr tmp3()         const                   { return _tmp3;        }
1489  LIR_Opr tmp4()         const                   { return _tmp4;        }
1490  int     header_size()  const                   { return _hdr_size;    }
1491  int     object_size()  const                   { return _obj_size;    }
1492  bool    init_check()   const                   { return _init_check;  }
1493  CodeStub* stub()       const                   { return _stub;        }
1494
1495  virtual void emit_code(LIR_Assembler* masm);
1496  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1497  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1498};
1499
1500
1501// LIR_OpRoundFP
1502class LIR_OpRoundFP : public LIR_Op1 {
1503 friend class LIR_OpVisitState;
1504
1505 private:
1506  LIR_Opr _tmp;
1507
1508 public:
1509  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1510    : LIR_Op1(lir_roundfp, reg, result)
1511    , _tmp(stack_loc_temp) {}
1512
1513  LIR_Opr tmp() const                            { return _tmp; }
1514  virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1515  void print_instr(outputStream* out) const PRODUCT_RETURN;
1516};
1517
1518// LIR_OpTypeCheck
1519class LIR_OpTypeCheck: public LIR_Op {
1520 friend class LIR_OpVisitState;
1521
1522 private:
1523  LIR_Opr       _object;
1524  LIR_Opr       _array;
1525  ciKlass*      _klass;
1526  LIR_Opr       _tmp1;
1527  LIR_Opr       _tmp2;
1528  LIR_Opr       _tmp3;
1529  bool          _fast_check;
1530  CodeEmitInfo* _info_for_patch;
1531  CodeEmitInfo* _info_for_exception;
1532  CodeStub*     _stub;
1533  ciMethod*     _profiled_method;
1534  int           _profiled_bci;
1535  bool          _should_profile;
1536
1537public:
1538  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1539                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1540                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1541  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1542                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1543
1544  LIR_Opr object() const                         { return _object;         }
1545  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1546  LIR_Opr tmp1() const                           { return _tmp1;           }
1547  LIR_Opr tmp2() const                           { return _tmp2;           }
1548  LIR_Opr tmp3() const                           { return _tmp3;           }
1549  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1550  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1551  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1552  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1553  CodeStub* stub() const                         { return _stub;           }
1554
1555  // MethodData* profiling
1556  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1557  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1558  void set_should_profile(bool b)                { _should_profile = b;       }
1559  ciMethod* profiled_method() const              { return _profiled_method;   }
1560  int       profiled_bci() const                 { return _profiled_bci;      }
1561  bool      should_profile() const               { return _should_profile;    }
1562
1563  virtual void emit_code(LIR_Assembler* masm);
1564  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1565  void print_instr(outputStream* out) const PRODUCT_RETURN;
1566};
1567
1568// LIR_Op2
1569class LIR_Op2: public LIR_Op {
1570 friend class LIR_OpVisitState;
1571
1572  int  _fpu_stack_size; // for sin/cos implementation on Intel
1573
1574 protected:
1575  LIR_Opr   _opr1;
1576  LIR_Opr   _opr2;
1577  BasicType _type;
1578  LIR_Opr   _tmp1;
1579  LIR_Opr   _tmp2;
1580  LIR_Opr   _tmp3;
1581  LIR_Opr   _tmp4;
1582  LIR_Opr   _tmp5;
1583  LIR_Condition _condition;
1584
1585  void verify() const;
1586
1587 public:
1588  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1589    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1590    , _opr1(opr1)
1591    , _opr2(opr2)
1592    , _type(T_ILLEGAL)
1593    , _condition(condition)
1594    , _fpu_stack_size(0)
1595    , _tmp1(LIR_OprFact::illegalOpr)
1596    , _tmp2(LIR_OprFact::illegalOpr)
1597    , _tmp3(LIR_OprFact::illegalOpr)
1598    , _tmp4(LIR_OprFact::illegalOpr)
1599    , _tmp5(LIR_OprFact::illegalOpr) {
1600    assert(code == lir_cmp, "code check");
1601  }
1602
1603  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1604    : LIR_Op(code, result, NULL)
1605    , _opr1(opr1)
1606    , _opr2(opr2)
1607    , _type(type)
1608    , _condition(condition)
1609    , _fpu_stack_size(0)
1610    , _tmp1(LIR_OprFact::illegalOpr)
1611    , _tmp2(LIR_OprFact::illegalOpr)
1612    , _tmp3(LIR_OprFact::illegalOpr)
1613    , _tmp4(LIR_OprFact::illegalOpr)
1614    , _tmp5(LIR_OprFact::illegalOpr) {
1615    assert(code == lir_cmove, "code check");
1616    assert(type != T_ILLEGAL, "cmove should have type");
1617  }
1618
1619  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1620          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1621    : LIR_Op(code, result, info)
1622    , _opr1(opr1)
1623    , _opr2(opr2)
1624    , _type(type)
1625    , _condition(lir_cond_unknown)
1626    , _fpu_stack_size(0)
1627    , _tmp1(LIR_OprFact::illegalOpr)
1628    , _tmp2(LIR_OprFact::illegalOpr)
1629    , _tmp3(LIR_OprFact::illegalOpr)
1630    , _tmp4(LIR_OprFact::illegalOpr)
1631    , _tmp5(LIR_OprFact::illegalOpr) {
1632    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1633  }
1634
1635  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1636          LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1637    : LIR_Op(code, result, NULL)
1638    , _opr1(opr1)
1639    , _opr2(opr2)
1640    , _type(T_ILLEGAL)
1641    , _condition(lir_cond_unknown)
1642    , _fpu_stack_size(0)
1643    , _tmp1(tmp1)
1644    , _tmp2(tmp2)
1645    , _tmp3(tmp3)
1646    , _tmp4(tmp4)
1647    , _tmp5(tmp5) {
1648    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1649  }
1650
1651  LIR_Opr in_opr1() const                        { return _opr1; }
1652  LIR_Opr in_opr2() const                        { return _opr2; }
1653  BasicType type()  const                        { return _type; }
1654  LIR_Opr tmp1_opr() const                       { return _tmp1; }
1655  LIR_Opr tmp2_opr() const                       { return _tmp2; }
1656  LIR_Opr tmp3_opr() const                       { return _tmp3; }
1657  LIR_Opr tmp4_opr() const                       { return _tmp4; }
1658  LIR_Opr tmp5_opr() const                       { return _tmp5; }
1659  LIR_Condition condition() const  {
1660    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
1661  }
1662  void set_condition(LIR_Condition condition) {
1663    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1664  }
1665
1666  void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1667  int  fpu_stack_size() const                    { return _fpu_stack_size; }
1668
1669  void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1670  void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1671
1672  virtual void emit_code(LIR_Assembler* masm);
1673  virtual LIR_Op2* as_Op2() { return this; }
1674  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1675};
1676
1677class LIR_OpAllocArray : public LIR_Op {
1678 friend class LIR_OpVisitState;
1679
1680 private:
1681  LIR_Opr   _klass;
1682  LIR_Opr   _len;
1683  LIR_Opr   _tmp1;
1684  LIR_Opr   _tmp2;
1685  LIR_Opr   _tmp3;
1686  LIR_Opr   _tmp4;
1687  BasicType _type;
1688  CodeStub* _stub;
1689
1690 public:
1691  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1692    : LIR_Op(lir_alloc_array, result, NULL)
1693    , _klass(klass)
1694    , _len(len)
1695    , _tmp1(t1)
1696    , _tmp2(t2)
1697    , _tmp3(t3)
1698    , _tmp4(t4)
1699    , _type(type)
1700    , _stub(stub) {}
1701
1702  LIR_Opr   klass()   const                      { return _klass;       }
1703  LIR_Opr   len()     const                      { return _len;         }
1704  LIR_Opr   obj()     const                      { return result_opr(); }
1705  LIR_Opr   tmp1()    const                      { return _tmp1;        }
1706  LIR_Opr   tmp2()    const                      { return _tmp2;        }
1707  LIR_Opr   tmp3()    const                      { return _tmp3;        }
1708  LIR_Opr   tmp4()    const                      { return _tmp4;        }
1709  BasicType type()    const                      { return _type;        }
1710  CodeStub* stub()    const                      { return _stub;        }
1711
1712  virtual void emit_code(LIR_Assembler* masm);
1713  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1714  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1715};
1716
1717
1718class LIR_Op3: public LIR_Op {
1719 friend class LIR_OpVisitState;
1720
1721 private:
1722  LIR_Opr _opr1;
1723  LIR_Opr _opr2;
1724  LIR_Opr _opr3;
1725 public:
1726  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1727    : LIR_Op(code, result, info)
1728    , _opr1(opr1)
1729    , _opr2(opr2)
1730    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1731  LIR_Opr in_opr1() const                        { return _opr1; }
1732  LIR_Opr in_opr2() const                        { return _opr2; }
1733  LIR_Opr in_opr3() const                        { return _opr3; }
1734
1735  virtual void emit_code(LIR_Assembler* masm);
1736  virtual LIR_Op3* as_Op3() { return this; }
1737  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1738};
1739
1740
1741//--------------------------------
1742class LabelObj: public CompilationResourceObj {
1743 private:
1744  Label _label;
1745 public:
1746  LabelObj()                                     {}
1747  Label* label()                                 { return &_label; }
1748};
1749
1750
1751class LIR_OpLock: public LIR_Op {
1752 friend class LIR_OpVisitState;
1753
1754 private:
1755  LIR_Opr _hdr;
1756  LIR_Opr _obj;
1757  LIR_Opr _lock;
1758  LIR_Opr _scratch;
1759  CodeStub* _stub;
1760 public:
1761  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1762    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1763    , _hdr(hdr)
1764    , _obj(obj)
1765    , _lock(lock)
1766    , _scratch(scratch)
1767    , _stub(stub)                      {}
1768
1769  LIR_Opr hdr_opr() const                        { return _hdr; }
1770  LIR_Opr obj_opr() const                        { return _obj; }
1771  LIR_Opr lock_opr() const                       { return _lock; }
1772  LIR_Opr scratch_opr() const                    { return _scratch; }
1773  CodeStub* stub() const                         { return _stub; }
1774
1775  virtual void emit_code(LIR_Assembler* masm);
1776  virtual LIR_OpLock* as_OpLock() { return this; }
1777  void print_instr(outputStream* out) const PRODUCT_RETURN;
1778};
1779
1780
1781class LIR_OpDelay: public LIR_Op {
1782 friend class LIR_OpVisitState;
1783
1784 private:
1785  LIR_Op* _op;
1786
1787 public:
1788  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1789    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1790    _op(op) {
1791    assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
1792  }
1793  virtual void emit_code(LIR_Assembler* masm);
1794  virtual LIR_OpDelay* as_OpDelay() { return this; }
1795  void print_instr(outputStream* out) const PRODUCT_RETURN;
1796  LIR_Op* delay_op() const { return _op; }
1797  CodeEmitInfo* call_info() const { return info(); }
1798};
1799
1800
1801// LIR_OpCompareAndSwap
1802class LIR_OpCompareAndSwap : public LIR_Op {
1803 friend class LIR_OpVisitState;
1804
1805 private:
1806  LIR_Opr _addr;
1807  LIR_Opr _cmp_value;
1808  LIR_Opr _new_value;
1809  LIR_Opr _tmp1;
1810  LIR_Opr _tmp2;
1811
1812 public:
1813  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1814                       LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1815    : LIR_Op(code, result, NULL)  // no info
1816    , _addr(addr)
1817    , _cmp_value(cmp_value)
1818    , _new_value(new_value)
1819    , _tmp1(t1)
1820    , _tmp2(t2)                                  { }
1821
1822  LIR_Opr addr()        const                    { return _addr;  }
1823  LIR_Opr cmp_value()   const                    { return _cmp_value; }
1824  LIR_Opr new_value()   const                    { return _new_value; }
1825  LIR_Opr tmp1()        const                    { return _tmp1;      }
1826  LIR_Opr tmp2()        const                    { return _tmp2;      }
1827
1828  virtual void emit_code(LIR_Assembler* masm);
1829  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1830  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1831};
1832
1833// LIR_OpProfileCall
1834class LIR_OpProfileCall : public LIR_Op {
1835 friend class LIR_OpVisitState;
1836
1837 private:
1838  ciMethod* _profiled_method;
1839  int       _profiled_bci;
1840  ciMethod* _profiled_callee;
1841  LIR_Opr   _mdo;
1842  LIR_Opr   _recv;
1843  LIR_Opr   _tmp1;
1844  ciKlass*  _known_holder;
1845
1846 public:
1847  // Destroys recv
1848  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1849    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
1850    , _profiled_method(profiled_method)
1851    , _profiled_bci(profiled_bci)
1852    , _profiled_callee(profiled_callee)
1853    , _mdo(mdo)
1854    , _recv(recv)
1855    , _tmp1(t1)
1856    , _known_holder(known_holder)                { }
1857
1858  ciMethod* profiled_method() const              { return _profiled_method;  }
1859  int       profiled_bci()    const              { return _profiled_bci;     }
1860  ciMethod* profiled_callee() const              { return _profiled_callee;  }
1861  LIR_Opr   mdo()             const              { return _mdo;              }
1862  LIR_Opr   recv()            const              { return _recv;             }
1863  LIR_Opr   tmp1()            const              { return _tmp1;             }
1864  ciKlass*  known_holder()    const              { return _known_holder;     }
1865
1866  virtual void emit_code(LIR_Assembler* masm);
1867  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1868  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1869};
1870
1871class LIR_InsertionBuffer;
1872
1873//--------------------------------LIR_List---------------------------------------------------
1874// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1875// The LIR instructions are appended by the LIR_List class itself (a brief usage sketch follows the class definition below).
1876//
1877// Notes:
1878// - all offsets are (should be) in bytes
1879// - local positions are specified with an offset, with offset 0 being local 0
1880
1881class LIR_List: public CompilationResourceObj {
1882 private:
1883  LIR_OpList  _operations;
1884
1885  Compilation*  _compilation;
1886#ifndef PRODUCT
1887  BlockBegin*   _block;
1888#endif
1889#ifdef ASSERT
1890  const char *  _file;
1891  int           _line;
1892#endif
1893
1894  void append(LIR_Op* op) {
1895    if (op->source() == NULL)
1896      op->set_source(_compilation->current_instruction());
1897#ifndef PRODUCT
1898    if (PrintIRWithLIR) {
1899      _compilation->maybe_print_current_instruction();
1900      op->print(); tty->cr();
1901    }
1902#endif // PRODUCT
1903
1904    _operations.append(op);
1905
1906#ifdef ASSERT
1907    op->verify();
1908    op->set_file_and_line(_file, _line);
1909    _file = NULL;
1910    _line = 0;
1911#endif
1912  }
1913
1914 public:
1915  LIR_List(Compilation* compilation, BlockBegin* block = NULL);
1916
1917#ifdef ASSERT
1918  void set_file_and_line(const char * file, int line);
1919#endif
1920
1921  //---------- accessors ---------------
1922  LIR_OpList* instructions_list()                { return &_operations; }
1923  int         length() const                     { return _operations.length(); }
1924  LIR_Op*     at(int i) const                    { return _operations.at(i); }
1925
1926  NOT_PRODUCT(BlockBegin* block() const          { return _block; });
1927
1928  // insert the LIR_Ops from the buffer at the right places in this LIR_List
1929  void append(LIR_InsertionBuffer* buffer);
1930
1931  //---------- mutators ---------------
1932  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
1933  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
1934  void remove_at(int i)                          { _operations.remove_at(i); }
1935
1936  //---------- printing -------------
1937  void print_instructions() PRODUCT_RETURN;
1938
1939
1940  //---------- instructions -------------
1941  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1942                        address dest, LIR_OprList* arguments,
1943                        CodeEmitInfo* info) {
1944    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
1945  }
1946  void call_static(ciMethod* method, LIR_Opr result,
1947                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1948    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
1949  }
1950  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1951                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1952    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
1953  }
1954  void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1955                    intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
1956    append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
1957  }
1958  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
1959                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
1960    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
1961  }
1962
1963  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
1964  void word_align()                              { append(new LIR_Op0(lir_word_align)); }
1965  void membar()                                  { append(new LIR_Op0(lir_membar)); }
1966  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
1967  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
1968  void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
1969  void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
1970  void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
1971  void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
1972
1973  void nop()                                     { append(new LIR_Op0(lir_nop)); }
1974  void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
1975
1976  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
1977  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
1978
1979  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
1980
1981  void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
1982  void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
1983
1984  // result is a stack location for old backend and vreg for UseLinearScan
1985  // stack_loc_temp is an illegal register for old backend
1986  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
1987  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1988  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1989  void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
1990  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
1991  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
1992  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
1993  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
1994    if (UseCompressedOops) {
1995      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
1996    } else {
1997      move(src, dst, info);
1998    }
1999  }
2000  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2001    if (UseCompressedOops) {
2002      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2003    } else {
2004      move(src, dst, info);
2005    }
2006  }
2007  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2008
2009  void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2010  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2011
2012  void oop2reg  (Metadata* o, LIR_Opr reg)       { append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2013  void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2014
2015  void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
2016
2017  void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2018
2019#ifdef PPC
2020  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
2021#endif
2022  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2023
2024  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2025  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2026  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2027
2028  void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
2029  void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
2030
2031  void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
2032  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2033    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2034  }
2035  void unwind_exception(LIR_Opr exceptionOop) {
2036    append(new LIR_Op1(lir_unwind, exceptionOop));
2037  }
2038
2039  void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2040    append(new LIR_Op2(lir_compare_to,  left, right, dst));
2041  }
2042
2043  void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2044  void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2045
2046  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2047    append(new LIR_Op2(lir_cmp, condition, left, right, info));
2048  }
2049  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2050    cmp(condition, left, LIR_OprFact::intConst(right), info);
2051  }
2052
2053  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2054  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2055
2056  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2057    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2058  }
2059
2060  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2061                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2062  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2063               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2064  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2065               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2066
2067  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2068  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2069  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
2070  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2071  void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
2072  void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
2073  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2074  void exp (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5)                { append(new LIR_Op2(lir_exp , from, tmp1, to, tmp2, tmp3, tmp4, tmp5)); }
2075  void pow (LIR_Opr arg1, LIR_Opr arg2, LIR_Opr res, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5) { append(new LIR_Op2(lir_pow, arg1, arg2, res, tmp1, tmp2, tmp3, tmp4, tmp5)); }
2076
2077  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2078  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2079  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2080  void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
2081  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2082  void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
2083  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2084
2085  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2086  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2087
2088  void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2089
2090  void prefetch(LIR_Address* addr, bool is_store);
2091
2092  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2093  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2094  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2095  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2096  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2097
2098  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2099  void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2100  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2101  void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2102
2103  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2104  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2105
2106  // jump is an unconditional branch
2107  void jump(BlockBegin* block) {
2108    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
2109  }
2110  void jump(CodeStub* stub) {
2111    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
2112  }
2113  void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
2114  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
2115    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2116    append(new LIR_OpBranch(cond, type, block));
2117  }
2118  void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
2119    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
2120    append(new LIR_OpBranch(cond, type, stub));
2121  }
2122  void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
2123    assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
2124    append(new LIR_OpBranch(cond, type, block, unordered));
2125  }
2126
2127  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2128  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2129  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2130
2131  void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2132  void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2133  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2134
2135  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2136  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2137
2138  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2139    append(new LIR_OpRTCall(routine, tmp, result, arguments));
2140  }
2141
2142  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2143                    LIR_OprList* arguments, CodeEmitInfo* info) {
2144    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2145  }
2146
2147  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2148  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2149  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2150
2151  void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
2152  void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
2153  void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2154
2155  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2156
2157  void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
2158
2159  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2160  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2161
2162  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2163                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2164                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2165                  ciMethod* profiled_method, int profiled_bci);
2166  // MethodData* profiling
2167  void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2168    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
2169  }
2170};
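
// Illustrative usage sketch (guarded out of the build): roughly how a front end could
// lower "dst = (left < right) ? 1 : 0; return dst;" through the append helpers above.
// The parameters (compilation, block, left, right, dst) are assumed to be supplied by
// the caller; each helper simply wraps append(new LIR_OpN(...)) as shown in the class.
#if 0
static void lir_list_usage_sketch(Compilation* compilation, BlockBegin* block,
                                  LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  LIR_List* lir = new LIR_List(compilation, block);
  lir->cmp(lir_cond_less, left, right);                  // appends a LIR_Op2(lir_cmp, ...)
  lir->cmove(lir_cond_less, LIR_OprFact::intConst(1),
             LIR_OprFact::intConst(0), dst, T_INT);      // appends a LIR_Op2(lir_cmove, ...)
  lir->return_op(dst);                                   // appends a LIR_Op1(lir_return, ...)
}
#endif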
2171
2172void print_LIR(BlockList* blocks);
2173
2174class LIR_InsertionBuffer : public CompilationResourceObj {
2175 private:
2176  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2177
2178  // list of insertion points; index and count are stored alternately (see the sketch after the class):
2179  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2180  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
2181  intStack    _index_and_count;
2182
2183  // the LIR_Ops to be inserted
2184  LIR_OpList  _ops;
2185
2186  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2187  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2188  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2189
2190#ifdef ASSERT
2191  void verify();
2192#endif
2193 public:
2194  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2195
2196  // must be called before using the insertion buffer
2197  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2198  bool initialized() const  { return _lir != NULL; }
2199  // called automatically when the buffer is appended to the LIR_List
2200  void finish()             { _lir = NULL; }
2201
2202  // accessors
2203  LIR_List*  lir_list() const             { return _lir; }
2204  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2205  int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2206  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2207
2208  int number_of_ops() const               { return _ops.length(); }
2209  LIR_Op* op_at(int i) const              { return _ops.at(i); }
2210
2211  // append an instruction to the buffer
2212  void append(int index, LIR_Op* op);
2213
2214  // convenience: append a move instruction at the given index
2215  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2216};
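
// Worked sketch (guarded out of the build) of the _index_and_count layout noted in the
// class: buffering two ops for LIR_List index 3 and one move for index 7 should leave
// _index_and_count as { 3, 2, 7, 1 } and the three ops stored in _ops in order.
// LIR_List::append(LIR_InsertionBuffer*) later splices them in at those indices and
// finishes the buffer automatically. The names lir, op_a, op_b, src and dst are
// placeholders; this pattern is used, for example, when inserting spill moves.
#if 0
static void insertion_buffer_sketch(LIR_List* lir, LIR_Op* op_a, LIR_Op* op_b,
                                    LIR_Opr src, LIR_Opr dst) {
  LIR_InsertionBuffer buf;
  buf.init(lir);            // bind the buffer to the target LIR_List
  buf.append(3, op_a);      // first op to be inserted at index 3
  buf.append(3, op_b);      // same index again -> count for index 3 becomes 2
  buf.move(7, src, dst);    // convenience helper: a lir_move op to be inserted at index 7
  lir->append(&buf);        // splice all buffered ops into the list at their indices
}
#endif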
2217
2218
2219//
2220// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2221// Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2222// information about the input, output and temporary operands used by the
2223// op to be recorded.  It also records whether the op has call semantics
2224// and collects all the CodeEmitInfos used by the op. A usage sketch follows the class below.
2225//
2226
2227
2228class LIR_OpVisitState: public StackObj {
2229 public:
2230  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2231
2232  enum {
2233    maxNumberOfOperands = 16,
2234    maxNumberOfInfos = 4
2235  };
2236
2237 private:
2238  LIR_Op*          _op;
2239
2240  // optimization: the operands and infos are not stored in a variable-length
2241  //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
2242  int              _oprs_len[numModes];
2243  LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2244  int _info_len;
2245  CodeEmitInfo*    _info_new[maxNumberOfInfos];
2246
2247  bool             _has_call;
2248  bool             _has_slow_case;
2249
2250
2251  // only include register operands
2252  // addresses are decomposed into their base and index registers
2253  // constants and stack operands are ignored
2254  void append(LIR_Opr& opr, OprMode mode) {
2255    assert(opr->is_valid(), "should not call this otherwise");
2256    assert(mode >= 0 && mode < numModes, "bad mode");
2257
2258    if (opr->is_register()) {
2259       assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2260      _oprs_new[mode][_oprs_len[mode]++] = &opr;
2261
2262    } else if (opr->is_pointer()) {
2263      LIR_Address* address = opr->as_address_ptr();
2264      if (address != NULL) {
2265        // special handling for addresses: add base and index register of the address
2266        // both are always input operands!
2267        if (address->_base->is_valid()) {
2268          assert(address->_base->is_register(), "must be");
2269          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2270          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
2271        }
2272        if (address->_index->is_valid()) {
2273          assert(address->_index->is_register(), "must be");
2274          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
2275          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
2276        }
2277
2278      } else {
2279        assert(opr->is_constant(), "constant operands are not processed");
2280      }
2281    } else {
2282      assert(opr->is_stack(), "stack operands are not processed");
2283    }
2284  }
2285
2286  void append(CodeEmitInfo* info) {
2287    assert(info != NULL, "should not call this otherwise");
2288    assert(_info_len < maxNumberOfInfos, "array overflow");
2289    _info_new[_info_len++] = info;
2290  }
2291
2292 public:
2293  LIR_OpVisitState()         { reset(); }
2294
2295  LIR_Op* op() const         { return _op; }
2296  void set_op(LIR_Op* op)    { reset(); _op = op; }
2297
2298  bool has_call() const      { return _has_call; }
2299  bool has_slow_case() const { return _has_slow_case; }
2300
2301  void reset() {
2302    _op = NULL;
2303    _has_call = false;
2304    _has_slow_case = false;
2305
2306    _oprs_len[inputMode] = 0;
2307    _oprs_len[tempMode] = 0;
2308    _oprs_len[outputMode] = 0;
2309    _info_len = 0;
2310  }
2311
2312
2313  int opr_count(OprMode mode) const {
2314    assert(mode >= 0 && mode < numModes, "bad mode");
2315    return _oprs_len[mode];
2316  }
2317
2318  LIR_Opr opr_at(OprMode mode, int index) const {
2319    assert(mode >= 0 && mode < numModes, "bad mode");
2320    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2321    return *_oprs_new[mode][index];
2322  }
2323
2324  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2325    assert(mode >= 0 && mode < numModes, "bad mode");
2326    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2327    *_oprs_new[mode][index] = opr;
2328  }
2329
2330  int info_count() const {
2331    return _info_len;
2332  }
2333
2334  CodeEmitInfo* info_at(int index) const {
2335    assert(index < _info_len, "index out of bounds");
2336    return _info_new[index];
2337  }
2338
2339  XHandlers* all_xhandler();
2340
2341  // collects all register operands of the instruction
2342  void visit(LIR_Op* op);
2343
2344#ifdef ASSERT
2345  // check that an operation has no operands
2346  bool no_operands(LIR_Op* op);
2347#endif
2348
2349  // LIR_Op visitor functions use these to fill in the state
2350  void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2351  void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2352  void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2353  void do_info(CodeEmitInfo* info)        { append(info); }
2354
2355  void do_stub(CodeStub* stub);
2356  void do_call()                          { _has_call = true; }
2357  void do_slow_case()                     { _has_slow_case = true; }
2358  void do_slow_case(CodeEmitInfo* info) {
2359    _has_slow_case = true;
2360    append(info);
2361  }
2362};
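
// Hedged sketch (guarded out of the build) of the typical iteration pattern: visit()
// fills in the state for one op, after which its operands and CodeEmitInfos can be
// enumerated and, via set_opr_at(), rewritten in place. Address operands contribute
// their base and index registers as inputs, as described in append() above. The op is
// assumed to come from some LIR_List; register allocation walks operands this way.
#if 0
static void visit_state_sketch(LIR_Op* op) {
  LIR_OpVisitState state;
  state.visit(op);                                       // collect operands, infos and the call flag
  for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
    LIR_Opr opr = state.opr_at(LIR_OpVisitState::inputMode, i);
    // inspect opr here, or substitute it with set_opr_at(inputMode, i, some_other_opr)
  }
  if (state.has_call()) {
    // the op has call semantics (e.g. it may clobber caller-saved registers)
  }
  for (int i = 0; i < state.info_count(); i++) {
    CodeEmitInfo* info = state.info_at(i);
    // record debug information attached to this op
  }
}
#endif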
2363
2364
2365inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2366
2367#endif // SHARE_VM_C1_C1_LIR_HPP
2368