1 /*
   2  * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_C1_C1_LIR_HPP
  26 #define SHARE_C1_C1_LIR_HPP
  27 
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_ValueType.hpp"
  30 #include "oops/method.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 
  33 class BlockBegin;
  34 class BlockList;
  35 class LIR_Assembler;
  36 class CodeEmitInfo;
  37 class CodeStub;
  38 class CodeStubList;
  39 class C1SafepointPollStub;
  40 class ArrayCopyStub;
  41 class LIR_Op;
  42 class ciType;
  43 class ValueType;
  44 class LIR_OpVisitState;
  45 class FpuStackSim;
  46 
  47 //---------------------------------------------------------------------
  48 //                 LIR Operands
  49 //    LIR_OprPtr
  50 //      LIR_Const
  51 //      LIR_Address
  52 //---------------------------------------------------------------------
  53 class LIR_OprPtr;
  54 class LIR_Const;
  55 class LIR_Address;
  56 class LIR_OprVisitor;
  57 class LIR_Opr;
  58 
  59 typedef int          RegNr;
  60 
  61 typedef GrowableArray<LIR_Opr> LIR_OprList;
  62 typedef GrowableArray<LIR_Op*> LIR_OpArray;
  63 typedef GrowableArray<LIR_Op*> LIR_OpList;
  64 
  65 // define LIR_OprPtr early so LIR_Opr can refer to it
  66 class LIR_OprPtr: public CompilationResourceObj {
  67  public:
  68   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  69   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  70 
  71   virtual LIR_Const*  as_constant()              { return NULL; }
  72   virtual LIR_Address* as_address()              { return NULL; }
  73   virtual BasicType type() const                 = 0;
  74   virtual void print_value_on(outputStream* out) const = 0;
  75 };
  76 
  77 
  78 
  79 // LIR constants
  80 class LIR_Const: public LIR_OprPtr {
  81  private:
  82   JavaValue _value;
  83 
  84   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  85   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  86   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  87 
  88  public:
  89   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  90   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  91   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  92   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  93   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  LIR_Const(void* p) {
#ifdef _LP64
    assert(sizeof(jlong) >= sizeof(p), "too small");
    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
#else
    assert(sizeof(jint) >= sizeof(p), "too small");
    _value.set_type(T_INT);     _value.set_jint((jint)p);
#endif
  }
 103   LIR_Const(Metadata* m) {
 104     _value.set_type(T_METADATA);
 105 #ifdef _LP64
 106     _value.set_jlong((jlong)m);
 107 #else
 108     _value.set_jint((jint)m);
 109 #endif // _LP64
 110   }
 111 
 112   virtual BasicType type()       const { return _value.get_type(); }
 113   virtual LIR_Const* as_constant()     { return this; }
 114 
 115   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 116   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 117   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 118   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 119   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 120   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 121   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 122 
 123 #ifdef _LP64
 124   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 125   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
 126 #else
 127   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 128   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
 129 #endif
 130 
 131 
 132   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 133   jint      as_jint_lo_bits() const    {
 134     if (type() == T_DOUBLE) {
 135       return low(jlong_cast(_value.get_jdouble()));
 136     } else {
 137       return as_jint_lo();
 138     }
 139   }
 140   jint      as_jint_hi_bits() const    {
 141     if (type() == T_DOUBLE) {
 142       return high(jlong_cast(_value.get_jdouble()));
 143     } else {
 144       return as_jint_hi();
 145     }
 146   }
 147   jlong      as_jlong_bits() const    {
 148     if (type() == T_DOUBLE) {
 149       return jlong_cast(_value.get_jdouble());
 150     } else {
 151       return as_jlong();
 152     }
 153   }
 154 
 155   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 156 
 157 
 158   bool is_zero_float() {
 159     jfloat f = as_jfloat();
 160     jfloat ok = 0.0f;
 161     return jint_cast(f) == jint_cast(ok);
 162   }
 163 
 164   bool is_one_float() {
 165     jfloat f = as_jfloat();
 166     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 167   }
 168 
 169   bool is_zero_double() {
 170     jdouble d = as_jdouble();
 171     jdouble ok = 0.0;
 172     return jlong_cast(d) == jlong_cast(ok);
 173   }
 174 
 175   bool is_one_double() {
 176     jdouble d = as_jdouble();
 177     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 178   }
 179 };
 180 
 181 
 182 //---------------------LIR Operand descriptor------------------------------------
 183 //
// The class LIR_Opr represents a LIR instruction operand;
// it can be a register (ALU/FPU), a stack location or a constant.
// Constants and addresses are represented as resource-area allocated
// structures (see above), and pointers are stored in the _value field (cast to
// an intptr_t).
// Registers and stack locations are represented inline as integers
// (see the value() function).

// Previously, this class was derived from CompilationResourceObj.
// However, deriving from any of the "Obj" types in allocation.hpp seems
// detrimental, since in some build modes it would add a vtable to this class,
// which would make it no longer a 1-word, trivially-copyable wrapper object,
// which is the entire point of this class.
 197 
 198 class LIR_Opr {
 199  public:
  // value structure:
  //     data        opr-type opr-kind
  // +---------------+-------+------+
  // [max............|6 5 4 3|2 1 0]
  //                               ^
  //                         is_pointer bit
  //
  // If the lowest bit is cleared, the value is a pointer to a resource-area
  // allocated LIR_OprPtr structure (a constant or an address, see above);
  // otherwise the operand is a register or stack location.
  // We need 4 bits to represent the operand type.
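  //
  // Illustrative example (a sketch based on the OprBits/OprShift constants
  // below, not a normative description): for a physical cpu register such as
  // LIR_OprFact::single_cpu(5), kind_field() occupies the low bits, the
  // OprType and size follow, and data() holds the register number, so
  //   LIR_Opr r = LIR_OprFact::single_cpu(5);
  //   assert(r->is_single_cpu() && r->cpu_regnr() == 5, "decodes back to register 5");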
 209 
 210  private:
 211   friend class LIR_OprFact;
 212 
 213   intptr_t _value;
 214   // Conversion
 215   intptr_t value() const                         { return _value; }
 216 
 217   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 218     return (value() & mask) == masked_value;
 219   }
 220 
 221   enum OprKind {
 222       pointer_value      = 0
 223     , stack_value        = 1
 224     , cpu_register       = 3
 225     , fpu_register       = 5
 226     , illegal_value      = 7
 227   };
 228 
 229   enum OprBits {
 230       pointer_bits   = 1
 231     , kind_bits      = 3
 232     , type_bits      = 4
 233     , size_bits      = 2
 234     , destroys_bits  = 1
 235     , virtual_bits   = 1
 236     , is_xmm_bits    = 1
 237     , last_use_bits  = 1
 238     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 239     , non_data_bits  = pointer_bits + kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
 240                        + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
 241     , data_bits      = BitsPerInt - non_data_bits
 242     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 243   };
 244 
 245   enum OprShift {
 246       kind_shift     = 0
 247     , type_shift     = kind_shift     + kind_bits
 248     , size_shift     = type_shift     + type_bits
 249     , destroys_shift = size_shift     + size_bits
 250     , last_use_shift = destroys_shift + destroys_bits
 251     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 252     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 253     , is_xmm_shift   = virtual_shift + virtual_bits
 254     , data_shift     = is_xmm_shift + is_xmm_bits
 255     , reg1_shift = data_shift
 256     , reg2_shift = data_shift + reg_bits
 257 
 258   };
 259 
 260   enum OprSize {
 261       single_size = 0 << size_shift
 262     , double_size = 1 << size_shift
 263   };
 264 
 265   enum OprMask {
 266       kind_mask      = right_n_bits(kind_bits)
 267     , type_mask      = right_n_bits(type_bits) << type_shift
 268     , size_mask      = right_n_bits(size_bits) << size_shift
 269     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 270     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 271     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 272     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 273     , pointer_mask   = right_n_bits(pointer_bits)
 274     , lower_reg_mask = right_n_bits(reg_bits)
 275     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 276   };
 277 
 278   uintptr_t data() const                         { return value() >> data_shift; }
 279   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 280   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 281   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 282   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 283 
 284   static char type_char(BasicType t);
 285 
 286  public:
 287   LIR_Opr() : _value(0) {}
 288   LIR_Opr(intptr_t val) : _value(val) {}
 289   LIR_Opr(LIR_OprPtr *val) : _value(reinterpret_cast<intptr_t>(val)) {}
 290   bool operator==(const LIR_Opr &other) const { return _value == other._value; }
 291   bool operator!=(const LIR_Opr &other) const { return _value != other._value; }
 292   explicit operator bool() const { return _value != 0; }
 293 
 294   // UGLY HACK: make this value object look like a pointer (to itself). This
 295   // operator overload should be removed, and all callers updated from
 296   // `opr->fn()` to `opr.fn()`.
 297   const LIR_Opr* operator->() const { return this; }
 298   LIR_Opr* operator->() { return this; }
 299 
 300   enum {
 301     vreg_base = ConcreteRegisterImpl::number_of_registers,
 302     vreg_max = (1 << data_bits) - 1
 303   };
 304 
 305   static inline LIR_Opr illegalOpr();
 306   static inline LIR_Opr nullOpr();
 307 
 308   enum OprType {
 309       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 310     , int_type      = 1 << type_shift
 311     , long_type     = 2 << type_shift
 312     , object_type   = 3 << type_shift
 313     , address_type  = 4 << type_shift
 314     , float_type    = 5 << type_shift
 315     , double_type   = 6 << type_shift
 316     , metadata_type = 7 << type_shift
 317   };
 318   friend OprType as_OprType(BasicType t);
 319   friend BasicType as_BasicType(OprType t);
 320 
 321   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 322   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 323 
 324   static OprSize size_for(BasicType t) {
 325     switch (t) {
 326       case T_LONG:
 327       case T_DOUBLE:
 328         return double_size;
 329         break;
 330 
 331       case T_FLOAT:
 332       case T_BOOLEAN:
 333       case T_CHAR:
 334       case T_BYTE:
 335       case T_SHORT:
 336       case T_INT:
 337       case T_ADDRESS:
 338       case T_OBJECT:
 339       case T_PRIMITIVE_OBJECT:
 340       case T_ARRAY:
 341       case T_METADATA:
 342         return single_size;
 343         break;
 344 
 345       default:
 346         ShouldNotReachHere();
 347         return single_size;
 348       }
 349   }
 350 
 351 
 352   void validate_type() const PRODUCT_RETURN;
 353 
 354   BasicType type() const {
 355     if (is_pointer()) {
 356       return pointer()->type();
 357     }
 358     return as_BasicType(type_field());
 359   }
 360 
 361 
 362   ValueType* value_type() const                  { return as_ValueType(type()); }
 363 
 364   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 365 
 366   bool is_equal(LIR_Opr opr) const         { return *this == opr; }
  // checks whether the types are the same
 368   bool is_same_type(LIR_Opr opr) const     {
 369     assert(type_field() != unknown_type &&
 370            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 371     return type_field() == opr->type_field();
 372   }
 373   bool is_same_register(LIR_Opr opr) {
 374     return (is_register() && opr->is_register() &&
 375             kind_field() == opr->kind_field() &&
 376             (value() & no_type_mask) == (opr->value() & no_type_mask));
 377   }
 378 
 379   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 380   bool is_illegal() const      { return kind_field() == illegal_value; }
 381   bool is_valid() const        { return kind_field() != illegal_value; }
 382 
 383   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 384   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 385 
 386   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 387   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 388 
 389   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 390   bool is_oop() const;
 391 
  // semantics for fpu- and xmm-registers:
  // * the is_???_fpu queries also return true for xmm registers
  //   (e.g. both is_single_fpu and is_single_xmm are true for a single xmm operand)
  // * you must therefore always check for is_???_xmm prior to is_???_fpu to
  //   distinguish between fpu- and xmm-registers
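  //
  // For example (illustrative, x86): for LIR_OprFact::single_xmm(0) both
  // is_single_fpu() and is_single_xmm() return true, so the xmm query must
  // come first when the distinction matters.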
 397 
 398   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 399   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 400   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 401 
 402   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 403   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 404   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 405   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 406   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 407 
 408   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 409   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 410   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 411   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 412   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 413 
 414   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 415   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 416   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 417 
 418   // fast accessor functions for special bits that do not work for pointers
  // (in these functions, the check for is_pointer() is omitted)
 420   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 421   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 422   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 423   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 424   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 425 
 426   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 427   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 428   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 429   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 430 
 431 
 432   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 433   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 434   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 435   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 436   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 437   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 438   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 439   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 440   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 441   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 442   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 443   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 444 
 445   LIR_OprPtr* pointer() const { assert(_value != 0 && is_pointer(), "nullness and type check"); return (LIR_OprPtr*)_value; }
 446   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 447   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 448 
 449   Register as_register()    const;
 450   Register as_register_lo() const;
 451   Register as_register_hi() const;
 452 
 453   Register as_pointer_register() {
 454 #ifdef _LP64
 455     if (is_double_cpu()) {
 456       assert(as_register_lo() == as_register_hi(), "should be a single register");
 457       return as_register_lo();
 458     }
 459 #endif
 460     return as_register();
 461   }
 462 
 463   FloatRegister as_float_reg   () const;
 464   FloatRegister as_double_reg  () const;
 465 #ifdef X86
 466   XMMRegister as_xmm_float_reg () const;
 467   XMMRegister as_xmm_double_reg() const;
 468   // for compatibility with RInfo
 469   int fpu() const { return lo_reg_half(); }
 470 #endif
 471 
 472   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 473   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 474   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 475   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 476   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 477 
 478   void print() const PRODUCT_RETURN;
 479   void print(outputStream* out) const PRODUCT_RETURN;
 480 };
 481 
 482 inline LIR_Opr::OprType as_OprType(BasicType type) {
 483   switch (type) {
 484   case T_INT:      return LIR_Opr::int_type;
 485   case T_LONG:     return LIR_Opr::long_type;
 486   case T_FLOAT:    return LIR_Opr::float_type;
 487   case T_DOUBLE:   return LIR_Opr::double_type;
 488   case T_OBJECT:
 489   case T_PRIMITIVE_OBJECT:
 490   case T_ARRAY:    return LIR_Opr::object_type;
 491   case T_ADDRESS:  return LIR_Opr::address_type;
 492   case T_METADATA: return LIR_Opr::metadata_type;
 493   case T_ILLEGAL:  // fall through
 494   default: ShouldNotReachHere(); return LIR_Opr::unknown_type;
 495   }
 496 }
 497 
 498 inline BasicType as_BasicType(LIR_Opr::OprType t) {
 499   switch (t) {
 500   case LIR_Opr::int_type:     return T_INT;
 501   case LIR_Opr::long_type:    return T_LONG;
 502   case LIR_Opr::float_type:   return T_FLOAT;
 503   case LIR_Opr::double_type:  return T_DOUBLE;
 504   case LIR_Opr::object_type:  return T_OBJECT;
 505   case LIR_Opr::address_type: return T_ADDRESS;
 506   case LIR_Opr::metadata_type:return T_METADATA;
 507   case LIR_Opr::unknown_type: // fall through
 508   default: ShouldNotReachHere();  return T_ILLEGAL;
 509   }
 510 }
 511 
 512 
 513 // LIR_Address
 514 class LIR_Address: public LIR_OprPtr {
 515  friend class LIR_OpVisitState;
 516 
 517  public:
 518   // NOTE: currently these must be the log2 of the scale factor (and
 519   // must also be equivalent to the ScaleFactor enum in
 520   // assembler_i486.hpp)
 521   enum Scale {
 522     times_1  =  0,
 523     times_2  =  1,
 524     times_4  =  2,
 525     times_8  =  3
 526   };
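  // For example, an 8-byte element (long/double) uses times_8 == 3, i.e. the
  // effective address is base + (index << 3) + disp.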
 527 
 528  private:
 529   LIR_Opr   _base;
 530   LIR_Opr   _index;
 531   Scale     _scale;
 532   intx      _disp;
 533   BasicType _type;
 534 
 535  public:
 536   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 537        _base(base)
 538      , _index(index)
 539      , _scale(times_1)
 540      , _disp(0)
 541      , _type(type) { verify(); }
 542 
 543   LIR_Address(LIR_Opr base, intx disp, BasicType type):
 544        _base(base)
 545      , _index(LIR_Opr::illegalOpr())
 546      , _scale(times_1)
 547      , _disp(disp)
 548      , _type(type) { verify(); }
 549 
 550   LIR_Address(LIR_Opr base, BasicType type):
 551        _base(base)
 552      , _index(LIR_Opr::illegalOpr())
 553      , _scale(times_1)
 554      , _disp(0)
 555      , _type(type) { verify(); }
 556 
 557   LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
 558        _base(base)
 559      , _index(index)
 560      , _scale(times_1)
 561      , _disp(disp)
 562      , _type(type) { verify(); }
 563 
 564   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
 565        _base(base)
 566      , _index(index)
 567      , _scale(scale)
 568      , _disp(disp)
 569      , _type(type) { verify(); }
 570 
 571   LIR_Opr base()  const                          { return _base;  }
 572   LIR_Opr index() const                          { return _index; }
 573   Scale   scale() const                          { return _scale; }
 574   intx    disp()  const                          { return _disp;  }
 575 
 576   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 577 
 578   virtual LIR_Address* as_address()              { return this;   }
 579   virtual BasicType type() const                 { return _type; }
 580   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 581 
 582   void verify() const PRODUCT_RETURN;
 583 
 584   static Scale scale(BasicType type);
 585 };
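
// Illustrative example (a sketch, not code from this file): an access to an
// int-array element a[i] would typically be described as
//   new LIR_Address(array_opr, index_opr, LIR_Address::times_4,
//                   arrayOopDesc::base_offset_in_bytes(T_INT), T_INT);
// where array_opr and index_opr are register operands produced by the caller.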
 586 
 587 
 588 // operand factory
 589 class LIR_OprFact: public AllStatic {
 590  public:
 591 
 592   static LIR_Opr illegalOpr;
 593   static LIR_Opr nullOpr;
 594 
 595   static LIR_Opr single_cpu(int reg) {
 596     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 597                                LIR_Opr::int_type             |
 598                                LIR_Opr::cpu_register         |
 599                                LIR_Opr::single_size);
 600   }
 601   static LIR_Opr single_cpu_oop(int reg) {
 602     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 603                                LIR_Opr::object_type          |
 604                                LIR_Opr::cpu_register         |
 605                                LIR_Opr::single_size);
 606   }
 607   static LIR_Opr single_cpu_address(int reg) {
 608     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 609                                LIR_Opr::address_type         |
 610                                LIR_Opr::cpu_register         |
 611                                LIR_Opr::single_size);
 612   }
 613   static LIR_Opr single_cpu_metadata(int reg) {
 614     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 615                                LIR_Opr::metadata_type        |
 616                                LIR_Opr::cpu_register         |
 617                                LIR_Opr::single_size);
 618   }
 619   static LIR_Opr double_cpu(int reg1, int reg2) {
 620     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 621     return (LIR_Opr)(intptr_t)((reg1 << LIR_Opr::reg1_shift) |
 622                                (reg2 << LIR_Opr::reg2_shift) |
 623                                LIR_Opr::long_type            |
 624                                LIR_Opr::cpu_register         |
 625                                LIR_Opr::double_size);
 626   }
 627 
 628   static LIR_Opr single_fpu(int reg) {
 629     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 630                                LIR_Opr::float_type           |
 631                                LIR_Opr::fpu_register         |
 632                                LIR_Opr::single_size);
 633   }
 634 
  // Platform dependent.
 636   static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);
 637 
 638 #ifdef ARM32
 639   static LIR_Opr single_softfp(int reg) {
 640     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 641                                LIR_Opr::float_type           |
 642                                LIR_Opr::cpu_register         |
 643                                LIR_Opr::single_size);
 644   }
 645   static LIR_Opr double_softfp(int reg1, int reg2) {
 646     return (LIR_Opr)(intptr_t)((reg1 << LIR_Opr::reg1_shift) |
 647                                (reg2 << LIR_Opr::reg2_shift) |
 648                                LIR_Opr::double_type          |
 649                                LIR_Opr::cpu_register         |
 650                                LIR_Opr::double_size);
 651   }
 652 #endif // ARM32
 653 
 654 #if defined(X86)
 655   static LIR_Opr single_xmm(int reg) {
 656     return (LIR_Opr)(intptr_t)((reg << LIR_Opr::reg1_shift) |
 657                                LIR_Opr::float_type          |
 658                                LIR_Opr::fpu_register        |
 659                                LIR_Opr::single_size         |
 660                                LIR_Opr::is_xmm_mask);
 661   }
 662   static LIR_Opr double_xmm(int reg) {
 663     return (LIR_Opr)(intptr_t)((reg << LIR_Opr::reg1_shift) |
 664                                (reg << LIR_Opr::reg2_shift) |
 665                                LIR_Opr::double_type         |
 666                                LIR_Opr::fpu_register        |
 667                                LIR_Opr::double_size         |
 668                                LIR_Opr::is_xmm_mask);
 669   }
 670 #endif // X86
 671 
 672   static LIR_Opr virtual_register(int index, BasicType type) {
 673     if (index > LIR_Opr::vreg_max) {
      // Running out of virtual registers. The caller should bail out.
 675       return illegalOpr;
 676     }
 677 
 678     LIR_Opr res;
 679     switch (type) {
 680       case T_OBJECT: // fall through
 681       case T_PRIMITIVE_OBJECT: // fall through
 682       case T_ARRAY:
 683         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift)  |
 684                                             LIR_Opr::object_type  |
 685                                             LIR_Opr::cpu_register |
 686                                             LIR_Opr::single_size  |
 687                                             LIR_Opr::virtual_mask);
 688         break;
 689 
 690       case T_METADATA:
 691         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift)  |
 692                                             LIR_Opr::metadata_type|
 693                                             LIR_Opr::cpu_register |
 694                                             LIR_Opr::single_size  |
 695                                             LIR_Opr::virtual_mask);
 696         break;
 697 
 698       case T_INT:
 699         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 700                                   LIR_Opr::int_type              |
 701                                   LIR_Opr::cpu_register          |
 702                                   LIR_Opr::single_size           |
 703                                   LIR_Opr::virtual_mask);
 704         break;
 705 
 706       case T_ADDRESS:
 707         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 708                                   LIR_Opr::address_type          |
 709                                   LIR_Opr::cpu_register          |
 710                                   LIR_Opr::single_size           |
 711                                   LIR_Opr::virtual_mask);
 712         break;
 713 
 714       case T_LONG:
 715         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 716                                   LIR_Opr::long_type             |
 717                                   LIR_Opr::cpu_register          |
 718                                   LIR_Opr::double_size           |
 719                                   LIR_Opr::virtual_mask);
 720         break;
 721 
 722 #ifdef __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
                                  LIR_Opr::float_type            |
                                  LIR_Opr::cpu_register          |
                                  LIR_Opr::single_size           |
                                  LIR_Opr::virtual_mask);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
                                  LIR_Opr::double_type           |
                                  LIR_Opr::cpu_register          |
                                  LIR_Opr::double_size           |
                                  LIR_Opr::virtual_mask);
        break;
 737 #else // __SOFTFP__
 738       case T_FLOAT:
 739         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 740                                   LIR_Opr::float_type           |
 741                                   LIR_Opr::fpu_register         |
 742                                   LIR_Opr::single_size          |
 743                                   LIR_Opr::virtual_mask);
 744         break;
 745 
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
                                  LIR_Opr::double_type           |
                                  LIR_Opr::fpu_register          |
                                  LIR_Opr::double_size           |
                                  LIR_Opr::virtual_mask);
        break;
 753 #endif // __SOFTFP__
 754       default:       ShouldNotReachHere(); res = illegalOpr;
 755     }
 756 
 757 #ifdef ASSERT
 758     res->validate_type();
 759     assert(res->vreg_number() == index, "conversion check");
 760     assert(index >= LIR_Opr::vreg_base, "must start at vreg_base");
 761     assert(index <= (max_jint >> LIR_Opr::data_shift), "index is too big");
 762 
 763     // old-style calculation; check if old and new method are equal
 764     LIR_Opr::OprType t = as_OprType(type);
 765 #ifdef __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
                                          t                              |
                                          LIR_Opr::cpu_register          |
                                          LIR_Opr::size_for(type)        |
                                          LIR_Opr::virtual_mask);
#else // __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) | t |
                                          ((type == T_FLOAT || type == T_DOUBLE) ? LIR_Opr::fpu_register : LIR_Opr::cpu_register) |
                                          LIR_Opr::size_for(type) | LIR_Opr::virtual_mask);
 774     assert(res == old_res, "old and new method not equal");
 775 #endif // __SOFTFP__
 776 #endif // ASSERT
 777 
 778     return res;
 779   }
 780 
  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack slot using indices 2 and 3 always has
  // index 2.
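  // For example (illustrative): a double spilled to stack slots 2 and 3 is
  // created as stack(2, T_DOUBLE); double_size records that it occupies two slots.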
 784   static LIR_Opr stack(int index, BasicType type) {
 785     LIR_Opr res;
 786     switch (type) {
 787       case T_PRIMITIVE_OBJECT: // fall through
 788       case T_OBJECT: // fall through
 789       case T_ARRAY:
 790         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 791                                   LIR_Opr::object_type           |
 792                                   LIR_Opr::stack_value           |
 793                                   LIR_Opr::single_size);
 794         break;
 795 
 796       case T_METADATA:
 797         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 798                                   LIR_Opr::metadata_type         |
 799                                   LIR_Opr::stack_value           |
 800                                   LIR_Opr::single_size);
 801         break;
 802       case T_INT:
 803         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 804                                   LIR_Opr::int_type              |
 805                                   LIR_Opr::stack_value           |
 806                                   LIR_Opr::single_size);
 807         break;
 808 
 809       case T_ADDRESS:
 810         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 811                                   LIR_Opr::address_type          |
 812                                   LIR_Opr::stack_value           |
 813                                   LIR_Opr::single_size);
 814         break;
 815 
 816       case T_LONG:
 817         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 818                                   LIR_Opr::long_type             |
 819                                   LIR_Opr::stack_value           |
 820                                   LIR_Opr::double_size);
 821         break;
 822 
 823       case T_FLOAT:
 824         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 825                                   LIR_Opr::float_type            |
 826                                   LIR_Opr::stack_value           |
 827                                   LIR_Opr::single_size);
 828         break;
 829       case T_DOUBLE:
 830         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 831                                   LIR_Opr::double_type           |
 832                                   LIR_Opr::stack_value           |
 833                                   LIR_Opr::double_size);
 834         break;
 835 
 836       default:       ShouldNotReachHere(); res = illegalOpr;
 837     }
 838 
 839 #ifdef ASSERT
    assert(index >= 0, "index must be non-negative");
 841     assert(index <= (max_jint >> LIR_Opr::data_shift), "index is too big");
 842 
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
                                          LIR_Opr::stack_value           |
                                          as_OprType(type)               |
                                          LIR_Opr::size_for(type));
 847     assert(res == old_res, "old and new method not equal");
 848 #endif
 849 
 850     return res;
 851   }
 852 
 853   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 854   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 855   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 856   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 857   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 858   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 859   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 860   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 861   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 862   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 863   static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
 864 
 865   static LIR_Opr value_type(ValueType* type);
 866 };
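
// Illustrative example (a sketch of typical factory usage, not code from this
// file): a LIRGenerator-style client might write
//   LIR_Opr vreg = LIR_OprFact::virtual_register(LIR_Opr::vreg_base, T_INT);
//   LIR_Opr c17  = LIR_OprFact::intConst(17);
//   LIR_Opr slot = LIR_OprFact::stack(3, T_OBJECT);
// giving a virtual cpu register, a constant operand and a single stack slot
// holding an oop, respectively.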
 867 
 868 
 869 //-------------------------------------------------------------------------------
 870 //                   LIR Instructions
 871 //-------------------------------------------------------------------------------
 872 //
 873 // Note:
 874 //  - every instruction has a result operand
//  - every instruction has a CodeEmitInfo operand (can be revisited later)
//  - every instruction has a LIR_OpCode operand
//  - LIR_OpN means an instruction that has N input operands
 878 //
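// For example, an integer add "res = a + b" is represented by a LIR_Op2 with
// code lir_add: it has two input operands and one result operand.
//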
 879 // class hierarchy:
 880 //
 881 class  LIR_Op;
 882 class    LIR_Op0;
 883 class      LIR_OpLabel;
 884 class    LIR_Op1;
 885 class      LIR_OpBranch;
 886 class      LIR_OpConvert;
 887 class      LIR_OpAllocObj;
 888 class      LIR_OpReturn;
 889 class      LIR_OpRoundFP;
 890 class    LIR_Op2;
 891 class    LIR_OpDelay;
 892 class    LIR_Op3;
 893 class      LIR_OpAllocArray;
 894 class    LIR_OpCall;
 895 class      LIR_OpJavaCall;
 896 class      LIR_OpRTCall;
 897 class    LIR_OpArrayCopy;
 898 class    LIR_OpUpdateCRC32;
 899 class    LIR_OpLock;
 900 class    LIR_OpTypeCheck;
 901 class    LIR_OpFlattenedArrayCheck;
 902 class    LIR_OpNullFreeArrayCheck;
 903 class    LIR_OpSubstitutabilityCheck;
 904 class    LIR_OpCompareAndSwap;
 905 class    LIR_OpLoadKlass;
 906 class    LIR_OpProfileCall;
 907 class    LIR_OpProfileType;
 908 class    LIR_OpProfileInlineType;
 909 #ifdef ASSERT
 910 class    LIR_OpAssert;
 911 #endif
 912 
 913 // LIR operation codes
 914 enum LIR_Code {
 915     lir_none
 916   , begin_op0
 917       , lir_label
 918       , lir_nop
 919       , lir_std_entry
 920       , lir_osr_entry
 921       , lir_fpop_raw
 922       , lir_breakpoint
 923       , lir_rtcall
 924       , lir_membar
 925       , lir_membar_acquire
 926       , lir_membar_release
 927       , lir_membar_loadload
 928       , lir_membar_storestore
 929       , lir_membar_loadstore
 930       , lir_membar_storeload
 931       , lir_get_thread
 932       , lir_on_spin_wait
 933       , lir_check_orig_pc
 934   , end_op0
 935   , begin_op1
 936       , lir_fxch
 937       , lir_fld
 938       , lir_push
 939       , lir_pop
 940       , lir_null_check
 941       , lir_return
 942       , lir_leal
 943       , lir_branch
 944       , lir_cond_float_branch
 945       , lir_move
 946       , lir_convert
 947       , lir_alloc_object
 948       , lir_monaddr
 949       , lir_roundfp
 950       , lir_safepoint
 951       , lir_unwind
 952       , lir_load_klass
 953   , end_op1
 954   , begin_op2
 955       , lir_cmp
 956       , lir_cmp_l2i
 957       , lir_ucmp_fd2i
 958       , lir_cmp_fd2i
 959       , lir_cmove
 960       , lir_add
 961       , lir_sub
 962       , lir_mul
 963       , lir_div
 964       , lir_rem
 965       , lir_sqrt
 966       , lir_abs
 967       , lir_neg
 968       , lir_tan
 969       , lir_log10
 970       , lir_logic_and
 971       , lir_logic_or
 972       , lir_logic_xor
 973       , lir_shl
 974       , lir_shr
 975       , lir_ushr
 976       , lir_alloc_array
 977       , lir_throw
 978       , lir_xadd
 979       , lir_xchg
 980   , end_op2
 981   , begin_op3
 982       , lir_idiv
 983       , lir_irem
 984       , lir_fmad
 985       , lir_fmaf
 986   , end_op3
 987   , begin_opJavaCall
 988       , lir_static_call
 989       , lir_optvirtual_call
 990       , lir_icvirtual_call
 991       , lir_dynamic_call
 992   , end_opJavaCall
 993   , begin_opArrayCopy
 994       , lir_arraycopy
 995   , end_opArrayCopy
 996   , begin_opUpdateCRC32
 997       , lir_updatecrc32
 998   , end_opUpdateCRC32
 999   , begin_opLock
1000     , lir_lock
1001     , lir_unlock
1002   , end_opLock
1003   , begin_delay_slot
1004     , lir_delay_slot
1005   , end_delay_slot
1006   , begin_opTypeCheck
1007     , lir_instanceof
1008     , lir_checkcast
1009     , lir_store_check
1010   , end_opTypeCheck
1011   , begin_opFlattenedArrayCheck
1012     , lir_flattened_array_check
1013   , end_opFlattenedArrayCheck
1014   , begin_opNullFreeArrayCheck
1015     , lir_null_free_array_check
1016   , end_opNullFreeArrayCheck
1017   , begin_opSubstitutabilityCheck
1018     , lir_substitutability_check
1019   , end_opSubstitutabilityCheck
1020   , begin_opCompareAndSwap
1021     , lir_cas_long
1022     , lir_cas_obj
1023     , lir_cas_int
1024   , end_opCompareAndSwap
1025   , begin_opMDOProfile
1026     , lir_profile_call
1027     , lir_profile_type
1028     , lir_profile_inline_type
1029   , end_opMDOProfile
1030   , begin_opAssert
1031     , lir_assert
1032   , end_opAssert
1033 };
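
// Note: the begin_* / end_* entries are not real opcodes; they only delimit
// ranges for LIR_Op::is_in_range, e.g. the LIR_Op1 constructor checks
// is_in_range(code, begin_op1, end_op1).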
1034 
1035 
1036 enum LIR_Condition {
1037     lir_cond_equal
1038   , lir_cond_notEqual
1039   , lir_cond_less
1040   , lir_cond_lessEqual
1041   , lir_cond_greaterEqual
1042   , lir_cond_greater
1043   , lir_cond_belowEqual
1044   , lir_cond_aboveEqual
1045   , lir_cond_always
1046   , lir_cond_unknown = -1
1047 };
1048 
1049 
1050 enum LIR_PatchCode {
1051   lir_patch_none,
1052   lir_patch_low,
1053   lir_patch_high,
1054   lir_patch_normal
1055 };
1056 
1057 
1058 enum LIR_MoveKind {
1059   lir_move_normal,
1060   lir_move_volatile,
1061   lir_move_wide,
1062   lir_move_max_flag
1063 };
1064 
1065 
1066 // --------------------------------------------------
1067 // LIR_Op
1068 // --------------------------------------------------
1069 class LIR_Op: public CompilationResourceObj {
1070  friend class LIR_OpVisitState;
1071 
1072 #ifdef ASSERT
1073  private:
1074   const char *  _file;
1075   int           _line;
1076 #endif
1077 
1078  protected:
1079   LIR_Opr       _result;
1080   unsigned short _code;
1081   unsigned short _flags;
1082   CodeEmitInfo* _info;
1083   int           _id;     // value id for register allocation
1084   int           _fpu_pop_count;
1085   Instruction*  _source; // for debugging
1086 
1087   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1088 
1089  protected:
1090   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1091 
1092  public:
1093   LIR_Op()
1094     :
1095 #ifdef ASSERT
1096       _file(NULL)
1097     , _line(0),
1098 #endif
1099       _result(LIR_OprFact::illegalOpr)
1100     , _code(lir_none)
1101     , _flags(0)
1102     , _info(NULL)
1103     , _id(-1)
1104     , _fpu_pop_count(0)
1105     , _source(NULL) {}
1106 
1107   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1108     :
1109 #ifdef ASSERT
1110       _file(NULL)
1111     , _line(0),
1112 #endif
1113       _result(result)
1114     , _code(code)
1115     , _flags(0)
1116     , _info(info)
1117     , _id(-1)
1118     , _fpu_pop_count(0)
1119     , _source(NULL) {}
1120 
1121   CodeEmitInfo* info() const                  { return _info;   }
1122   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1123   LIR_Opr result_opr() const                  { return _result; }
1124   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1125 
1126 #ifdef ASSERT
1127   void set_file_and_line(const char * file, int line) {
1128     _file = file;
1129     _line = line;
1130   }
1131 #endif
1132 
1133   virtual const char * name() const PRODUCT_RETURN0;
1134   virtual void visit(LIR_OpVisitState* state);
1135 
1136   int id()             const                  { return _id;     }
1137   void set_id(int id)                         { _id = id; }
1138 
1139   // FPU stack simulation helpers -- only used on Intel
1140   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1141   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1142   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1143 
1144   Instruction* source() const                 { return _source; }
1145   void set_source(Instruction* ins)           { _source = ins; }
1146 
1147   virtual void emit_code(LIR_Assembler* masm) = 0;
1148   virtual void print_instr(outputStream* out) const   = 0;
1149   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1150 
1151   virtual bool is_patching() { return false; }
1152   virtual LIR_OpCall* as_OpCall() { return NULL; }
1153   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1154   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1155   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1156   virtual LIR_OpLock* as_OpLock() { return NULL; }
1157   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1158   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1159   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1160   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1161   virtual LIR_OpReturn* as_OpReturn() { return NULL; }
1162   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1163   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1164   virtual LIR_Op0* as_Op0() { return NULL; }
1165   virtual LIR_Op1* as_Op1() { return NULL; }
1166   virtual LIR_Op2* as_Op2() { return NULL; }
1167   virtual LIR_Op3* as_Op3() { return NULL; }
1168   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1169   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
1170   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1171   virtual LIR_OpFlattenedArrayCheck* as_OpFlattenedArrayCheck() { return NULL; }
1172   virtual LIR_OpNullFreeArrayCheck* as_OpNullFreeArrayCheck() { return NULL; }
1173   virtual LIR_OpSubstitutabilityCheck* as_OpSubstitutabilityCheck() { return NULL; }
1174   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1175   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return NULL; }
1176   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1177   virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
1178   virtual LIR_OpProfileInlineType* as_OpProfileInlineType() { return NULL; }
1179 #ifdef ASSERT
1180   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
1181 #endif
1182 
1183   virtual void verify() const {}
1184 };
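
// Illustrative example (not code from this file) of the checked-downcast
// pattern provided by the as_OpXxx() virtuals above: callers test the result
// for NULL instead of using RTTI, e.g.
//   LIR_OpBranch* branch = op->as_OpBranch();
//   if (branch != NULL) {
//     // op is known to be a LIR_OpBranch here
//   }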
1185 
1186 // for calls
1187 class LIR_OpCall: public LIR_Op {
1188  friend class LIR_OpVisitState;
1189 
1190  protected:
1191   address      _addr;
1192   LIR_OprList* _arguments;
1193  protected:
1194   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1195              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1196     : LIR_Op(code, result, info)
1197     , _addr(addr)
1198     , _arguments(arguments) {}
1199 
1200  public:
1201   address addr() const                           { return _addr; }
1202   const LIR_OprList* arguments() const           { return _arguments; }
1203   virtual LIR_OpCall* as_OpCall()                { return this; }
1204 };
1205 
1206 
1207 // --------------------------------------------------
1208 // LIR_OpJavaCall
1209 // --------------------------------------------------
1210 class LIR_OpJavaCall: public LIR_OpCall {
1211  friend class LIR_OpVisitState;
1212 
1213  private:
1214   ciMethod* _method;
1215   LIR_Opr   _receiver;
1216   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1217 
1218  public:
1219   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1220                  LIR_Opr receiver, LIR_Opr result,
1221                  address addr, LIR_OprList* arguments,
1222                  CodeEmitInfo* info)
1223   : LIR_OpCall(code, addr, result, arguments, info)
1224   , _method(method)
1225   , _receiver(receiver)
1226   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1227   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1228 
1229   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1230                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1231                  LIR_OprList* arguments, CodeEmitInfo* info)
1232   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1233   , _method(method)
1234   , _receiver(receiver)
1235   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1236   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1237 
1238   LIR_Opr receiver() const                       { return _receiver; }
1239   ciMethod* method() const                       { return _method;   }
1240 
1241   // JSR 292 support.
1242   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1243   bool is_method_handle_invoke() const {
1244     return method()->is_compiled_lambda_form() ||   // Java-generated lambda form
1245            method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1246   }
1247 
1248   virtual void emit_code(LIR_Assembler* masm);
1249   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1250   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1251 
1252   bool maybe_return_as_fields(ciInlineKlass** vk = NULL) const;
1253 };
1254 
1255 // --------------------------------------------------
1256 // LIR_OpLabel
1257 // --------------------------------------------------
1258 // Location where a branch can continue
1259 class LIR_OpLabel: public LIR_Op {
1260  friend class LIR_OpVisitState;
1261 
1262  private:
1263   Label* _label;
1264  public:
1265   LIR_OpLabel(Label* lbl)
1266    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1267    , _label(lbl)                                 {}
1268   Label* label() const                           { return _label; }
1269 
1270   virtual void emit_code(LIR_Assembler* masm);
1271   virtual LIR_OpLabel* as_OpLabel() { return this; }
1272   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1273 };
1274 
1275 // LIR_OpArrayCopy
1276 class LIR_OpArrayCopy: public LIR_Op {
1277  friend class LIR_OpVisitState;
1278 
1279  private:
1280   ArrayCopyStub*  _stub;
1281   LIR_Opr   _src;
1282   LIR_Opr   _src_pos;
1283   LIR_Opr   _dst;
1284   LIR_Opr   _dst_pos;
1285   LIR_Opr   _length;
1286   LIR_Opr   _tmp;
1287   ciArrayKlass* _expected_type;
1288   int       _flags;
1289 
1290 public:
1291   enum Flags {
1292     src_null_check         = 1 << 0,
1293     dst_null_check         = 1 << 1,
1294     src_pos_positive_check = 1 << 2,
1295     dst_pos_positive_check = 1 << 3,
1296     length_positive_check  = 1 << 4,
1297     src_range_check        = 1 << 5,
1298     dst_range_check        = 1 << 6,
1299     type_check             = 1 << 7,
1300     overlapping            = 1 << 8,
1301     unaligned              = 1 << 9,
1302     src_objarray           = 1 << 10,
1303     dst_objarray           = 1 << 11,
1304     always_slow_path       = 1 << 12,
1305     src_inlinetype_check   = 1 << 13,
1306     dst_inlinetype_check   = 1 << 14,
1307     all_flags              = (1 << 15) - 1
1308   };
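  // Illustrative example: a copy that still needs both null checks and an
  // element type check carries flags (src_null_check | dst_null_check | type_check);
  // all_flags is the union of all flags above.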
1309 
1310   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1311                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1312 
1313   LIR_Opr src() const                            { return _src; }
1314   LIR_Opr src_pos() const                        { return _src_pos; }
1315   LIR_Opr dst() const                            { return _dst; }
1316   LIR_Opr dst_pos() const                        { return _dst_pos; }
1317   LIR_Opr length() const                         { return _length; }
1318   LIR_Opr tmp() const                            { return _tmp; }
1319   int flags() const                              { return _flags; }
1320   ciArrayKlass* expected_type() const            { return _expected_type; }
1321   ArrayCopyStub* stub() const                    { return _stub; }
1322 
1323   virtual void emit_code(LIR_Assembler* masm);
1324   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1325   void print_instr(outputStream* out) const PRODUCT_RETURN;
1326 };
1327 
1328 // LIR_OpUpdateCRC32
1329 class LIR_OpUpdateCRC32: public LIR_Op {
1330   friend class LIR_OpVisitState;
1331 
1332 private:
1333   LIR_Opr   _crc;
1334   LIR_Opr   _val;
1335 
1336 public:
1337 
1338   LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
1339 
1340   LIR_Opr crc() const                            { return _crc; }
1341   LIR_Opr val() const                            { return _val; }
1342 
1343   virtual void emit_code(LIR_Assembler* masm);
1344   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
1345   void print_instr(outputStream* out) const PRODUCT_RETURN;
1346 };
1347 
1348 // --------------------------------------------------
1349 // LIR_Op0
1350 // --------------------------------------------------
1351 class LIR_Op0: public LIR_Op {
1352  friend class LIR_OpVisitState;
1353 
1354  public:
1355   LIR_Op0(LIR_Code code)
1356    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1357   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1358    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1359 
1360   virtual void emit_code(LIR_Assembler* masm);
1361   virtual LIR_Op0* as_Op0() { return this; }
1362   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1363 };
1364 
1365 
1366 // --------------------------------------------------
1367 // LIR_Op1
1368 // --------------------------------------------------
1369 
1370 class LIR_Op1: public LIR_Op {
1371  friend class LIR_OpVisitState;
1372 
1373  protected:
1374   LIR_Opr         _opr;   // input operand
1375   BasicType       _type;  // operand type
1376   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1377 
1378   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1379 
1380   void set_kind(LIR_MoveKind kind) {
1381     assert(code() == lir_move, "must be");
1382     _flags = kind;
1383   }
1384 
1385  public:
1386   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1387     : LIR_Op(code, result, info)
1388     , _opr(opr)
1389     , _type(type)
1390     , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1391 
1392   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1393     : LIR_Op(code, result, info)
1394     , _opr(opr)
1395     , _type(type)
1396     , _patch(patch)                    {
1397     assert(code == lir_move, "must be");
1398     set_kind(kind);
1399   }
1400 
1401   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1402     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1403     , _opr(opr)
1404     , _type(T_ILLEGAL)
1405     , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1406 
1407   LIR_Opr in_opr()           const               { return _opr;   }
1408   LIR_PatchCode patch_code() const               { return _patch; }
1409   BasicType type()           const               { return _type;  }
1410 
1411   LIR_MoveKind move_kind() const {
1412     assert(code() == lir_move, "must be");
1413     return (LIR_MoveKind)_flags;
1414   }
1415 
1416   virtual bool is_patching() { return _patch != lir_patch_none; }
1417   virtual void emit_code(LIR_Assembler* masm);
1418   virtual LIR_Op1* as_Op1() { return this; }
1419   virtual const char * name() const PRODUCT_RETURN0;
1420 
1421   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1422 
1423   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1424   virtual void verify() const;
1425 };
1426 
1427 
1428 // for runtime calls
1429 class LIR_OpRTCall: public LIR_OpCall {
1430  friend class LIR_OpVisitState;
1431 
1432  private:
1433   LIR_Opr _tmp;
1434  public:
1435   LIR_OpRTCall(address addr, LIR_Opr tmp,
1436                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1437     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1438     , _tmp(tmp) {}
1439 
1440   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1441   virtual void emit_code(LIR_Assembler* masm);
1442   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1443 
1444   LIR_Opr tmp() const                            { return _tmp; }
1445 
1446   virtual void verify() const;
1447 };
1448 
1449 
1450 class LIR_OpBranch: public LIR_Op {
1451  friend class LIR_OpVisitState;
1452 
1453  private:
1454   LIR_Condition _cond;
1455   Label*        _label;
1456   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1457   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1458   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1459 
1460  public:
1461   LIR_OpBranch(LIR_Condition cond, Label* lbl)
1462     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1463     , _cond(cond)
1464     , _label(lbl)
1465     , _block(NULL)
1466     , _ublock(NULL)
1467     , _stub(NULL) { }
1468 
1469   LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
1470   LIR_OpBranch(LIR_Condition cond, CodeStub* stub);
1471 
1472   // for unordered comparisons
1473   LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);
1474 
1475   LIR_Condition cond()        const              { return _cond;        }
1476   Label*        label()       const              { return _label;       }
1477   BlockBegin*   block()       const              { return _block;       }
1478   BlockBegin*   ublock()      const              { return _ublock;      }
1479   CodeStub*     stub()        const              { return _stub;       }
1480 
1481   void          change_block(BlockBegin* b);
1482   void          change_ublock(BlockBegin* b);
1483   void          negate_cond();
1484 
1485   virtual void emit_code(LIR_Assembler* masm);
1486   virtual LIR_OpBranch* as_OpBranch() { return this; }
1487   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1488 };
1489 
1490 class LIR_OpReturn: public LIR_Op1 {
1491  friend class LIR_OpVisitState;
1492 
1493  private:
1494   C1SafepointPollStub* _stub;
1495 
1496  public:
1497   LIR_OpReturn(LIR_Opr opr);
1498 
1499   C1SafepointPollStub* stub() const { return _stub; }
1500   virtual LIR_OpReturn* as_OpReturn() { return this; }
1501 };
1502 
1503 class ConversionStub;
1504 
1505 class LIR_OpConvert: public LIR_Op1 {
1506  friend class LIR_OpVisitState;
1507 
1508  private:
1509    Bytecodes::Code _bytecode;
1510    ConversionStub* _stub;
1511 
1512  public:
1513    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1514      : LIR_Op1(lir_convert, opr, result)
1515      , _bytecode(code)
1516      , _stub(stub)                               {}
1517 
1518   Bytecodes::Code bytecode() const               { return _bytecode; }
1519   ConversionStub* stub() const                   { return _stub; }
1520 
1521   virtual void emit_code(LIR_Assembler* masm);
1522   virtual LIR_OpConvert* as_OpConvert() { return this; }
1523   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1524 
1525   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1526 };
1527 
1528 
1529 // LIR_OpAllocObj
1530 class LIR_OpAllocObj : public LIR_Op1 {
1531  friend class LIR_OpVisitState;
1532 
1533  private:
1534   LIR_Opr _tmp1;
1535   LIR_Opr _tmp2;
1536   LIR_Opr _tmp3;
1537   LIR_Opr _tmp4;
1538   int     _hdr_size;
1539   int     _obj_size;
1540   CodeStub* _stub;
1541   bool    _init_check;
1542 
1543  public:
1544   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1545                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1546                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1547     : LIR_Op1(lir_alloc_object, klass, result)
1548     , _tmp1(t1)
1549     , _tmp2(t2)
1550     , _tmp3(t3)
1551     , _tmp4(t4)
1552     , _hdr_size(hdr_size)
1553     , _obj_size(obj_size)
1554     , _stub(stub)
1555     , _init_check(init_check)                    { }
1556 
1557   LIR_Opr klass()        const                   { return in_opr();     }
1558   LIR_Opr obj()          const                   { return result_opr(); }
1559   LIR_Opr tmp1()         const                   { return _tmp1;        }
1560   LIR_Opr tmp2()         const                   { return _tmp2;        }
1561   LIR_Opr tmp3()         const                   { return _tmp3;        }
1562   LIR_Opr tmp4()         const                   { return _tmp4;        }
1563   int     header_size()  const                   { return _hdr_size;    }
1564   int     object_size()  const                   { return _obj_size;    }
1565   bool    init_check()   const                   { return _init_check;  }
1566   CodeStub* stub()       const                   { return _stub;        }
1567 
1568   virtual void emit_code(LIR_Assembler* masm);
1569   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1570   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1571 };
1572 
1573 
1574 // LIR_OpRoundFP
1575 class LIR_OpRoundFP : public LIR_Op1 {
1576  friend class LIR_OpVisitState;
1577 
1578  private:
1579   LIR_Opr _tmp;
1580 
1581  public:
1582   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1583     : LIR_Op1(lir_roundfp, reg, result)
1584     , _tmp(stack_loc_temp) {}
1585 
1586   LIR_Opr tmp() const                            { return _tmp; }
1587   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1588   void print_instr(outputStream* out) const PRODUCT_RETURN;
1589 };
1590 
1591 // LIR_OpTypeCheck
1592 class LIR_OpTypeCheck: public LIR_Op {
1593  friend class LIR_OpVisitState;
1594 
1595  private:
1596   LIR_Opr       _object;
1597   LIR_Opr       _array;
1598   ciKlass*      _klass;
1599   LIR_Opr       _tmp1;
1600   LIR_Opr       _tmp2;
1601   LIR_Opr       _tmp3;
1602   bool          _fast_check;
1603   CodeEmitInfo* _info_for_patch;
1604   CodeEmitInfo* _info_for_exception;
1605   CodeStub*     _stub;
1606   ciMethod*     _profiled_method;
1607   int           _profiled_bci;
1608   bool          _should_profile;
1609   bool          _need_null_check;
1610 
1611 public:
1612   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1613                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1614                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, bool need_null_check = true);
1615   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1616                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1617 
1618   LIR_Opr object() const                         { return _object;         }
1619   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1620   LIR_Opr tmp1() const                           { return _tmp1;           }
1621   LIR_Opr tmp2() const                           { return _tmp2;           }
1622   LIR_Opr tmp3() const                           { return _tmp3;           }
1623   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1624   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1625   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1626   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1627   CodeStub* stub() const                         { return _stub;           }
1628 
1629   // MethodData* profiling
1630   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1631   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1632   void set_should_profile(bool b)                { _should_profile = b;       }
1633   ciMethod* profiled_method() const              { return _profiled_method;   }
1634   int       profiled_bci() const                 { return _profiled_bci;      }
1635   bool      should_profile() const               { return _should_profile;    }
1636   bool      need_null_check() const              { return _need_null_check;   }
1637   virtual bool is_patching() { return _info_for_patch != NULL; }
1638   virtual void emit_code(LIR_Assembler* masm);
1639   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1640   void print_instr(outputStream* out) const PRODUCT_RETURN;
1641 };
1642 
1643 // LIR_OpFlattenedArrayCheck
1644 class LIR_OpFlattenedArrayCheck: public LIR_Op {
1645  friend class LIR_OpVisitState;
1646 
1647  private:
1648   LIR_Opr       _array;
1649   LIR_Opr       _value;
1650   LIR_Opr       _tmp;
1651   CodeStub*     _stub;
1652 public:
1653   LIR_OpFlattenedArrayCheck(LIR_Opr array, LIR_Opr value, LIR_Opr tmp, CodeStub* stub);
1654   LIR_Opr array() const                          { return _array;         }
1655   LIR_Opr value() const                          { return _value;         }
1656   LIR_Opr tmp() const                            { return _tmp;           }
1657   CodeStub* stub() const                         { return _stub;          }
1658 
1659   virtual void emit_code(LIR_Assembler* masm);
1660   virtual LIR_OpFlattenedArrayCheck* as_OpFlattenedArrayCheck() { return this; }
1661   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1662 };
1663 
1664 // LIR_OpNullFreeArrayCheck
1665 class LIR_OpNullFreeArrayCheck: public LIR_Op {
1666  friend class LIR_OpVisitState;
1667 
1668  private:
1669   LIR_Opr       _array;
1670   LIR_Opr       _tmp;
1671 public:
1672   LIR_OpNullFreeArrayCheck(LIR_Opr array, LIR_Opr tmp);
1673   LIR_Opr array() const                          { return _array;         }
1674   LIR_Opr tmp() const                            { return _tmp;           }
1675 
1676   virtual void emit_code(LIR_Assembler* masm);
1677   virtual LIR_OpNullFreeArrayCheck* as_OpNullFreeArrayCheck() { return this; }
1678   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1679 };
1680 
1681 class LIR_OpSubstitutabilityCheck: public LIR_Op {
1682  friend class LIR_OpVisitState;
1683 
1684  private:
1685   LIR_Opr       _left;
1686   LIR_Opr       _right;
1687   LIR_Opr       _equal_result;
1688   LIR_Opr       _not_equal_result;
1689   LIR_Opr       _tmp1;
1690   LIR_Opr       _tmp2;
1691   ciKlass*      _left_klass;
1692   ciKlass*      _right_klass;
1693   LIR_Opr       _left_klass_op;
1694   LIR_Opr       _right_klass_op;
1695   CodeStub*     _stub;
1696 public:
1697   LIR_OpSubstitutabilityCheck(LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr equal_result, LIR_Opr not_equal_result,
1698                               LIR_Opr tmp1, LIR_Opr tmp2,
1699                               ciKlass* left_klass, ciKlass* right_klass, LIR_Opr left_klass_op, LIR_Opr right_klass_op,
1700                               CodeEmitInfo* info, CodeStub* stub);
1701 
1702   LIR_Opr left() const             { return _left; }
1703   LIR_Opr right() const            { return _right; }
1704   LIR_Opr equal_result() const     { return _equal_result; }
1705   LIR_Opr not_equal_result() const { return _not_equal_result; }
1706   LIR_Opr tmp1() const             { return _tmp1; }
1707   LIR_Opr tmp2() const             { return _tmp2; }
1708   ciKlass* left_klass() const      { return _left_klass; }
1709   ciKlass* right_klass() const     { return _right_klass; }
1710   LIR_Opr left_klass_op() const    { return _left_klass_op; }
1711   LIR_Opr right_klass_op() const   { return _right_klass_op; }
1712   CodeStub* stub() const           { return _stub; }
1713 
1714   virtual void emit_code(LIR_Assembler* masm);
1715   virtual LIR_OpSubstitutabilityCheck* as_OpSubstitutabilityCheck() { return this; }
1716   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1717 };
1718 
1719 // LIR_Op2
1720 class LIR_Op2: public LIR_Op {
1721  friend class LIR_OpVisitState;
1722 
1723   int  _fpu_stack_size; // for sin/cos implementation on Intel
1724 
1725  protected:
1726   LIR_Opr   _opr1;
1727   LIR_Opr   _opr2;
1728   BasicType _type;
1729   LIR_Opr   _tmp1;
1730   LIR_Opr   _tmp2;
1731   LIR_Opr   _tmp3;
1732   LIR_Opr   _tmp4;
1733   LIR_Opr   _tmp5;
1734   LIR_Condition _condition;
1735 
1736   void verify() const;
1737 
1738  public:
1739   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
1740     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1741     , _fpu_stack_size(0)
1742     , _opr1(opr1)
1743     , _opr2(opr2)
1744     , _type(T_ILLEGAL)
1745     , _tmp1(LIR_OprFact::illegalOpr)
1746     , _tmp2(LIR_OprFact::illegalOpr)
1747     , _tmp3(LIR_OprFact::illegalOpr)
1748     , _tmp4(LIR_OprFact::illegalOpr)
1749     , _tmp5(LIR_OprFact::illegalOpr)
1750     , _condition(condition) {
1751     assert(code == lir_cmp || code == lir_assert, "code check");
1752   }
1753 
1754   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1755     : LIR_Op(code, result, NULL)
1756     , _fpu_stack_size(0)
1757     , _opr1(opr1)
1758     , _opr2(opr2)
1759     , _type(type)
1760     , _tmp1(LIR_OprFact::illegalOpr)
1761     , _tmp2(LIR_OprFact::illegalOpr)
1762     , _tmp3(LIR_OprFact::illegalOpr)
1763     , _tmp4(LIR_OprFact::illegalOpr)
1764     , _tmp5(LIR_OprFact::illegalOpr)
1765     , _condition(condition) {
1766     assert(code == lir_cmove, "code check");
1767     assert(type != T_ILLEGAL, "cmove should have type");
1768   }
1769 
1770   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1771           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1772     : LIR_Op(code, result, info)
1773     , _fpu_stack_size(0)
1774     , _opr1(opr1)
1775     , _opr2(opr2)
1776     , _type(type)
1777     , _tmp1(LIR_OprFact::illegalOpr)
1778     , _tmp2(LIR_OprFact::illegalOpr)
1779     , _tmp3(LIR_OprFact::illegalOpr)
1780     , _tmp4(LIR_OprFact::illegalOpr)
1781     , _tmp5(LIR_OprFact::illegalOpr)
1782     , _condition(lir_cond_unknown) {
1783     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1784   }
1785 
1786   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1787           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1788     : LIR_Op(code, result, NULL)
1789     , _fpu_stack_size(0)
1790     , _opr1(opr1)
1791     , _opr2(opr2)
1792     , _type(T_ILLEGAL)
1793     , _tmp1(tmp1)
1794     , _tmp2(tmp2)
1795     , _tmp3(tmp3)
1796     , _tmp4(tmp4)
1797     , _tmp5(tmp5)
1798     , _condition(lir_cond_unknown) {
1799     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
1800   }
1801 
1802   LIR_Opr in_opr1() const                        { return _opr1; }
1803   LIR_Opr in_opr2() const                        { return _opr2; }
1804   BasicType type()  const                        { return _type; }
1805   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1806   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1807   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1808   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1809   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1810   LIR_Condition condition() const  {
1811     assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
1812   }
1813   void set_condition(LIR_Condition condition) {
1814     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
1815   }
1816 
1817   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1818   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1819 
1820   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1821   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1822 
1823   virtual void emit_code(LIR_Assembler* masm);
1824   virtual LIR_Op2* as_Op2() { return this; }
1825   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1826 };
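
// Illustrative sketch (not part of this header): the constructors above are selected by
// the opcode being emitted; the condition-carrying forms are reserved for lir_cmp,
// lir_assert and lir_cmove, everything else uses the condition-less forms:
//
//   new LIR_Op2(lir_cmp,   lir_cond_equal, left, right, info);       // compare only, no result
//   new LIR_Op2(lir_cmove, lir_cond_equal, src1, src2, dst, T_INT);  // conditional move
//   new LIR_Op2(lir_add,   left, right, result);                     // plain arithmetic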
1827 
1828 class LIR_OpAllocArray : public LIR_Op {
1829  friend class LIR_OpVisitState;
1830 
1831  private:
1832   LIR_Opr   _klass;
1833   LIR_Opr   _len;
1834   LIR_Opr   _tmp1;
1835   LIR_Opr   _tmp2;
1836   LIR_Opr   _tmp3;
1837   LIR_Opr   _tmp4;
1838   BasicType _type;
1839   CodeStub* _stub;
1840 
1841  public:
1842   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1843     : LIR_Op(lir_alloc_array, result, NULL)
1844     , _klass(klass)
1845     , _len(len)
1846     , _tmp1(t1)
1847     , _tmp2(t2)
1848     , _tmp3(t3)
1849     , _tmp4(t4)
1850     , _type(type)
1851     , _stub(stub) {}
1852 
1853   LIR_Opr   klass()   const                      { return _klass;       }
1854   LIR_Opr   len()     const                      { return _len;         }
1855   LIR_Opr   obj()     const                      { return result_opr(); }
1856   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1857   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1858   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1859   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1860   BasicType type()    const                      { return _type;        }
1861   CodeStub* stub()    const                      { return _stub;        }
1862 
1863   virtual void emit_code(LIR_Assembler* masm);
1864   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1865   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1866 };
1867 
1868 
1869 class LIR_Op3: public LIR_Op {
1870  friend class LIR_OpVisitState;
1871 
1872  private:
1873   LIR_Opr _opr1;
1874   LIR_Opr _opr2;
1875   LIR_Opr _opr3;
1876  public:
1877   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1878     : LIR_Op(code, result, info)
1879     , _opr1(opr1)
1880     , _opr2(opr2)
1881     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1882   LIR_Opr in_opr1() const                        { return _opr1; }
1883   LIR_Opr in_opr2() const                        { return _opr2; }
1884   LIR_Opr in_opr3() const                        { return _opr3; }
1885 
1886   virtual void emit_code(LIR_Assembler* masm);
1887   virtual LIR_Op3* as_Op3() { return this; }
1888   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1889 };
1890 
1891 
1892 //--------------------------------
1893 class LabelObj: public CompilationResourceObj {
1894  private:
1895   Label _label;
1896  public:
1897   LabelObj()                                     {}
1898   Label* label()                                 { return &_label; }
1899 };
1900 
1901 
1902 class LIR_OpLock: public LIR_Op {
1903  friend class LIR_OpVisitState;
1904 
1905  private:
1906   LIR_Opr _hdr;
1907   LIR_Opr _obj;
1908   LIR_Opr _lock;
1909   LIR_Opr _scratch;
1910   CodeStub* _stub;
1911   CodeStub* _throw_imse_stub;
1912  public:
1913   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL)
1914     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1915     , _hdr(hdr)
1916     , _obj(obj)
1917     , _lock(lock)
1918     , _scratch(scratch)
1919     , _stub(stub)
1920     , _throw_imse_stub(throw_imse_stub)                    {}
1921 
1922   LIR_Opr hdr_opr() const                        { return _hdr; }
1923   LIR_Opr obj_opr() const                        { return _obj; }
1924   LIR_Opr lock_opr() const                       { return _lock; }
1925   LIR_Opr scratch_opr() const                    { return _scratch; }
1926   CodeStub* stub() const                         { return _stub; }
1927   CodeStub* throw_imse_stub() const              { return _throw_imse_stub; }
1928 
1929   virtual void emit_code(LIR_Assembler* masm);
1930   virtual LIR_OpLock* as_OpLock() { return this; }
1931   void print_instr(outputStream* out) const PRODUCT_RETURN;
1932 };
1933 
1934 class LIR_OpLoadKlass: public LIR_Op {
1935   friend class LIR_OpVisitState;
1936 
1937  private:
1938   LIR_Opr _obj;
1939  public:
1940   LIR_OpLoadKlass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info)
1941     : LIR_Op(lir_load_klass, result, info)
1942     , _obj(obj)
1943     {}
1944 
1945   LIR_Opr obj()        const { return _obj;  }
1946 
1947   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return this; }
1948   virtual void emit_code(LIR_Assembler* masm);
1949   void print_instr(outputStream* out) const PRODUCT_RETURN;
1950 };
1951 
1952 class LIR_OpDelay: public LIR_Op {
1953  friend class LIR_OpVisitState;
1954 
1955  private:
1956   LIR_Op* _op;
1957 
1958  public:
1959   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1960     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1961     _op(op) {
1962     assert(op->code() == lir_nop, "should be filling with nops");
1963   }
1964   virtual void emit_code(LIR_Assembler* masm);
1965   virtual LIR_OpDelay* as_OpDelay() { return this; }
1966   void print_instr(outputStream* out) const PRODUCT_RETURN;
1967   LIR_Op* delay_op() const { return _op; }
1968   CodeEmitInfo* call_info() const { return info(); }
1969 };
1970 
1971 #ifdef ASSERT
1972 // LIR_OpAssert
1973 class LIR_OpAssert : public LIR_Op2 {
1974  friend class LIR_OpVisitState;
1975 
1976  private:
1977   const char* _msg;
1978   bool        _halt;
1979 
1980  public:
1981   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1982     : LIR_Op2(lir_assert, condition, opr1, opr2)
1983     , _msg(msg)
1984     , _halt(halt) {
1985   }
1986 
1987   const char* msg() const                        { return _msg; }
1988   bool        halt() const                       { return _halt; }
1989 
1990   virtual void emit_code(LIR_Assembler* masm);
1991   virtual LIR_OpAssert* as_OpAssert()            { return this; }
1992   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1993 };
1994 #endif
1995 
1996 // LIR_OpCompareAndSwap
1997 class LIR_OpCompareAndSwap : public LIR_Op {
1998  friend class LIR_OpVisitState;
1999 
2000  private:
2001   LIR_Opr _addr;
2002   LIR_Opr _cmp_value;
2003   LIR_Opr _new_value;
2004   LIR_Opr _tmp1;
2005   LIR_Opr _tmp2;
2006 
2007  public:
2008   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2009                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
2010     : LIR_Op(code, result, NULL)  // no info
2011     , _addr(addr)
2012     , _cmp_value(cmp_value)
2013     , _new_value(new_value)
2014     , _tmp1(t1)
2015     , _tmp2(t2)                                  { }
2016 
2017   LIR_Opr addr()        const                    { return _addr;  }
2018   LIR_Opr cmp_value()   const                    { return _cmp_value; }
2019   LIR_Opr new_value()   const                    { return _new_value; }
2020   LIR_Opr tmp1()        const                    { return _tmp1;      }
2021   LIR_Opr tmp2()        const                    { return _tmp2;      }
2022 
2023   virtual void emit_code(LIR_Assembler* masm);
2024   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
2025   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2026 };
2027 
2028 // LIR_OpProfileCall
2029 class LIR_OpProfileCall : public LIR_Op {
2030  friend class LIR_OpVisitState;
2031 
2032  private:
2033   ciMethod* _profiled_method;
2034   int       _profiled_bci;
2035   ciMethod* _profiled_callee;
2036   LIR_Opr   _mdo;
2037   LIR_Opr   _recv;
2038   LIR_Opr   _tmp1;
2039   ciKlass*  _known_holder;
2040 
2041  public:
2042   // Destroys recv
2043   LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
2044     : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
2045     , _profiled_method(profiled_method)
2046     , _profiled_bci(profiled_bci)
2047     , _profiled_callee(profiled_callee)
2048     , _mdo(mdo)
2049     , _recv(recv)
2050     , _tmp1(t1)
2051     , _known_holder(known_holder)                { }
2052 
2053   ciMethod* profiled_method() const              { return _profiled_method;  }
2054   int       profiled_bci()    const              { return _profiled_bci;     }
2055   ciMethod* profiled_callee() const              { return _profiled_callee;  }
2056   LIR_Opr   mdo()             const              { return _mdo;              }
2057   LIR_Opr   recv()            const              { return _recv;             }
2058   LIR_Opr   tmp1()            const              { return _tmp1;             }
2059   ciKlass*  known_holder()    const              { return _known_holder;     }
2060 
2061   virtual void emit_code(LIR_Assembler* masm);
2062   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
2063   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2064   bool should_profile_receiver_type() const {
2065     bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
2066     Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
2067     bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
2068     return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
2069   }
2070 };
2071 
2072 // LIR_OpProfileType
2073 class LIR_OpProfileType : public LIR_Op {
2074  friend class LIR_OpVisitState;
2075 
2076  private:
2077   LIR_Opr      _mdp;
2078   LIR_Opr      _obj;
2079   LIR_Opr      _tmp;
2080   ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
2081   intptr_t     _current_klass; // what the profiling currently reports
2082   bool         _not_null;      // true if we know statically that _obj cannot be null
2083   bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
2084                                // that _exact_klass is the only possible type for this parameter in any context.
2085 
2086  public:
2087   // Destroys recv
2088   LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
2089     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
2090     , _mdp(mdp)
2091     , _obj(obj)
2092     , _tmp(tmp)
2093     , _exact_klass(exact_klass)
2094     , _current_klass(current_klass)
2095     , _not_null(not_null)
2096     , _no_conflict(no_conflict) { }
2097 
2098   LIR_Opr      mdp()              const             { return _mdp;              }
2099   LIR_Opr      obj()              const             { return _obj;              }
2100   LIR_Opr      tmp()              const             { return _tmp;              }
2101   ciKlass*     exact_klass()      const             { return _exact_klass;      }
2102   intptr_t     current_klass()    const             { return _current_klass;    }
2103   bool         not_null()         const             { return _not_null;         }
2104   bool         no_conflict()      const             { return _no_conflict;      }
2105 
2106   virtual void emit_code(LIR_Assembler* masm);
2107   virtual LIR_OpProfileType* as_OpProfileType() { return this; }
2108   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2109 };
2110 
2111 // LIR_OpProfileInlineType
2112 class LIR_OpProfileInlineType : public LIR_Op {
2113  friend class LIR_OpVisitState;
2114 
2115  private:
2116   LIR_Opr      _mdp;
2117   LIR_Opr      _obj;
2118   int          _flag;
2119   LIR_Opr      _tmp;
2120   bool         _not_null;      // true if we know statically that _obj cannot be null
2121 
2122  public:
2123   // Destroys recv
2124   LIR_OpProfileInlineType(LIR_Opr mdp, LIR_Opr obj, int flag, LIR_Opr tmp, bool not_null)
2125     : LIR_Op(lir_profile_inline_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
2126     , _mdp(mdp)
2127     , _obj(obj)
2128     , _flag(flag)
2129     , _tmp(tmp)
2130     , _not_null(not_null) { }
2131 
2132   LIR_Opr      mdp()              const             { return _mdp;              }
2133   LIR_Opr      obj()              const             { return _obj;              }
2134   int          flag()             const             { return _flag;             }
2135   LIR_Opr      tmp()              const             { return _tmp;              }
2136   bool         not_null()         const             { return _not_null;         }
2137 
2138   virtual void emit_code(LIR_Assembler* masm);
2139   virtual LIR_OpProfileInlineType* as_OpProfileInlineType() { return this; }
2140   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2141 };
2142 
2143 class LIR_InsertionBuffer;
2144 
2145 //--------------------------------LIR_List---------------------------------------------------
2146 // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
2147 // The LIR instructions are appended by the LIR_List class itself.
2148 //
2149 // Notes:
2150 // - all offsets are (should be) in bytes
2151 // - local positions are specified with an offset, with offset 0 being local 0
2152 
2153 class LIR_List: public CompilationResourceObj {
2154  private:
2155   LIR_OpList  _operations;
2156 
2157   Compilation*  _compilation;
2158 #ifndef PRODUCT
2159   BlockBegin*   _block;
2160 #endif
2161 #ifdef ASSERT
2162   const char *  _file;
2163   int           _line;
2164 #endif
2165 
2166  public:
2167   void append(LIR_Op* op) {
2168     if (op->source() == NULL)
2169       op->set_source(_compilation->current_instruction());
2170 #ifndef PRODUCT
2171     if (PrintIRWithLIR) {
2172       _compilation->maybe_print_current_instruction();
2173       op->print(); tty->cr();
2174     }
2175 #endif // PRODUCT
2176 
2177     _operations.append(op);
2178 
2179 #ifdef ASSERT
2180     op->verify();
2181     op->set_file_and_line(_file, _line);
2182     _file = NULL;
2183     _line = 0;
2184 #endif
2185   }
2186 
2187   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
2188 
2189 #ifdef ASSERT
2190   void set_file_and_line(const char * file, int line);
2191 #endif
2192 
2193   //---------- accessors ---------------
2194   LIR_OpList* instructions_list()                { return &_operations; }
2195   int         length() const                     { return _operations.length(); }
2196   LIR_Op*     at(int i) const                    { return _operations.at(i); }
2197 
2198   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
2199 
2200   // insert LIR_Ops in buffer to right places in LIR_List
2201   void append(LIR_InsertionBuffer* buffer);
2202 
2203   //---------- mutators ---------------
2204   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
2205   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
2206   void remove_at(int i)                          { _operations.remove_at(i); }
2207 
2208   //---------- printing -------------
2209   void print_instructions() PRODUCT_RETURN;
2210 
2211 
2212   //---------- instructions -------------
2213   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2214                         address dest, LIR_OprList* arguments,
2215                         CodeEmitInfo* info) {
2216     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2217   }
2218   void call_static(ciMethod* method, LIR_Opr result,
2219                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2220     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2221   }
2222   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2223                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2224     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2225   }
2226   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2227                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2228     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2229   }
2230 
2231   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
2232   void membar()                                  { append(new LIR_Op0(lir_membar)); }
2233   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
2234   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
2235   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
2236   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
2237   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
2238   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
2239 
2240   void nop()                                     { append(new LIR_Op0(lir_nop)); }
2241 
2242   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
2243   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2244 
2245   void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }
2246 
2247   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
2248 
2249   void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
2250 
2251   // result is a stack location for old backend and vreg for UseLinearScan
2252   // stack_loc_temp is an illegal register for old backend
2253   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2254   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2255   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2256   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2257   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
2258     if (UseCompressedOops) {
2259       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2260     } else {
2261       move(src, dst, info);
2262     }
2263   }
2264   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2265     if (UseCompressedOops) {
2266       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2267     } else {
2268       move(src, dst, info);
2269     }
2270   }
2271   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2272 
2273   void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2274   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2275 
2276   void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2277   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2278 
2279   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2280   void return_op(LIR_Opr result)                   { append(new LIR_OpReturn(result)); }
2281 
2282   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2283 
2284   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2285   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2286   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2287 
2288   void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
2289   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2290     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2291   }
2292   void unwind_exception(LIR_Opr exceptionOop) {
2293     append(new LIR_Op1(lir_unwind, exceptionOop));
2294   }
2295 
2296   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2297   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2298 
2299   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2300     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2301   }
2302   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2303     cmp(condition, left, LIR_OprFact::intConst(right), info);
2304   }
2305 
2306   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2307   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2308 
2309   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2310     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2311   }
2312 
2313   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2314                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2315   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2316                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2317   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2318                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2319 
2320   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2321   void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr)              { append(new LIR_Op2(lir_neg, from, tmp, to)); }
2322   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2323   void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
2324   void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
2325   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2326   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2327 
2328   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2329   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2330   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2331   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul, left, right, res, tmp)); }
2332   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2333   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div, left, right, res, tmp)); }
2334   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2335 
2336   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2337   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2338 
2339   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2340 
2341   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2342   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2343   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2344   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2345   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2346 
2347   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2348   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2349   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2350   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2351 
2352   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2353   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2354 
2355   // jump is an unconditional branch
2356   void jump(BlockBegin* block) {
2357     append(new LIR_OpBranch(lir_cond_always, block));
2358   }
2359   void jump(CodeStub* stub) {
2360     append(new LIR_OpBranch(lir_cond_always, stub));
2361   }
2362   void branch(LIR_Condition cond, Label* lbl) {
2363     append(new LIR_OpBranch(cond, lbl));
2364   }
2365   // Should not be used for fp comparisons
2366   void branch(LIR_Condition cond, BlockBegin* block) {
2367     append(new LIR_OpBranch(cond, block));
2368   }
2369   // Should not be used for fp comparisons
2370   void branch(LIR_Condition cond, CodeStub* stub) {
2371     append(new LIR_OpBranch(cond, stub));
2372   }
2373   // Should only be used for fp comparisons
2374   void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
2375     append(new LIR_OpBranch(cond, block, unordered));
2376   }
2377 
2378   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2379   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2380   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2381 
2382   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2383   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2384   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2385 
2386   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2387   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2388 
2389   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2390     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2391   }
2392 
2393   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2394                     LIR_OprList* arguments, CodeEmitInfo* info) {
2395     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2396   }
2397 
2398   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2399   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2400   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL);
2401 
2402   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2403 
2404   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2405 
2406   void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2407 
2408   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2409   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2410   void check_flattened_array(LIR_Opr array, LIR_Opr value, LIR_Opr tmp, CodeStub* stub);
2411   void check_null_free_array(LIR_Opr array, LIR_Opr tmp);
2412   void substitutability_check(LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr equal_result, LIR_Opr not_equal_result,
2413                               LIR_Opr tmp1, LIR_Opr tmp2,
2414                               ciKlass* left_klass, ciKlass* right_klass, LIR_Opr left_klass_op, LIR_Opr right_klass_op,
2415                               CodeEmitInfo* info, CodeStub* stub);
2416 
2417   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2418                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2419                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2420                   ciMethod* profiled_method, int profiled_bci, bool is_null_free);
2421   // MethodData* profiling
2422   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2423     append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2424   }
2425   void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2426     append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2427   }
2428   void profile_inline_type(LIR_Address* mdp, LIR_Opr obj, int flag, LIR_Opr tmp, bool not_null) {
2429     append(new LIR_OpProfileInlineType(LIR_OprFact::address(mdp), obj, flag, tmp, not_null));
2430   }
2431 
2432   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2433   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2434 
2435   void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); }
2436 
2437 #ifdef ASSERT
2438   void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2439 #endif
2440 };
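
// Illustrative sketch (not part of this header): a back end emits LIR by calling the
// factory methods above on the LIR_List of the current block, e.g. a compare followed
// by a conditional branch and a move (block_lir and the operands are placeholders):
//
//   LIR_List* block_lir = ...;
//   block_lir->cmp(lir_cond_less, left, right);
//   block_lir->branch(lir_cond_less, slow_path_block);   // not for fp comparisons
//   block_lir->move(src, dst);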
2441 
2442 void print_LIR(BlockList* blocks);
2443 
2444 class LIR_InsertionBuffer : public CompilationResourceObj {
2445  private:
2446   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2447 
2448   // list of insertion points. index and count are stored alternately:
2449   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
2450   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
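  // e.g. three ops to be inserted at index 5 and one op at index 9 are stored as
  //      _index_and_count = { 5, 3, 9, 1 }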
2451   intStack    _index_and_count;
2452 
2453   // the LIR_Ops to be inserted
2454   LIR_OpList  _ops;
2455 
2456   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2457   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2458   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2459 
2460 #ifdef ASSERT
2461   void verify();
2462 #endif
2463  public:
2464   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2465 
2466   // must be called before using the insertion buffer
2467   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2468   bool initialized() const  { return _lir != NULL; }
2469   // called automatically when the buffer is appended to the LIR_List
2470   void finish()             { _lir = NULL; }
2471 
2472   // accessors
2473   LIR_List*  lir_list() const             { return _lir; }
2474   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2475   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2476   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2477 
2478   int number_of_ops() const               { return _ops.length(); }
2479   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2480 
2481   // append an instruction to the buffer
2482   void append(int index, LIR_Op* op);
2483 
2484   // instruction
2485   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2486 };
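
// Illustrative sketch (not part of this header): the buffer goes through an
// init/append/finish cycle; LIR_List::append(LIR_InsertionBuffer*) splices the
// buffered ops into the list and finishes the buffer automatically:
//
//   LIR_InsertionBuffer buf;
//   buf.init(block_lir);                 // bind to the target LIR_List
//   buf.move(op_index, src, dst);        // record a move to be inserted at op_index
//   block_lir->append(&buf);             // insert the buffered ops into block_lir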
2487 
2488 
2489 //
2490 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2491 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2492 // information about the input and output operands and the temporaries used by the
2493 // op to be recorded.  It also records whether the op has call semantics
2494 // and all the CodeEmitInfos used by this op.
2495 //
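// Illustrative sketch (not part of this header): a typical client, e.g. the register
// allocator, visits an op and then iterates the recorded operands per mode:
//
//   LIR_OpVisitState state;
//   state.visit(op);
//   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
//     LIR_Opr opr = state.opr_at(LIR_OpVisitState::inputMode, i);
//     // ... record a use position for opr, etc.
//   }
//   if (state.has_call()) {
//     // ... the op kills all caller-saved registers
//   }
//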
2496 
2497 
2498 class LIR_OpVisitState: public StackObj {
2499  public:
2500   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2501 
2502   enum {
2503     maxNumberOfOperands = 20,
2504     maxNumberOfInfos = 4
2505   };
2506 
2507  private:
2508   LIR_Op*          _op;
2509 
2510   // optimization: the operands and infos are not stored in a variable-length
2511   //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
2512   int              _oprs_len[numModes];
2513   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2514   int _info_len;
2515   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2516 
2517   bool             _has_call;
2518   bool             _has_slow_case;
2519 
2520 
2521   // only include register operands
2522   // addresses are decomposed into their base and index registers
2523   // constants and stack operands are ignored
2524   void append(LIR_Opr& opr, OprMode mode) {
2525     assert(opr->is_valid(), "should not call this otherwise");
2526     assert(mode >= 0 && mode < numModes, "bad mode");
2527 
2528     if (opr->is_register()) {
2529        assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2530       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2531 
2532     } else if (opr->is_pointer()) {
2533       LIR_Address* address = opr->as_address_ptr();
2534       if (address != NULL) {
2535         // special handling for addresses: add the base and index registers of the address;
2536         // both are always input operands (or temps if we want to extend
2537         // their liveness)
2538         if (mode == outputMode) {
2539           mode = inputMode;
2540         }
2541         assert(mode == inputMode || mode == tempMode, "input or temp only for addresses");
2542         if (address->_base->is_valid()) {
2543           assert(address->_base->is_register(), "must be");
2544           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2545           _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2546         }
2547         if (address->_index->is_valid()) {
2548           assert(address->_index->is_register(), "must be");
2549           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2550           _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2551         }
2552 
2553       } else {
2554         assert(opr->is_constant(), "constant operands are not processed");
2555       }
2556     } else {
2557       assert(opr->is_stack(), "stack operands are not processed");
2558     }
2559   }
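
       // Illustration of the address decomposition above (not from the original
       // source): when an op like
       //   move [base + index*4 + 12] -> dst
       // is visited, 'base' and 'index' end up as inputMode operands and 'dst' as
       // an outputMode operand; the constant displacement never reaches the state.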
2560 
2561   void append(CodeEmitInfo* info) {
2562     assert(info != NULL, "should not call this otherwise");
2563     assert(_info_len < maxNumberOfInfos, "array overflow");
2564     _info_new[_info_len++] = info;
2565   }
2566 
2567  public:
2568   LIR_OpVisitState()         { reset(); }
2569 
2570   LIR_Op* op() const         { return _op; }
2571   void set_op(LIR_Op* op)    { reset(); _op = op; }
2572 
2573   bool has_call() const      { return _has_call; }
2574   bool has_slow_case() const { return _has_slow_case; }
2575 
2576   void reset() {
2577     _op = NULL;
2578     _has_call = false;
2579     _has_slow_case = false;
2580 
2581     _oprs_len[inputMode] = 0;
2582     _oprs_len[tempMode] = 0;
2583     _oprs_len[outputMode] = 0;
2584     _info_len = 0;
2585   }
2586 
2587 
2588   int opr_count(OprMode mode) const {
2589     assert(mode >= 0 && mode < numModes, "bad mode");
2590     return _oprs_len[mode];
2591   }
2592 
2593   LIR_Opr opr_at(OprMode mode, int index) const {
2594     assert(mode >= 0 && mode < numModes, "bad mode");
2595     assert(index >= 0 && index < _oprs_len[mode], "index out of bounds");
2596     return *_oprs_new[mode][index];
2597   }
2598 
2599   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2600     assert(mode >= 0 && mode < numModes, "bad mode");
2601     assert(index >= 0 && index < _oprs_len[mode], "index out of bounds");
2602     *_oprs_new[mode][index] = opr;
2603   }
2604 
2605   int info_count() const {
2606     return _info_len;
2607   }
2608 
2609   CodeEmitInfo* info_at(int index) const {
2610     assert(index < _info_len, "index out of bounds");
2611     return _info_new[index];
2612   }
2613 
2614   XHandlers* all_xhandler();   // exception handlers of the recorded CodeEmitInfos
2615 
2616   // collects the register operands, CodeEmitInfos and flags of the given instruction
2617   void visit(LIR_Op* op);
2618 
2619 #ifdef ASSERT
2620   // returns true if the given operation has no operands
2621   bool no_operands(LIR_Op* op);
2622 #endif
2623 
2624   // LIR_Op visitor functions use these to fill in the state
2625   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2626   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2627   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2628   void do_info(CodeEmitInfo* info)        { append(info); }
2629 
2630   void do_stub(CodeStub* stub);
2631   void do_call()                          { _has_call = true; }
2632   void do_slow_case()                     { _has_slow_case = true; }
2633   void do_slow_case(CodeEmitInfo* info) {
2634     _has_slow_case = true;
2635     append(info);
2636   }
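
       // Sketch (illustrative, loosely based on how the implementation handles a
       // two-operand op) of how the visit function uses the helpers above:
       //
       //   if (op2->_info != NULL)        do_info(op2->_info);
       //   if (op2->_opr1->is_valid())    do_input(op2->_opr1);    // operand that is read
       //   if (op2->_opr2->is_valid())    do_input(op2->_opr2);
       //   if (op2->_result->is_valid())  do_output(op2->_result); // operand that is written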
2637 };
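
     // Illustrative sketch (not part of the original header) of how a client such as
     // the register allocator typically drives the visitor; 'op' stands for any LIR_Op
     // taken from a block's LIR_OpList:
     //
     //   LIR_OpVisitState state;
     //   state.visit(op);   // collect the operands and CodeEmitInfos of 'op'
     //   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
     //     LIR_Opr opr = state.opr_at(LIR_OpVisitState::inputMode, i);
     //     // inspect 'opr', or replace it via state.set_opr_at(LIR_OpVisitState::inputMode, i, new_opr)
     //   }
     //   if (state.has_call()) {
     //     // values live across 'op' must not be cached in caller-saved registers
     //   }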
2638 
2639 
2640 inline LIR_Opr LIR_Opr::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2641 
2642 inline LIR_Opr LIR_Opr::nullOpr()   { return LIR_OprFact::nullOpr; }
2643 
2644 #endif // SHARE_C1_C1_LIR_HPP