1 /*
   2  * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_C1_C1_LIR_HPP
  26 #define SHARE_C1_C1_LIR_HPP
  27 
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_ValueType.hpp"
  30 #include "oops/method.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 #include "utilities/macros.hpp"
  33 
  34 class BlockBegin;
  35 class BlockList;
  36 class LIR_Assembler;
  37 class CodeEmitInfo;
  38 class CodeStub;
  39 class CodeStubList;
  40 class C1SafepointPollStub;
  41 class ArrayCopyStub;
  42 class LIR_Op;
  43 class ciType;
  44 class ValueType;
  45 class LIR_OpVisitState;
  46 class FpuStackSim;
  47 
  48 //---------------------------------------------------------------------
  49 //                 LIR Operands
  50 //    LIR_OprPtr
  51 //      LIR_Const
  52 //      LIR_Address
  53 //---------------------------------------------------------------------
  54 class LIR_OprPtr;
  55 class LIR_Const;
  56 class LIR_Address;
  57 class LIR_OprVisitor;
  58 class LIR_Opr;
  59 
  60 typedef int          RegNr;
  61 
  62 typedef GrowableArray<LIR_Opr> LIR_OprList;
  63 typedef GrowableArray<LIR_Op*> LIR_OpArray;
  64 typedef GrowableArray<LIR_Op*> LIR_OpList;
  65 
  66 // define LIR_OprPtr early so LIR_Opr can refer to it
  67 class LIR_OprPtr: public CompilationResourceObj {
  68  public:
  69   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  70   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  71 
  72   virtual LIR_Const*  as_constant()              { return nullptr; }
  73   virtual LIR_Address* as_address()              { return nullptr; }
  74   virtual BasicType type() const                 = 0;
  75   virtual void print_value_on(outputStream* out) const = 0;
  76 };
  77 
  78 
  79 
  80 // LIR constants
  81 class LIR_Const: public LIR_OprPtr {
  82  private:
  83   JavaValue _value;
  84 
  85   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  86   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  87   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  88 
  89  public:
  90   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  91   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  92   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  93   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  94   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  95   LIR_Const(void* p) {
  96 #ifdef _LP64
  97     assert(sizeof(jlong) >= sizeof(p), "too small");
  98     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
  99 #else
 100     assert(sizeof(jint) >= sizeof(p), "too small");
 101     _value.set_type(T_INT);     _value.set_jint((jint)p);
 102 #endif
 103   }
 104   LIR_Const(Metadata* m) {
 105     _value.set_type(T_METADATA);
 106 #ifdef _LP64
 107     _value.set_jlong((jlong)m);
 108 #else
 109     _value.set_jint((jint)m);
 110 #endif // _LP64
 111   }
 112 
 113   virtual BasicType type()       const { return _value.get_type(); }
 114   virtual LIR_Const* as_constant()     { return this; }
 115 
 116   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 117   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 118   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 119   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 120   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 121   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 122   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 123 
 124 #ifdef _LP64
 125   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 126   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
 127 #else
 128   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 129   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
 130 #endif
 131 
 132 
 133   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 134   jint      as_jint_lo_bits() const    {
 135     if (type() == T_DOUBLE) {
 136       return low(jlong_cast(_value.get_jdouble()));
 137     } else {
 138       return as_jint_lo();
 139     }
 140   }
 141   jint      as_jint_hi_bits() const    {
 142     if (type() == T_DOUBLE) {
 143       return high(jlong_cast(_value.get_jdouble()));
 144     } else {
 145       return as_jint_hi();
 146     }
 147   }
 148   jlong      as_jlong_bits() const    {
 149     if (type() == T_DOUBLE) {
 150       return jlong_cast(_value.get_jdouble());
 151     } else {
 152       return as_jlong();
 153     }
 154   }
 155 
 156   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 157 
 158 
 159   bool is_zero_float() {
 160     jfloat f = as_jfloat();
 161     jfloat ok = 0.0f;
 162     return jint_cast(f) == jint_cast(ok);
 163   }
 164 
 165   bool is_one_float() {
 166     jfloat f = as_jfloat();
 167     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 168   }
 169 
 170   bool is_zero_double() {
 171     jdouble d = as_jdouble();
 172     jdouble ok = 0.0;
 173     return jlong_cast(d) == jlong_cast(ok);
 174   }
 175 
 176   bool is_one_double() {
 177     jdouble d = as_jdouble();
 178     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 179   }
 180 };
 181 
 182 
 183 //---------------------LIR Operand descriptor------------------------------------
 184 //
 185 // The class LIR_Opr represents a LIR instruction operand;
 186 // it can be a register (ALU/FPU), stack location or a constant;
 187 // Constants and addresses are represented as resource area allocated
 188 // structures (see above), and pointers are stored in the _value field (cast to
 189 // an intptr_t).
 190 // Registers and stack locations are represented inline as integers.
 191 // (see value function).
 192 
 193 // Previously, this class was derived from CompilationResourceObj.
 194 // However, deriving from any of the "Obj" types in allocation.hpp seems
 195 // detrimental, since in some build modes it would add a vtable to this class,
 196 // which would make it no longer a 1-word trivially-copyable wrapper object,
 197 // which is the entire point of it.
 198 
 199 class LIR_Opr {
 200  public:
 201   // value structure:
 202   //          data        other-non-data opr-type opr-kind
 203   // +-------------------+--------------+-------+-----+
 204   // [max...............................|6 5 4 3|2 1 0]
 205   //                                                 ^
 206   //                                           is_pointer bit
 207   //
 208   // if the lowest bit is cleared, the value is a pointer to a structure (LIR_OprPtr)
 209   // we need 4 bits to represent types
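       //
       // For illustration only (a sketch derived from the OprKind/OprBits/OprShift enums
       // below, not part of the original comment): an int value living in CPU register 5,
       // as produced by LIR_OprFact::single_cpu(5), would be encoded as
       //   (5 << reg1_shift) | int_type | cpu_register | single_size   // reg1_shift == data_shift == 14
       //   = (5 << 14) | (1 << 3) | 3 | 0 = 0x1400B
       // The lowest bit is set, so the operand is not a pointer to an LIR_OprPtr.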
 210 
 211  private:
 212   friend class LIR_OprFact;
 213 
 214   intptr_t _value;
 215   // Conversion
 216   intptr_t value() const                         { return _value; }
 217 
 218   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 219     return (value() & mask) == masked_value;
 220   }
 221 
 222   enum OprKind {
 223       pointer_value      = 0
 224     , stack_value        = 1
 225     , cpu_register       = 3
 226     , fpu_register       = 5
 227     , illegal_value      = 7
 228   };
 229 
 230   enum OprBits {
 231       pointer_bits   = 1
 232     , kind_bits      = 3
 233     , type_bits      = 4
 234     , size_bits      = 2
 235     , destroys_bits  = 1
 236     , virtual_bits   = 1
 237     , is_xmm_bits    = 1
 238     , last_use_bits  = 1
 239     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 240     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
 241                        + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
 242     , data_bits      = BitsPerInt - non_data_bits
 243     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 244   };
 245 
 246   enum OprShift : uintptr_t {
 247       kind_shift     = 0
 248     , type_shift     = kind_shift     + kind_bits
 249     , size_shift     = type_shift     + type_bits
 250     , destroys_shift = size_shift     + size_bits
 251     , last_use_shift = destroys_shift + destroys_bits
 252     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 253     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 254     , is_xmm_shift   = virtual_shift + virtual_bits
 255     , data_shift     = is_xmm_shift + is_xmm_bits
 256     , reg1_shift = data_shift
 257     , reg2_shift = data_shift + reg_bits
 258 
 259   };
 260 
 261   enum OprSize {
 262       single_size = 0 << size_shift
 263     , double_size = 1 << size_shift
 264   };
 265 
 266   enum OprMask {
 267       kind_mask      = right_n_bits(kind_bits)
 268     , type_mask      = right_n_bits(type_bits) << type_shift
 269     , size_mask      = right_n_bits(size_bits) << size_shift
 270     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 271     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 272     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 273     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 274     , pointer_mask   = right_n_bits(pointer_bits)
 275     , lower_reg_mask = right_n_bits(reg_bits)
 276     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 277   };
 278 
 279   uint32_t data() const                          { return (uint32_t)value() >> data_shift; }
 280   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 281   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 282   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 283   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 284 
 285   static char type_char(BasicType t);
 286 
 287  public:
 288   LIR_Opr() : _value(0) {}
 289   LIR_Opr(intptr_t val) : _value(val) {}
 290   LIR_Opr(LIR_OprPtr *val) : _value(reinterpret_cast<intptr_t>(val)) {}
 291   bool operator==(const LIR_Opr &other) const { return _value == other._value; }
 292   bool operator!=(const LIR_Opr &other) const { return _value != other._value; }
 293   explicit operator bool() const { return _value != 0; }
 294 
 295   // UGLY HACK: make this value object look like a pointer (to itself). This
 296   // operator overload should be removed, and all callers updated from
 297   // `opr->fn()` to `opr.fn()`.
 298   const LIR_Opr* operator->() const { return this; }
 299   LIR_Opr* operator->() { return this; }
 300 
 301   enum {
 302     vreg_base = ConcreteRegisterImpl::number_of_registers,
 303     data_max = (1 << data_bits) - 1,      // max unsigned value for data bit field
 304     vreg_limit =  10000,                  // choose a reasonable limit,
 305     vreg_max = MIN2(vreg_limit, data_max) // and make sure it fits in the bit field
 306   };
 307 
 308   static inline LIR_Opr illegalOpr();
 309   static inline LIR_Opr nullOpr();
 310 
 311   enum OprType {
 312       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 313     , int_type      = 1 << type_shift
 314     , long_type     = 2 << type_shift
 315     , object_type   = 3 << type_shift
 316     , address_type  = 4 << type_shift
 317     , float_type    = 5 << type_shift
 318     , double_type   = 6 << type_shift
 319     , metadata_type = 7 << type_shift
 320   };
 321   friend OprType as_OprType(BasicType t);
 322   friend BasicType as_BasicType(OprType t);
 323 
 324   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 325   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 326 
 327   static OprSize size_for(BasicType t) {
 328     switch (t) {
 329       case T_LONG:
 330       case T_DOUBLE:
 331         return double_size;
 332         break;
 333 
 334       case T_FLOAT:
 335       case T_BOOLEAN:
 336       case T_CHAR:
 337       case T_BYTE:
 338       case T_SHORT:
 339       case T_INT:
 340       case T_ADDRESS:
 341       case T_OBJECT:
 342       case T_ARRAY:
 343       case T_METADATA:
 344         return single_size;
 345         break;
 346 
 347       default:
 348         ShouldNotReachHere();
 349         return single_size;
 350       }
 351   }
 352 
 353 
 354   void validate_type() const PRODUCT_RETURN;
 355 
 356   BasicType type() const {
 357     if (is_pointer()) {
 358       return pointer()->type();
 359     }
 360     return as_BasicType(type_field());
 361   }
 362 
 363 
 364   ValueType* value_type() const                  { return as_ValueType(type()); }
 365 
 366   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 367 
 368   bool is_equal(LIR_Opr opr) const         { return *this == opr; }
 369   // checks whether the types are the same
 370   bool is_same_type(LIR_Opr opr) const     {
 371     assert(type_field() != unknown_type &&
 372            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 373     return type_field() == opr->type_field();
 374   }
 375   bool is_same_register(LIR_Opr opr) {
 376     return (is_register() && opr->is_register() &&
 377             kind_field() == opr->kind_field() &&
 378             (value() & no_type_mask) == (opr->value() & no_type_mask));
 379   }
 380 
 381   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 382   bool is_illegal() const      { return kind_field() == illegal_value; }
 383   bool is_valid() const        { return kind_field() != illegal_value; }
 384 
 385   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 386   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 387 
 388   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != nullptr; }
 389   bool is_address() const      { return is_pointer() && pointer()->as_address() != nullptr; }
 390 
 391   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 392   bool is_oop() const;
 393 
 394   // semantics for fpu- and xmm-registers:
 395   // * the fpu predicates also return true for xmm registers
 396   //   (i.e. for an xmm register both is_single_fpu and is_single_xmm are true)
 397   // * so you must always check is_???_xmm before is_???_fpu to
 398   //   distinguish between fpu- and xmm-registers
 399 
 400   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 401   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 402   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 403 
 404   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 405   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 406   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 407   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 408   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 409 
 410   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 411   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 412   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 413   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 414   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 415 
 416   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 417   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 418   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 419 
 420   // fast accessor functions for special bits that do not work for pointers
 421   // (in these functions, the check for is_pointer() is omitted)
 422   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 423   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 424   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 425   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 426   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 427 
 428   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 429   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 430   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 431   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 432 
 433 
 434   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 435   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 436   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 437   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 438   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 439   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 440   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 441   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 442   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 443   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 444   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 445   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 446 
 447   LIR_OprPtr* pointer() const { assert(_value != 0 && is_pointer(), "nullness and type check"); return (LIR_OprPtr*)_value; }
 448   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 449   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 450 
 451   Register as_register()    const;
 452   Register as_register_lo() const;
 453   Register as_register_hi() const;
 454 
 455   Register as_pointer_register() {
 456 #ifdef _LP64
 457     if (is_double_cpu()) {
 458       assert(as_register_lo() == as_register_hi(), "should be a single register");
 459       return as_register_lo();
 460     }
 461 #endif
 462     return as_register();
 463   }
 464 
 465   FloatRegister as_float_reg   () const;
 466   FloatRegister as_double_reg  () const;
 467 #ifdef X86
 468   XMMRegister as_xmm_float_reg () const;
 469   XMMRegister as_xmm_double_reg() const;
 470   // for compatibility with RInfo
 471   int fpu() const { return lo_reg_half(); }
 472 #endif
 473 
 474   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 475   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 476   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 477   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 478   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 479 
 480   void print() const PRODUCT_RETURN;
 481   void print(outputStream* out) const PRODUCT_RETURN;
 482 };
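
     // Note (illustrative, follows from the operator-> overloads above): because LIR_Opr is a
     // value type that also acts as a pointer to itself, opr->is_register() and opr.is_register()
     // are equivalent; existing code mostly uses the arrow form.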
 483 
 484 inline LIR_Opr::OprType as_OprType(BasicType type) {
 485   switch (type) {
 486   case T_INT:      return LIR_Opr::int_type;
 487   case T_LONG:     return LIR_Opr::long_type;
 488   case T_FLOAT:    return LIR_Opr::float_type;
 489   case T_DOUBLE:   return LIR_Opr::double_type;
 490   case T_OBJECT:
 491   case T_ARRAY:    return LIR_Opr::object_type;
 492   case T_ADDRESS:  return LIR_Opr::address_type;
 493   case T_METADATA: return LIR_Opr::metadata_type;
 494   case T_ILLEGAL:  // fall through
 495   default: ShouldNotReachHere(); return LIR_Opr::unknown_type;
 496   }
 497 }
 498 
 499 inline BasicType as_BasicType(LIR_Opr::OprType t) {
 500   switch (t) {
 501   case LIR_Opr::int_type:     return T_INT;
 502   case LIR_Opr::long_type:    return T_LONG;
 503   case LIR_Opr::float_type:   return T_FLOAT;
 504   case LIR_Opr::double_type:  return T_DOUBLE;
 505   case LIR_Opr::object_type:  return T_OBJECT;
 506   case LIR_Opr::address_type: return T_ADDRESS;
 507   case LIR_Opr::metadata_type:return T_METADATA;
 508   case LIR_Opr::unknown_type: // fall through
 509   default: ShouldNotReachHere();  return T_ILLEGAL;
 510   }
 511 }
 512 
 513 
 514 // LIR_Address
 515 class LIR_Address: public LIR_OprPtr {
 516  friend class LIR_OpVisitState;
 517 
 518  public:
 519   // NOTE: currently these must be the log2 of the scale factor (and
 520   // must also be equivalent to the ScaleFactor enum in
 521   // assembler_i486.hpp)
 522   enum Scale {
 523     times_1  =  0,
 524     times_2  =  1,
 525     times_4  =  2,
 526     times_8  =  3
 527   };
 528 
 529  private:
 530   LIR_Opr   _base;
 531   LIR_Opr   _index;
 532   intx      _disp;
 533   Scale     _scale;
 534   BasicType _type;
 535 
 536  public:
 537   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 538        _base(base)
 539      , _index(index)
 540      , _disp(0)
 541      , _scale(times_1)
 542      , _type(type) { verify(); }
 543 
 544   LIR_Address(LIR_Opr base, intx disp, BasicType type):
 545        _base(base)
 546      , _index(LIR_Opr::illegalOpr())
 547      , _disp(disp)
 548      , _scale(times_1)
 549      , _type(type) { verify(); }
 550 
 551   LIR_Address(LIR_Opr base, BasicType type):
 552        _base(base)
 553      , _index(LIR_Opr::illegalOpr())
 554      , _disp(0)
 555      , _scale(times_1)
 556      , _type(type) { verify(); }
 557 
 558   LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
 559        _base(base)
 560      , _index(index)
 561      , _disp(disp)
 562      , _scale(times_1)
 563      , _type(type) { verify(); }
 564 
 565   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
 566        _base(base)
 567      , _index(index)
 568      , _disp(disp)
 569      , _scale(scale)
 570      , _type(type) { verify(); }
 571 
 572   LIR_Opr base()  const                          { return _base;  }
 573   LIR_Opr index() const                          { return _index; }
 574   Scale   scale() const                          { return _scale; }
 575   intx    disp()  const                          { return _disp;  }
 576 
 577   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 578 
 579   virtual LIR_Address* as_address()              { return this;   }
 580   virtual BasicType type() const                 { return _type; }
 581   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 582 
 583   void verify() const PRODUCT_RETURN;
 584 
 585   static Scale scale(BasicType type);
 586 };
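
     // Illustrative sketch (an assumption, not taken from the original header): a LIR_Address
     // describes the effective address  base + (index << scale) + disp.  For example, the
     // address of element i of a T_INT array could be expressed as
     //   new LIR_Address(array_opr, index_opr, LIR_Address::times_4,
     //                   arrayOopDesc::base_offset_in_bytes(T_INT), T_INT)
     // where array_opr and index_opr are hypothetical operands holding the array oop and index.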
 587 
 588 
 589 // operand factory
 590 class LIR_OprFact: public AllStatic {
 591  public:
 592 
 593   static LIR_Opr illegalOpr;
 594   static LIR_Opr nullOpr;
 595 
 596   static LIR_Opr single_cpu(int reg) {
 597     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 598                                LIR_Opr::int_type             |
 599                                LIR_Opr::cpu_register         |
 600                                LIR_Opr::single_size);
 601   }
 602   static LIR_Opr single_cpu_oop(int reg) {
 603     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 604                                LIR_Opr::object_type          |
 605                                LIR_Opr::cpu_register         |
 606                                LIR_Opr::single_size);
 607   }
 608   static LIR_Opr single_cpu_address(int reg) {
 609     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 610                                LIR_Opr::address_type         |
 611                                LIR_Opr::cpu_register         |
 612                                LIR_Opr::single_size);
 613   }
 614   static LIR_Opr single_cpu_metadata(int reg) {
 615     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 616                                LIR_Opr::metadata_type        |
 617                                LIR_Opr::cpu_register         |
 618                                LIR_Opr::single_size);
 619   }
 620   static LIR_Opr double_cpu(int reg1, int reg2) {
 621     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 622     return (LIR_Opr)(intptr_t)((reg1 << LIR_Opr::reg1_shift) |
 623                                (reg2 << LIR_Opr::reg2_shift) |
 624                                LIR_Opr::long_type            |
 625                                LIR_Opr::cpu_register         |
 626                                LIR_Opr::double_size);
 627   }
 628 
 629   static LIR_Opr single_fpu(int reg) {
 630     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 631                                LIR_Opr::float_type           |
 632                                LIR_Opr::fpu_register         |
 633                                LIR_Opr::single_size);
 634   }
 635 
 636   // Platform dependent.
 637   static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);
 638 
 639 #ifdef ARM32
 640   static LIR_Opr single_softfp(int reg) {
 641     return (LIR_Opr)(intptr_t)((reg  << LIR_Opr::reg1_shift) |
 642                                LIR_Opr::float_type           |
 643                                LIR_Opr::cpu_register         |
 644                                LIR_Opr::single_size);
 645   }
 646   static LIR_Opr double_softfp(int reg1, int reg2) {
 647     return (LIR_Opr)(intptr_t)((reg1 << LIR_Opr::reg1_shift) |
 648                                (reg2 << LIR_Opr::reg2_shift) |
 649                                LIR_Opr::double_type          |
 650                                LIR_Opr::cpu_register         |
 651                                LIR_Opr::double_size);
 652   }
 653 #endif // ARM32
 654 
 655 #if defined(X86)
 656   static LIR_Opr single_xmm(int reg) {
 657     return (LIR_Opr)(intptr_t)((reg << LIR_Opr::reg1_shift) |
 658                                LIR_Opr::float_type          |
 659                                LIR_Opr::fpu_register        |
 660                                LIR_Opr::single_size         |
 661                                LIR_Opr::is_xmm_mask);
 662   }
 663   static LIR_Opr double_xmm(int reg) {
 664     return (LIR_Opr)(intptr_t)((reg << LIR_Opr::reg1_shift) |
 665                                (reg << LIR_Opr::reg2_shift) |
 666                                LIR_Opr::double_type         |
 667                                LIR_Opr::fpu_register        |
 668                                LIR_Opr::double_size         |
 669                                LIR_Opr::is_xmm_mask);
 670   }
 671 #endif // X86
 672 
 673   static LIR_Opr virtual_register(int index, BasicType type) {
 674     if (index > LIR_Opr::vreg_max) {
 675       // Running out of virtual registers. Caller should bailout.
 676       return illegalOpr;
 677     }
 678 
 679     LIR_Opr res;
 680     switch (type) {
 681       case T_OBJECT: // fall through
 682       case T_ARRAY:
 683         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift)  |
 684                                             LIR_Opr::object_type  |
 685                                             LIR_Opr::cpu_register |
 686                                             LIR_Opr::single_size  |
 687                                             LIR_Opr::virtual_mask);
 688         break;
 689 
 690       case T_METADATA:
 691         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift)  |
 692                                             LIR_Opr::metadata_type|
 693                                             LIR_Opr::cpu_register |
 694                                             LIR_Opr::single_size  |
 695                                             LIR_Opr::virtual_mask);
 696         break;
 697 
 698       case T_INT:
 699         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 700                                   LIR_Opr::int_type              |
 701                                   LIR_Opr::cpu_register          |
 702                                   LIR_Opr::single_size           |
 703                                   LIR_Opr::virtual_mask);
 704         break;
 705 
 706       case T_ADDRESS:
 707         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 708                                   LIR_Opr::address_type          |
 709                                   LIR_Opr::cpu_register          |
 710                                   LIR_Opr::single_size           |
 711                                   LIR_Opr::virtual_mask);
 712         break;
 713 
 714       case T_LONG:
 715         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 716                                   LIR_Opr::long_type             |
 717                                   LIR_Opr::cpu_register          |
 718                                   LIR_Opr::double_size           |
 719                                   LIR_Opr::virtual_mask);
 720         break;
 721 
 722 #ifdef __SOFTFP__
 723       case T_FLOAT:
 724         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 725                                   LIR_Opr::float_type  |
 726                                   LIR_Opr::cpu_register |
 727                                   LIR_Opr::single_size |
 728                                   LIR_Opr::virtual_mask);
 729         break;
 730       case T_DOUBLE:
 731         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 732                                   LIR_Opr::double_type |
 733                                   LIR_Opr::cpu_register |
 734                                   LIR_Opr::double_size |
 735                                   LIR_Opr::virtual_mask);
 736         break;
 737 #else // __SOFTFP__
 738       case T_FLOAT:
 739         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 740                                   LIR_Opr::float_type           |
 741                                   LIR_Opr::fpu_register         |
 742                                   LIR_Opr::single_size          |
 743                                   LIR_Opr::virtual_mask);
 744         break;
 745 
 746       case T_DOUBLE:
 747         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 748                                   LIR_Opr::double_type           |
 749                                   LIR_Opr::fpu_register          |
 750                                   LIR_Opr::double_size           |
 751                                   LIR_Opr::virtual_mask);
 752         break;
 753 #endif // __SOFTFP__
 754       default:       ShouldNotReachHere(); res = illegalOpr;
 755     }
 756 
 757 #ifdef ASSERT
 758     res->validate_type();
 759     assert(res->vreg_number() == index, "conversion check");
 760     assert(index >= LIR_Opr::vreg_base, "must start at vreg_base");
 761 
 762     // old-style calculation; check if old and new method are equal
 763     LIR_Opr::OprType t = as_OprType(type);
 764 #ifdef __SOFTFP__
 765     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 766                                t |
 767                                LIR_Opr::cpu_register |
 768                                LIR_Opr::size_for(type) | LIR_Opr::virtual_mask);
 769 #else // __SOFTFP__
 770     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) | t |
 771                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_Opr::fpu_register : LIR_Opr::cpu_register) |
 772                                LIR_Opr::size_for(type) | LIR_Opr::virtual_mask);
 773     assert(res == old_res, "old and new method not equal");
 774 #endif // __SOFTFP__
 775 #endif // ASSERT
 776 
 777     return res;
 778   }
 779 
 780   // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters, as
 781   // the index is platform independent; a double-word stack slot using indices 2 and 3 always
 782   // has index 2.
 783   static LIR_Opr stack(int index, BasicType type) {
 784     LIR_Opr res;
 785     switch (type) {
 786       case T_OBJECT: // fall through
 787       case T_ARRAY:
 788         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 789                                   LIR_Opr::object_type           |
 790                                   LIR_Opr::stack_value           |
 791                                   LIR_Opr::single_size);
 792         break;
 793 
 794       case T_METADATA:
 795         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 796                                   LIR_Opr::metadata_type         |
 797                                   LIR_Opr::stack_value           |
 798                                   LIR_Opr::single_size);
 799         break;
 800       case T_INT:
 801         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 802                                   LIR_Opr::int_type              |
 803                                   LIR_Opr::stack_value           |
 804                                   LIR_Opr::single_size);
 805         break;
 806 
 807       case T_ADDRESS:
 808         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 809                                   LIR_Opr::address_type          |
 810                                   LIR_Opr::stack_value           |
 811                                   LIR_Opr::single_size);
 812         break;
 813 
 814       case T_LONG:
 815         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 816                                   LIR_Opr::long_type             |
 817                                   LIR_Opr::stack_value           |
 818                                   LIR_Opr::double_size);
 819         break;
 820 
 821       case T_FLOAT:
 822         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 823                                   LIR_Opr::float_type            |
 824                                   LIR_Opr::stack_value           |
 825                                   LIR_Opr::single_size);
 826         break;
 827       case T_DOUBLE:
 828         res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 829                                   LIR_Opr::double_type           |
 830                                   LIR_Opr::stack_value           |
 831                                   LIR_Opr::double_size);
 832         break;
 833 
 834       default:       ShouldNotReachHere(); res = illegalOpr;
 835     }
 836 
 837 #ifdef ASSERT
 838     assert(index >= 0, "index must be positive");
 839     assert(index == (int)res->data(), "conversion check");
 840 
 841     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_Opr::data_shift) |
 842                                           LIR_Opr::stack_value           |
 843                                           as_OprType(type)                   |
 844                                           LIR_Opr::size_for(type));
 845     assert(res == old_res, "old and new method not equal");
 846 #endif
 847 
 848     return res;
 849   }
 850 
 851   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 852   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 853   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 854   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 855   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 856   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 857   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 858   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 859   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 860   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 861   static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
 862 
 863   static LIR_Opr value_type(ValueType* type);
 864 };
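
     // Typical factory usage (an illustrative sketch under assumptions, not taken from the
     // original header):
     //   LIR_Opr vreg = LIR_OprFact::virtual_register(LIR_Opr::vreg_base, T_INT); // virtual int register
     //   LIR_Opr c42  = LIR_OprFact::intConst(42);                                // constant operand
     //   LIR_Opr slot = LIR_OprFact::stack(2, T_LONG);                            // double-word stack slot
     // Constants are resource-allocated LIR_Const objects, while register and stack operands
     // are encoded directly in the LIR_Opr value.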
 865 
 866 
 867 //-------------------------------------------------------------------------------
 868 //                   LIR Instructions
 869 //-------------------------------------------------------------------------------
 870 //
 871 // Note:
 872 //  - every instruction has a result operand
 873 //  - every instruction has a CodeEmitInfo operand (can be revisited later)
 874 //  - every instruction has a LIR_OpCode operand
 875 //  - LIR_OpN means an instruction that has N input operands (see the example after the class list below)
 876 //
 877 // class hierarchy:
 878 //
 879 class  LIR_Op;
 880 class    LIR_Op0;
 881 class      LIR_OpLabel;
 882 class    LIR_Op1;
 883 class      LIR_OpBranch;
 884 class      LIR_OpConvert;
 885 class      LIR_OpAllocObj;
 886 class      LIR_OpReturn;
 887 class      LIR_OpRoundFP;
 888 class    LIR_Op2;
 889 class    LIR_OpDelay;
 890 class    LIR_Op3;
 891 class      LIR_OpAllocArray;
 892 class    LIR_Op4;
 893 class    LIR_OpCall;
 894 class      LIR_OpJavaCall;
 895 class      LIR_OpRTCall;
 896 class    LIR_OpArrayCopy;
 897 class    LIR_OpUpdateCRC32;
 898 class    LIR_OpLock;
 899 class    LIR_OpTypeCheck;
 900 class    LIR_OpCompareAndSwap;
 901 class    LIR_OpLoadKlass;
 902 class    LIR_OpProfileCall;
 903 class    LIR_OpProfileType;
 904 #ifdef ASSERT
 905 class    LIR_OpAssert;
 906 #endif
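
     // For example (illustrative, based on the naming convention above): lir_add is emitted as a
     // LIR_Op2 (two input operands), while lir_cmove carries four operands and is a LIR_Op4.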
 907 
 908 // LIR operation codes
 909 enum LIR_Code {
 910     lir_none
 911   , begin_op0
 912       , lir_label
 913       , lir_nop
 914       , lir_std_entry
 915       , lir_osr_entry
 916       , lir_fpop_raw
 917       , lir_breakpoint
 918       , lir_rtcall
 919       , lir_membar
 920       , lir_membar_acquire
 921       , lir_membar_release
 922       , lir_membar_loadload
 923       , lir_membar_storestore
 924       , lir_membar_loadstore
 925       , lir_membar_storeload
 926       , lir_get_thread
 927       , lir_on_spin_wait
 928   , end_op0
 929   , begin_op1
 930       , lir_fxch
 931       , lir_fld
 932       , lir_push
 933       , lir_pop
 934       , lir_null_check
 935       , lir_return
 936       , lir_leal
 937       , lir_move
 938       , lir_convert
 939       , lir_alloc_object
 940       , lir_monaddr
 941       , lir_roundfp
 942       , lir_safepoint
 943       , lir_unwind
 944       , lir_load_klass
 945   , end_op1
 946   , begin_op2
 947       , lir_branch
 948       , lir_cond_float_branch
 949       , lir_cmp
 950       , lir_cmp_l2i
 951       , lir_ucmp_fd2i
 952       , lir_cmp_fd2i
 953       , lir_add
 954       , lir_sub
 955       , lir_mul
 956       , lir_div
 957       , lir_rem
 958       , lir_sqrt
 959       , lir_abs
 960       , lir_neg
 961       , lir_tan
 962       , lir_f2hf
 963       , lir_hf2f
 964       , lir_log10
 965       , lir_logic_and
 966       , lir_logic_or
 967       , lir_logic_xor
 968       , lir_shl
 969       , lir_shr
 970       , lir_ushr
 971       , lir_alloc_array
 972       , lir_throw
 973       , lir_xadd
 974       , lir_xchg
 975   , end_op2
 976   , begin_op3
 977       , lir_idiv
 978       , lir_irem
 979       , lir_fmad
 980       , lir_fmaf
 981   , end_op3
 982   , begin_op4
 983       , lir_cmove
 984   , end_op4
 985   , begin_opJavaCall
 986       , lir_static_call
 987       , lir_optvirtual_call
 988       , lir_icvirtual_call
 989       , lir_dynamic_call
 990   , end_opJavaCall
 991   , begin_opArrayCopy
 992       , lir_arraycopy
 993   , end_opArrayCopy
 994   , begin_opUpdateCRC32
 995       , lir_updatecrc32
 996   , end_opUpdateCRC32
 997   , begin_opLock
 998     , lir_lock
 999     , lir_unlock
1000   , end_opLock
1001   , begin_delay_slot
1002     , lir_delay_slot
1003   , end_delay_slot
1004   , begin_opTypeCheck
1005     , lir_instanceof
1006     , lir_checkcast
1007     , lir_store_check
1008   , end_opTypeCheck
1009   , begin_opCompareAndSwap
1010     , lir_cas_long
1011     , lir_cas_obj
1012     , lir_cas_int
1013   , end_opCompareAndSwap
1014   , begin_opMDOProfile
1015     , lir_profile_call
1016     , lir_profile_type
1017   , end_opMDOProfile
1018   , begin_opAssert
1019     , lir_assert
1020   , end_opAssert
1021 #if INCLUDE_ZGC
1022   , begin_opXLoadBarrierTest
1023     , lir_xloadbarrier_test
1024   , end_opXLoadBarrierTest
1025 #endif
1026 };
1027 
1028 
1029 enum LIR_Condition {
1030     lir_cond_equal
1031   , lir_cond_notEqual
1032   , lir_cond_less
1033   , lir_cond_lessEqual
1034   , lir_cond_greaterEqual
1035   , lir_cond_greater
1036   , lir_cond_belowEqual
1037   , lir_cond_aboveEqual
1038   , lir_cond_always
1039   , lir_cond_unknown = -1
1040 };
1041 
1042 
1043 enum LIR_PatchCode {
1044   lir_patch_none,
1045   lir_patch_low,
1046   lir_patch_high,
1047   lir_patch_normal
1048 };
1049 
1050 
1051 enum LIR_MoveKind {
1052   lir_move_normal,
1053   lir_move_volatile,
1054   lir_move_wide,
1055   lir_move_max_flag
1056 };
1057 
1058 
1059 // --------------------------------------------------
1060 // LIR_Op
1061 // --------------------------------------------------
1062 class LIR_Op: public CompilationResourceObj {
1063  friend class LIR_OpVisitState;
1064 
1065 #ifdef ASSERT
1066  private:
1067   const char *  _file;
1068   int           _line;
1069 #endif
1070 
1071  protected:
1072   LIR_Opr       _result;
1073   unsigned short _code;
1074   unsigned short _flags;
1075   CodeEmitInfo* _info;
1076   int           _id;     // value id for register allocation
1077   int           _fpu_pop_count;
1078   Instruction*  _source; // for debugging
1079 
1080   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1081 
1082  protected:
1083   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1084 
1085  public:
1086   LIR_Op()
1087     :
1088 #ifdef ASSERT
1089       _file(nullptr)
1090     , _line(0),
1091 #endif
1092       _result(LIR_OprFact::illegalOpr)
1093     , _code(lir_none)
1094     , _flags(0)
1095     , _info(nullptr)
1096     , _id(-1)
1097     , _fpu_pop_count(0)
1098     , _source(nullptr) {}
1099 
1100   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1101     :
1102 #ifdef ASSERT
1103       _file(nullptr)
1104     , _line(0),
1105 #endif
1106       _result(result)
1107     , _code(code)
1108     , _flags(0)
1109     , _info(info)
1110     , _id(-1)
1111     , _fpu_pop_count(0)
1112     , _source(nullptr) {}
1113 
1114   CodeEmitInfo* info() const                  { return _info;   }
1115   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1116   LIR_Opr result_opr() const                  { return _result; }
1117   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1118 
1119 #ifdef ASSERT
1120   void set_file_and_line(const char * file, int line) {
1121     _file = file;
1122     _line = line;
1123   }
1124 #endif
1125 
1126   virtual const char * name() const PRODUCT_RETURN_NULL;
1127   virtual void visit(LIR_OpVisitState* state);
1128 
1129   int id()             const                  { return _id;     }
1130   void set_id(int id)                         { _id = id; }
1131 
1132   // FPU stack simulation helpers -- only used on Intel
1133   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1134   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1135   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1136 
1137   Instruction* source() const                 { return _source; }
1138   void set_source(Instruction* ins)           { _source = ins; }
1139 
1140   virtual void emit_code(LIR_Assembler* masm) = 0;
1141   virtual void print_instr(outputStream* out) const   = 0;
1142   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1143 
1144   virtual bool is_patching() { return false; }
1145   virtual LIR_OpCall* as_OpCall() { return nullptr; }
1146   virtual LIR_OpJavaCall* as_OpJavaCall() { return nullptr; }
1147   virtual LIR_OpLabel* as_OpLabel() { return nullptr; }
1148   virtual LIR_OpDelay* as_OpDelay() { return nullptr; }
1149   virtual LIR_OpLock* as_OpLock() { return nullptr; }
1150   virtual LIR_OpAllocArray* as_OpAllocArray() { return nullptr; }
1151   virtual LIR_OpAllocObj* as_OpAllocObj() { return nullptr; }
1152   virtual LIR_OpRoundFP* as_OpRoundFP() { return nullptr; }
1153   virtual LIR_OpBranch* as_OpBranch() { return nullptr; }
1154   virtual LIR_OpReturn* as_OpReturn() { return nullptr; }
1155   virtual LIR_OpRTCall* as_OpRTCall() { return nullptr; }
1156   virtual LIR_OpConvert* as_OpConvert() { return nullptr; }
1157   virtual LIR_Op0* as_Op0() { return nullptr; }
1158   virtual LIR_Op1* as_Op1() { return nullptr; }
1159   virtual LIR_Op2* as_Op2() { return nullptr; }
1160   virtual LIR_Op3* as_Op3() { return nullptr; }
1161   virtual LIR_Op4* as_Op4() { return nullptr; }
1162   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return nullptr; }
1163   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return nullptr; }
1164   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return nullptr; }
1165   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return nullptr; }
1166   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return nullptr; }
1167   virtual LIR_OpProfileCall* as_OpProfileCall() { return nullptr; }
1168   virtual LIR_OpProfileType* as_OpProfileType() { return nullptr; }
1169 #ifdef ASSERT
1170   virtual LIR_OpAssert* as_OpAssert() { return nullptr; }
1171 #endif
1172 
1173   virtual void verify() const {}
1174 };
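
     // The as_OpXxx() virtuals above act as a lightweight checked downcast (an illustrative
     // note, not part of the original header): for an arbitrary LIR_Op* op, op->as_OpBranch()
     // returns the operation as a LIR_OpBranch* if it is one, and nullptr otherwise.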
1175 
1176 // for calls
1177 class LIR_OpCall: public LIR_Op {
1178  friend class LIR_OpVisitState;
1179 
1180  protected:
1181   address      _addr;
1182   LIR_OprList* _arguments;
1183  protected:
1184   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1185              LIR_OprList* arguments, CodeEmitInfo* info = nullptr)
1186     : LIR_Op(code, result, info)
1187     , _addr(addr)
1188     , _arguments(arguments) {}
1189 
1190  public:
1191   address addr() const                           { return _addr; }
1192   const LIR_OprList* arguments() const           { return _arguments; }
1193   virtual LIR_OpCall* as_OpCall()                { return this; }
1194 };
1195 
1196 
1197 // --------------------------------------------------
1198 // LIR_OpJavaCall
1199 // --------------------------------------------------
1200 class LIR_OpJavaCall: public LIR_OpCall {
1201  friend class LIR_OpVisitState;
1202 
1203  private:
1204   ciMethod* _method;
1205   LIR_Opr   _receiver;
1206   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1207 
1208  public:
1209   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1210                  LIR_Opr receiver, LIR_Opr result,
1211                  address addr, LIR_OprList* arguments,
1212                  CodeEmitInfo* info)
1213   : LIR_OpCall(code, addr, result, arguments, info)
1214   , _method(method)
1215   , _receiver(receiver)
1216   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1217   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1218 
1219   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1220                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1221                  LIR_OprList* arguments, CodeEmitInfo* info)
1222   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1223   , _method(method)
1224   , _receiver(receiver)
1225   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1226   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1227 
1228   LIR_Opr receiver() const                       { return _receiver; }
1229   ciMethod* method() const                       { return _method;   }
1230 
1231   // JSR 292 support.
1232   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1233   bool is_method_handle_invoke() const {
1234     return method()->is_compiled_lambda_form() ||   // Java-generated lambda form
1235            method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1236   }
1237 
1238   virtual void emit_code(LIR_Assembler* masm);
1239   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1240   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1241 };
1242 
1243 // --------------------------------------------------
1244 // LIR_OpLabel
1245 // --------------------------------------------------
1246 // Location where a branch can continue
1247 class LIR_OpLabel: public LIR_Op {
1248  friend class LIR_OpVisitState;
1249 
1250  private:
1251   Label* _label;
1252  public:
1253   LIR_OpLabel(Label* lbl)
1254    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, nullptr)
1255    , _label(lbl)                                 {}
1256   Label* label() const                           { return _label; }
1257 
1258   virtual void emit_code(LIR_Assembler* masm);
1259   virtual LIR_OpLabel* as_OpLabel() { return this; }
1260   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1261 };
1262 
1263 // LIR_OpArrayCopy
1264 class LIR_OpArrayCopy: public LIR_Op {
1265  friend class LIR_OpVisitState;
1266 
1267  private:
1268   ArrayCopyStub*  _stub;
1269   LIR_Opr   _src;
1270   LIR_Opr   _src_pos;
1271   LIR_Opr   _dst;
1272   LIR_Opr   _dst_pos;
1273   LIR_Opr   _length;
1274   LIR_Opr   _tmp;
1275   ciArrayKlass* _expected_type;
1276   int       _flags;
1277 
1278 public:
1279   enum Flags {
1280     src_null_check         = 1 << 0,
1281     dst_null_check         = 1 << 1,
1282     src_pos_positive_check = 1 << 2,
1283     dst_pos_positive_check = 1 << 3,
1284     length_positive_check  = 1 << 4,
1285     src_range_check        = 1 << 5,
1286     dst_range_check        = 1 << 6,
1287     type_check             = 1 << 7,
1288     overlapping            = 1 << 8,
1289     unaligned              = 1 << 9,
1290     src_objarray           = 1 << 10,
1291     dst_objarray           = 1 << 11,
1292     all_flags              = (1 << 12) - 1
1293   };
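       //
       // The 'flags' constructor argument is a bitmask of these values; for instance
       // (illustrative only), flags = src_null_check | dst_null_check | length_positive_check
       // requests exactly those runtime checks before the copy is performed.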
1294 
1295   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1296                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1297 
1298   LIR_Opr src() const                            { return _src; }
1299   LIR_Opr src_pos() const                        { return _src_pos; }
1300   LIR_Opr dst() const                            { return _dst; }
1301   LIR_Opr dst_pos() const                        { return _dst_pos; }
1302   LIR_Opr length() const                         { return _length; }
1303   LIR_Opr tmp() const                            { return _tmp; }
1304   int flags() const                              { return _flags; }
1305   ciArrayKlass* expected_type() const            { return _expected_type; }
1306   ArrayCopyStub* stub() const                    { return _stub; }
1307 
1308   virtual void emit_code(LIR_Assembler* masm);
1309   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1310   void print_instr(outputStream* out) const PRODUCT_RETURN;
1311 };
1312 
1313 // LIR_OpUpdateCRC32
1314 class LIR_OpUpdateCRC32: public LIR_Op {
1315   friend class LIR_OpVisitState;
1316 
1317 private:
1318   LIR_Opr   _crc;
1319   LIR_Opr   _val;
1320 
1321 public:
1322 
1323   LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
1324 
1325   LIR_Opr crc() const                            { return _crc; }
1326   LIR_Opr val() const                            { return _val; }
1327 
1328   virtual void emit_code(LIR_Assembler* masm);
1329   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
1330   void print_instr(outputStream* out) const PRODUCT_RETURN;
1331 };
1332 
1333 // --------------------------------------------------
1334 // LIR_Op0
1335 // --------------------------------------------------
1336 class LIR_Op0: public LIR_Op {
1337  friend class LIR_OpVisitState;
1338 
1339  public:
1340   LIR_Op0(LIR_Code code)
1341    : LIR_Op(code, LIR_OprFact::illegalOpr, nullptr)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1342   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = nullptr)
1343    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1344 
1345   virtual void emit_code(LIR_Assembler* masm);
1346   virtual LIR_Op0* as_Op0() { return this; }
1347   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1348 };
1349 
1350 
1351 // --------------------------------------------------
1352 // LIR_Op1
1353 // --------------------------------------------------
1354 
1355 class LIR_Op1: public LIR_Op {
1356  friend class LIR_OpVisitState;
1357 
1358  protected:
1359   LIR_Opr         _opr;   // input operand
1360   BasicType       _type;  // operand type
1361   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1362 
1363   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1364 
1365   void set_kind(LIR_MoveKind kind) {
1366     assert(code() == lir_move, "must be");
1367     _flags = kind;
1368   }
1369 
1370  public:
1371   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = nullptr)
1372     : LIR_Op(code, result, info)
1373     , _opr(opr)
1374     , _type(type)
1375     , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1376 
1377   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1378     : LIR_Op(code, result, info)
1379     , _opr(opr)
1380     , _type(type)
1381     , _patch(patch)                    {
1382     assert(code == lir_move, "must be");
1383     set_kind(kind);
1384   }
1385 
1386   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1387     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1388     , _opr(opr)
1389     , _type(T_ILLEGAL)
1390     , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1391 
1392   LIR_Opr in_opr()           const               { return _opr;   }
1393   LIR_PatchCode patch_code() const               { return _patch; }
1394   BasicType type()           const               { return _type;  }
1395 
1396   LIR_MoveKind move_kind() const {
1397     assert(code() == lir_move, "must be");
1398     return (LIR_MoveKind)_flags;
1399   }
1400 
1401   virtual bool is_patching() { return _patch != lir_patch_none; }
1402   virtual void emit_code(LIR_Assembler* masm);
1403   virtual LIR_Op1* as_Op1() { return this; }
1404   virtual const char * name() const PRODUCT_RETURN_NULL;
1405 
1406   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1407 
1408   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1409   virtual void verify() const;
1410 };
1411 
1412 
1413 // for runtime calls
1414 class LIR_OpRTCall: public LIR_OpCall {
1415  friend class LIR_OpVisitState;
1416 
1417  private:
1418   LIR_Opr _tmp;
1419  public:
1420   LIR_OpRTCall(address addr, LIR_Opr tmp,
1421                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = nullptr)
1422     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1423     , _tmp(tmp) {}
1424 
1425   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1426   virtual void emit_code(LIR_Assembler* masm);
1427   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1428 
1429   LIR_Opr tmp() const                            { return _tmp; }
1430 
1431   virtual void verify() const;
1432 };
1433 
1434 
1435 
1436 class LIR_OpReturn: public LIR_Op1 {
1437  friend class LIR_OpVisitState;
1438 
1439  private:
1440   C1SafepointPollStub* _stub;
1441 
1442  public:
1443   LIR_OpReturn(LIR_Opr opr);
1444 
1445   C1SafepointPollStub* stub() const { return _stub; }
1446   virtual LIR_OpReturn* as_OpReturn() { return this; }
1447 };
1448 
1449 class ConversionStub;
1450 
1451 class LIR_OpConvert: public LIR_Op1 {
1452  friend class LIR_OpVisitState;
1453 
1454  private:
1455    Bytecodes::Code _bytecode;
1456    ConversionStub* _stub;
1457 
1458  public:
1459    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1460      : LIR_Op1(lir_convert, opr, result)
1461      , _bytecode(code)
1462      , _stub(stub)                               {}
1463 
1464   Bytecodes::Code bytecode() const               { return _bytecode; }
1465   ConversionStub* stub() const                   { return _stub; }
1466 
1467   virtual void emit_code(LIR_Assembler* masm);
1468   virtual LIR_OpConvert* as_OpConvert() { return this; }
1469   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1470 
1471   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1472 };
1473 
1474 
1475 // LIR_OpAllocObj
1476 class LIR_OpAllocObj : public LIR_Op1 {
1477  friend class LIR_OpVisitState;
1478 
1479  private:
1480   LIR_Opr _tmp1;
1481   LIR_Opr _tmp2;
1482   LIR_Opr _tmp3;
1483   LIR_Opr _tmp4;
1484   int     _hdr_size;
1485   int     _obj_size;
1486   CodeStub* _stub;
1487   bool    _init_check;
1488 
1489  public:
1490   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1491                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1492                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1493     : LIR_Op1(lir_alloc_object, klass, result)
1494     , _tmp1(t1)
1495     , _tmp2(t2)
1496     , _tmp3(t3)
1497     , _tmp4(t4)
1498     , _hdr_size(hdr_size)
1499     , _obj_size(obj_size)
1500     , _stub(stub)
1501     , _init_check(init_check)                    { }
1502 
1503   LIR_Opr klass()        const                   { return in_opr();     }
1504   LIR_Opr obj()          const                   { return result_opr(); }
1505   LIR_Opr tmp1()         const                   { return _tmp1;        }
1506   LIR_Opr tmp2()         const                   { return _tmp2;        }
1507   LIR_Opr tmp3()         const                   { return _tmp3;        }
1508   LIR_Opr tmp4()         const                   { return _tmp4;        }
1509   int     header_size()  const                   { return _hdr_size;    }
1510   int     object_size()  const                   { return _obj_size;    }
1511   bool    init_check()   const                   { return _init_check;  }
1512   CodeStub* stub()       const                   { return _stub;        }
1513 
1514   virtual void emit_code(LIR_Assembler* masm);
1515   virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
1516   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1517 };
1518 
1519 
1520 // LIR_OpRoundFP
1521 class LIR_OpRoundFP : public LIR_Op1 {
1522  friend class LIR_OpVisitState;
1523 
1524  private:
1525   LIR_Opr _tmp;
1526 
1527  public:
1528   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1529     : LIR_Op1(lir_roundfp, reg, result)
1530     , _tmp(stack_loc_temp) {}
1531 
1532   LIR_Opr tmp() const                            { return _tmp; }
1533   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1534   void print_instr(outputStream* out) const PRODUCT_RETURN;
1535 };
1536 
1537 // LIR_OpTypeCheck
1538 class LIR_OpTypeCheck: public LIR_Op {
1539  friend class LIR_OpVisitState;
1540 
1541  private:
1542   LIR_Opr       _object;
1543   LIR_Opr       _array;
1544   ciKlass*      _klass;
1545   LIR_Opr       _tmp1;
1546   LIR_Opr       _tmp2;
1547   LIR_Opr       _tmp3;
1548   CodeEmitInfo* _info_for_patch;
1549   CodeEmitInfo* _info_for_exception;
1550   CodeStub*     _stub;
1551   ciMethod*     _profiled_method;
1552   int           _profiled_bci;
1553   bool          _should_profile;
1554   bool          _fast_check;
1555 
1556 public:
1557   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1558                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1559                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1560   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1561                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1562 
1563   LIR_Opr object() const                         { return _object;         }
1564   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1565   LIR_Opr tmp1() const                           { return _tmp1;           }
1566   LIR_Opr tmp2() const                           { return _tmp2;           }
1567   LIR_Opr tmp3() const                           { return _tmp3;           }
1568   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1569   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1570   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1571   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1572   CodeStub* stub() const                         { return _stub;           }
1573 
1574   // MethodData* profiling
1575   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1576   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1577   void set_should_profile(bool b)                { _should_profile = b;       }
1578   ciMethod* profiled_method() const              { return _profiled_method;   }
1579   int       profiled_bci() const                 { return _profiled_bci;      }
1580   bool      should_profile() const               { return _should_profile;    }
1581 
1582   virtual bool is_patching() { return _info_for_patch != nullptr; }
1583   virtual void emit_code(LIR_Assembler* masm);
1584   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1585   void print_instr(outputStream* out) const PRODUCT_RETURN;
1586 };
1587 
1588 // LIR_Op2
1589 class LIR_Op2: public LIR_Op {
1590  friend class LIR_OpVisitState;
1591 
1592   int  _fpu_stack_size; // for sin/cos implementation on Intel
1593 
1594  protected:
1595   LIR_Opr   _opr1;
1596   LIR_Opr   _opr2;
1597   LIR_Opr   _tmp1;
1598   LIR_Opr   _tmp2;
1599   LIR_Opr   _tmp3;
1600   LIR_Opr   _tmp4;
1601   LIR_Opr   _tmp5;
1602   LIR_Condition _condition;
1603   BasicType _type;
1604 
1605   void verify() const;
1606 
1607  public:
1608   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = nullptr, BasicType type = T_ILLEGAL)
1609     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1610     , _fpu_stack_size(0)
1611     , _opr1(opr1)
1612     , _opr2(opr2)
1613     , _tmp1(LIR_OprFact::illegalOpr)
1614     , _tmp2(LIR_OprFact::illegalOpr)
1615     , _tmp3(LIR_OprFact::illegalOpr)
1616     , _tmp4(LIR_OprFact::illegalOpr)
1617     , _tmp5(LIR_OprFact::illegalOpr)
1618     , _condition(condition)
1619     , _type(type) {
1620     assert(code == lir_cmp || code == lir_branch || code == lir_cond_float_branch || code == lir_assert, "code check");
1621   }
1622 
1623   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1624     : LIR_Op(code, result, nullptr)
1625     , _fpu_stack_size(0)
1626     , _opr1(opr1)
1627     , _opr2(opr2)
1628     , _tmp1(LIR_OprFact::illegalOpr)
1629     , _tmp2(LIR_OprFact::illegalOpr)
1630     , _tmp3(LIR_OprFact::illegalOpr)
1631     , _tmp4(LIR_OprFact::illegalOpr)
1632     , _tmp5(LIR_OprFact::illegalOpr)
1633     , _condition(condition)
1634     , _type(type) {
1635     assert(code == lir_cmove, "code check");
1636     assert(type != T_ILLEGAL, "cmove should have type");
1637   }
1638 
1639   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1640           CodeEmitInfo* info = nullptr, BasicType type = T_ILLEGAL)
1641     : LIR_Op(code, result, info)
1642     , _fpu_stack_size(0)
1643     , _opr1(opr1)
1644     , _opr2(opr2)
1645     , _tmp1(LIR_OprFact::illegalOpr)
1646     , _tmp2(LIR_OprFact::illegalOpr)
1647     , _tmp3(LIR_OprFact::illegalOpr)
1648     , _tmp4(LIR_OprFact::illegalOpr)
1649     , _tmp5(LIR_OprFact::illegalOpr)
1650     , _condition(lir_cond_unknown)
1651     , _type(type) {
1652     assert(code != lir_cmp && code != lir_branch && code != lir_cond_float_branch && is_in_range(code, begin_op2, end_op2), "code check");
1653   }
1654 
1655   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1656           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1657     : LIR_Op(code, result, nullptr)
1658     , _fpu_stack_size(0)
1659     , _opr1(opr1)
1660     , _opr2(opr2)
1661     , _tmp1(tmp1)
1662     , _tmp2(tmp2)
1663     , _tmp3(tmp3)
1664     , _tmp4(tmp4)
1665     , _tmp5(tmp5)
1666     , _condition(lir_cond_unknown)
1667     , _type(T_ILLEGAL)    {
1668     assert(code != lir_cmp && code != lir_branch && code != lir_cond_float_branch && is_in_range(code, begin_op2, end_op2), "code check");
1669   }
1670 
1671   LIR_Opr in_opr1() const                        { return _opr1; }
1672   LIR_Opr in_opr2() const                        { return _opr2; }
1673   BasicType type()  const                        { return _type; }
1674   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1675   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1676   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1677   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1678   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1679   LIR_Condition condition() const  {
1680     assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch || code() == lir_assert, "only valid for branch and assert"); return _condition;
1681   }
1682   void set_condition(LIR_Condition condition) {
1683     assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch, "only valid for branch"); _condition = condition;
1684   }
1685 
1686   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1687   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1688 
1689   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1690   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1691 
1692   virtual void emit_code(LIR_Assembler* masm);
1693   virtual LIR_Op2* as_Op2() { return this; }
1694   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1695 };
1696 
1697 class LIR_OpBranch: public LIR_Op2 {
1698  friend class LIR_OpVisitState;
1699 
1700  private:
1701   Label*        _label;
1702   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1703   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1704   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1705 
1706  public:
1707   LIR_OpBranch(LIR_Condition cond, Label* lbl)
1708     : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*) nullptr)
1709     , _label(lbl)
1710     , _block(nullptr)
1711     , _ublock(nullptr)
1712     , _stub(nullptr) { }
1713 
1714   LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
1715   LIR_OpBranch(LIR_Condition cond, CodeStub* stub);
1716 
1717   // for unordered comparisons
1718   LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);
1719 
1720   LIR_Condition cond() const {
1721     return condition();
1722   }
1723 
1724   void set_cond(LIR_Condition cond) {
1725     set_condition(cond);
1726   }
1727 
1728   Label*        label()       const              { return _label;       }
1729   BlockBegin*   block()       const              { return _block;       }
1730   BlockBegin*   ublock()      const              { return _ublock;      }
1731   CodeStub*     stub()        const              { return _stub;        }
1732 
1733   void          change_block(BlockBegin* b);
1734   void          change_ublock(BlockBegin* b);
1735   void          negate_cond();
1736 
1737   virtual void emit_code(LIR_Assembler* masm);
1738   virtual LIR_OpBranch* as_OpBranch() { return this; }
1739   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1740 };
1741 
1742 class LIR_OpAllocArray : public LIR_Op {
1743  friend class LIR_OpVisitState;
1744 
1745  private:
1746   LIR_Opr   _klass;
1747   LIR_Opr   _len;
1748   LIR_Opr   _tmp1;
1749   LIR_Opr   _tmp2;
1750   LIR_Opr   _tmp3;
1751   LIR_Opr   _tmp4;
1752   CodeStub* _stub;
1753   BasicType _type;
1754   bool      _zero_array;
1755 
1756  public:
1757   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array)
1758     : LIR_Op(lir_alloc_array, result, nullptr)
1759     , _klass(klass)
1760     , _len(len)
1761     , _tmp1(t1)
1762     , _tmp2(t2)
1763     , _tmp3(t3)
1764     , _tmp4(t4)
1765     , _stub(stub)
1766     , _type(type)
1767     , _zero_array(zero_array) {}
1768 
1769   LIR_Opr   klass()   const                      { return _klass;       }
1770   LIR_Opr   len()     const                      { return _len;         }
1771   LIR_Opr   obj()     const                      { return result_opr(); }
1772   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1773   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1774   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1775   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1776   BasicType type()    const                      { return _type;        }
1777   CodeStub* stub()    const                      { return _stub;        }
1778   bool zero_array()   const                      { return _zero_array;  }
1779 
1780   virtual void emit_code(LIR_Assembler* masm);
1781   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
1782   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1783 };
1784 
1785 
1786 class LIR_Op3: public LIR_Op {
1787  friend class LIR_OpVisitState;
1788 
1789  private:
1790   LIR_Opr _opr1;
1791   LIR_Opr _opr2;
1792   LIR_Opr _opr3;
1793  public:
1794   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = nullptr)
1795     : LIR_Op(code, result, info)
1796     , _opr1(opr1)
1797     , _opr2(opr2)
1798     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1799   LIR_Opr in_opr1() const                        { return _opr1; }
1800   LIR_Opr in_opr2() const                        { return _opr2; }
1801   LIR_Opr in_opr3() const                        { return _opr3; }
1802 
1803   virtual void emit_code(LIR_Assembler* masm);
1804   virtual LIR_Op3* as_Op3() { return this; }
1805   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1806 };
1807 
1808 class LIR_Op4: public LIR_Op {
1809   friend class LIR_OpVisitState;
1810  protected:
1811   LIR_Opr   _opr1;
1812   LIR_Opr   _opr2;
1813   LIR_Opr   _opr3;
1814   LIR_Opr   _opr4;
1815   LIR_Opr   _tmp1;
1816   LIR_Opr   _tmp2;
1817   LIR_Opr   _tmp3;
1818   LIR_Opr   _tmp4;
1819   LIR_Opr   _tmp5;
1820   LIR_Condition _condition;
1821   BasicType _type;
1822 
1823  public:
1824   LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4,
1825           LIR_Opr result, BasicType type)
1826     : LIR_Op(code, result, nullptr)
1827     , _opr1(opr1)
1828     , _opr2(opr2)
1829     , _opr3(opr3)
1830     , _opr4(opr4)
1831     , _tmp1(LIR_OprFact::illegalOpr)
1832     , _tmp2(LIR_OprFact::illegalOpr)
1833     , _tmp3(LIR_OprFact::illegalOpr)
1834     , _tmp4(LIR_OprFact::illegalOpr)
1835     , _tmp5(LIR_OprFact::illegalOpr)
1836     , _condition(condition)
1837     , _type(type) {
1838     assert(code == lir_cmove, "code check");
1839     assert(type != T_ILLEGAL, "cmove should have type");
1840   }
1841 
1842   LIR_Opr in_opr1() const                        { return _opr1; }
1843   LIR_Opr in_opr2() const                        { return _opr2; }
1844   LIR_Opr in_opr3() const                        { return _opr3; }
1845   LIR_Opr in_opr4() const                        { return _opr4; }
1846   BasicType type()  const                        { return _type; }
1847   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1848   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1849   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1850   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1851   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1852 
1853   LIR_Condition condition() const                { return _condition; }
1854   void set_condition(LIR_Condition condition)    { _condition = condition; }
1855 
1856   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1857   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1858   void set_in_opr3(LIR_Opr opr)                  { _opr3 = opr; }
1859   void set_in_opr4(LIR_Opr opr)                  { _opr4 = opr; }
1860   virtual void emit_code(LIR_Assembler* masm);
1861   virtual LIR_Op4* as_Op4() { return this; }
1862 
1863   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1864 };
1865 
1866 //--------------------------------
1867 class LabelObj: public CompilationResourceObj {
1868  private:
1869   Label _label;
1870  public:
1871   LabelObj()                                     {}
1872   Label* label()                                 { return &_label; }
1873 };
1874 
1875 
1876 class LIR_OpLock: public LIR_Op {
1877  friend class LIR_OpVisitState;
1878 
1879  private:
1880   LIR_Opr _hdr;
1881   LIR_Opr _obj;
1882   LIR_Opr _lock;
1883   LIR_Opr _scratch;
1884   CodeStub* _stub;
1885  public:
1886   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1887     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1888     , _hdr(hdr)
1889     , _obj(obj)
1890     , _lock(lock)
1891     , _scratch(scratch)
1892     , _stub(stub)                      {}
1893 
1894   LIR_Opr hdr_opr() const                        { return _hdr; }
1895   LIR_Opr obj_opr() const                        { return _obj; }
1896   LIR_Opr lock_opr() const                       { return _lock; }
1897   LIR_Opr scratch_opr() const                    { return _scratch; }
1898   CodeStub* stub() const                         { return _stub; }
1899 
1900   virtual void emit_code(LIR_Assembler* masm);
1901   virtual LIR_OpLock* as_OpLock() { return this; }
1902   void print_instr(outputStream* out) const PRODUCT_RETURN;
1903 };
1904 
1905 class LIR_OpLoadKlass: public LIR_Op {
1906   friend class LIR_OpVisitState;
1907 
1908  private:
1909   LIR_Opr _obj;
1910  public:
1911   LIR_OpLoadKlass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info)
1912     : LIR_Op(lir_load_klass, result, info)
1913     , _obj(obj)
1914     {}
1915 
1916   LIR_Opr obj()        const { return _obj;  }
1917 
1918   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return this; }
1919   virtual void emit_code(LIR_Assembler* masm);
1920   void print_instr(outputStream* out) const PRODUCT_RETURN;
1921 };
1922 
1923 class LIR_OpDelay: public LIR_Op {
1924  friend class LIR_OpVisitState;
1925 
1926  private:
1927   LIR_Op* _op;
1928 
1929  public:
1930   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1931     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1932     _op(op) {
1933     assert(op->code() == lir_nop, "should be filling with nops");
1934   }
1935   virtual void emit_code(LIR_Assembler* masm);
1936   virtual LIR_OpDelay* as_OpDelay() { return this; }
1937   void print_instr(outputStream* out) const PRODUCT_RETURN;
1938   LIR_Op* delay_op() const { return _op; }
1939   CodeEmitInfo* call_info() const { return info(); }
1940 };
1941 
1942 #ifdef ASSERT
1943 // LIR_OpAssert
1944 class LIR_OpAssert : public LIR_Op2 {
1945  friend class LIR_OpVisitState;
1946 
1947  private:
1948   const char* _msg;
1949   bool        _halt;
1950 
1951  public:
1952   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1953     : LIR_Op2(lir_assert, condition, opr1, opr2)
1954     , _msg(msg)
1955     , _halt(halt) {
1956   }
1957 
1958   const char* msg() const                        { return _msg; }
1959   bool        halt() const                       { return _halt; }
1960 
1961   virtual void emit_code(LIR_Assembler* masm);
1962   virtual LIR_OpAssert* as_OpAssert()            { return this; }
1963   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1964 };
1965 #endif
1966 
1967 // LIR_OpCompareAndSwap
1968 class LIR_OpCompareAndSwap : public LIR_Op {
1969  friend class LIR_OpVisitState;
1970 
1971  private:
1972   LIR_Opr _addr;
1973   LIR_Opr _cmp_value;
1974   LIR_Opr _new_value;
1975   LIR_Opr _tmp1;
1976   LIR_Opr _tmp2;
1977 
1978  public:
1979   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1980                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1981     : LIR_Op(code, result, nullptr)  // no info
1982     , _addr(addr)
1983     , _cmp_value(cmp_value)
1984     , _new_value(new_value)
1985     , _tmp1(t1)
1986     , _tmp2(t2)                                  { }
1987 
1988   LIR_Opr addr()        const                    { return _addr;  }
1989   LIR_Opr cmp_value()   const                    { return _cmp_value; }
1990   LIR_Opr new_value()   const                    { return _new_value; }
1991   LIR_Opr tmp1()        const                    { return _tmp1;      }
1992   LIR_Opr tmp2()        const                    { return _tmp2;      }
1993 
1994   virtual void emit_code(LIR_Assembler* masm);
1995   virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
1996   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1997 };
1998 
1999 // LIR_OpProfileCall
2000 class LIR_OpProfileCall : public LIR_Op {
2001  friend class LIR_OpVisitState;
2002 
2003  private:
2004   ciMethod* _profiled_method;
2005   int       _profiled_bci;
2006   ciMethod* _profiled_callee;
2007   LIR_Opr   _mdo;
2008   LIR_Opr   _recv;
2009   LIR_Opr   _tmp1;
2010   ciKlass*  _known_holder;
2011 
2012  public:
2013   // Destroys recv
2014   LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
2015     : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, nullptr)  // no result, no info
2016     , _profiled_method(profiled_method)
2017     , _profiled_bci(profiled_bci)
2018     , _profiled_callee(profiled_callee)
2019     , _mdo(mdo)
2020     , _recv(recv)
2021     , _tmp1(t1)
2022     , _known_holder(known_holder)                { }
2023 
2024   ciMethod* profiled_method() const              { return _profiled_method;  }
2025   int       profiled_bci()    const              { return _profiled_bci;     }
2026   ciMethod* profiled_callee() const              { return _profiled_callee;  }
2027   LIR_Opr   mdo()             const              { return _mdo;              }
2028   LIR_Opr   recv()            const              { return _recv;             }
2029   LIR_Opr   tmp1()            const              { return _tmp1;             }
2030   ciKlass*  known_holder()    const              { return _known_holder;     }
2031 
2032   virtual void emit_code(LIR_Assembler* masm);
2033   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
2034   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2035   bool should_profile_receiver_type() const {
2036     bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
2037     bool callee_is_private = _profiled_callee->is_loaded() && _profiled_callee->is_private();
2038     Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
2039     bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !callee_is_private) || bc == Bytecodes::_invokeinterface;
2040     return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
2041   }
2042 };
2043 
2044 // LIR_OpProfileType
2045 class LIR_OpProfileType : public LIR_Op {
2046  friend class LIR_OpVisitState;
2047 
2048  private:
2049   LIR_Opr      _mdp;
2050   LIR_Opr      _obj;
2051   LIR_Opr      _tmp;
2052   ciKlass*     _exact_klass;   // non null if we know the klass statically (no need to load it from _obj)
2053   intptr_t     _current_klass; // what the profiling currently reports
2054   bool         _not_null;      // true if we know statically that _obj cannot be null
2055   bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not null and we know
2056                                // _exact_klass is the only possible type for this parameter in any context.
2057 
2058  public:
2059   // Destroys recv
2060   LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
2061     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, nullptr)  // no result, no info
2062     , _mdp(mdp)
2063     , _obj(obj)
2064     , _tmp(tmp)
2065     , _exact_klass(exact_klass)
2066     , _current_klass(current_klass)
2067     , _not_null(not_null)
2068     , _no_conflict(no_conflict) { }
2069 
2070   LIR_Opr      mdp()              const             { return _mdp;              }
2071   LIR_Opr      obj()              const             { return _obj;              }
2072   LIR_Opr      tmp()              const             { return _tmp;              }
2073   ciKlass*     exact_klass()      const             { return _exact_klass;      }
2074   intptr_t     current_klass()    const             { return _current_klass;    }
2075   bool         not_null()         const             { return _not_null;         }
2076   bool         no_conflict()      const             { return _no_conflict;      }
2077 
2078   virtual void emit_code(LIR_Assembler* masm);
2079   virtual LIR_OpProfileType* as_OpProfileType() { return this; }
2080   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2081 };
2082 
2083 class LIR_InsertionBuffer;
2084 
2085 //--------------------------------LIR_List---------------------------------------------------
2086 // Maintains a list of LIR instructions (one instance of LIR_List per basic block).
2087 // The LIR instructions are appended by the LIR_List class itself.
2088 //
2089 // Notes:
2090 // - all offsets are (should be) in bytes
2091 // - local positions are specified with an offset, with offset 0 being local 0
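     //
     // A minimal usage sketch, assuming a valid Compilation* and BlockBegin* (the operand
     // and label names below are illustrative):
     //   LIR_List* lir = new LIR_List(compilation, block);
     //   lir->nop();                                      // appends a LIR_Op0
     //   lir->move(src_opr, dst_opr);                     // appends a lir_move LIR_Op1
     //   lir->branch_destination(continuation->label());  // appends a LIR_OpLabel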
2092 
2093 class LIR_List: public CompilationResourceObj {
2094  private:
2095   LIR_OpList  _operations;
2096 
2097   Compilation*  _compilation;
2098 #ifndef PRODUCT
2099   BlockBegin*   _block;
2100 #endif
2101 #ifdef ASSERT
2102   const char *  _file;
2103   int           _line;
2104 #endif
2105 #ifdef RISCV
2106   LIR_Opr       _cmp_opr1;
2107   LIR_Opr       _cmp_opr2;
2108 #endif
2109 
2110  public:
2111   void append(LIR_Op* op) {
2112     if (op->source() == nullptr)
2113       op->set_source(_compilation->current_instruction());
2114 #ifndef PRODUCT
2115     if (PrintIRWithLIR) {
2116       _compilation->maybe_print_current_instruction();
2117       op->print(); tty->cr();
2118     }
2119 #endif // PRODUCT
2120 
2121 #ifdef RISCV
2122     set_cmp_oprs(op);
2123     // on riscv, lir_cmp only sets the cmp operands; the op itself is not appended
2124     if (op->code() == lir_cmp) return;
2125 #endif
2126 
2127     _operations.append(op);
2128 
2129 #ifdef ASSERT
2130     op->verify();
2131     op->set_file_and_line(_file, _line);
2132     _file = nullptr;
2133     _line = 0;
2134 #endif
2135   }
2136 
2137   LIR_List(Compilation* compilation, BlockBegin* block = nullptr);
2138 
2139 #ifdef ASSERT
2140   void set_file_and_line(const char * file, int line);
2141 #endif
2142 
2143 #ifdef RISCV
2144   void set_cmp_oprs(LIR_Op* op);
2145 #endif
2146 
2147   //---------- accessors ---------------
2148   LIR_OpList* instructions_list()                { return &_operations; }
2149   int         length() const                     { return _operations.length(); }
2150   LIR_Op*     at(int i) const                    { return _operations.at(i); }
2151 
2152   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
2153 
2154   // insert LIR_Ops in buffer to right places in LIR_List
2155   void append(LIR_InsertionBuffer* buffer);
2156 
2157   //---------- mutators ---------------
2158   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
2159   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
2160   void remove_at(int i)                          { _operations.remove_at(i); }
2161 
2162   //---------- printing -------------
2163   void print_instructions() PRODUCT_RETURN;
2164 
2165 
2166   //---------- instructions -------------
2167   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2168                         address dest, LIR_OprList* arguments,
2169                         CodeEmitInfo* info) {
2170     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2171   }
2172   void call_static(ciMethod* method, LIR_Opr result,
2173                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2174     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2175   }
2176   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2177                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2178     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2179   }
2180   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2181                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2182     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2183   }
2184 
2185   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
2186   void membar()                                  { append(new LIR_Op0(lir_membar)); }
2187   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
2188   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
2189   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
2190   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
2191   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
2192   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
2193 
2194   void nop()                                     { append(new LIR_Op0(lir_nop)); }
2195 
2196   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
2197   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2198 
2199   void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }
2200 
2201   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
2202 
2203   void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = nullptr) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
2204 
2205   // result is a stack location for the old backend and a vreg for UseLinearScan
2206   // stack_loc_temp is an illegal register for the old backend
2207   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2208   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = nullptr) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2209   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = nullptr) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2210   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = nullptr) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2211   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = nullptr) {
2212     if (UseCompressedOops) {
2213       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2214     } else {
2215       move(src, dst, info);
2216     }
2217   }
2218   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = nullptr) {
2219     if (UseCompressedOops) {
2220       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2221     } else {
2222       move(src, dst, info);
2223     }
2224   }
2225   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = nullptr, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2226 
2227   void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2228   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2229 
2230   void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2231   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2232 
2233   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2234   void return_op(LIR_Opr result)                   { append(new LIR_OpReturn(result)); }
2235 
2236   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = nullptr/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2237 
2238   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2239   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2240   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2241 
2242   void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
2243   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2244     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2245   }
2246   void unwind_exception(LIR_Opr exceptionOop) {
2247     append(new LIR_Op1(lir_unwind, exceptionOop));
2248   }
2249 
2250   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2251   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2252 
2253   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = nullptr) {
2254     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2255   }
2256   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = nullptr) {
2257     cmp(condition, left, LIR_OprFact::intConst(right), info);
2258   }
2259 
2260   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2261   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2262 
2263   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type,
2264              LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr) {
2265     append(new LIR_Op4(lir_cmove, condition, src1, src2, cmp_opr1, cmp_opr2, dst, type));
2266   }
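
       // A hypothetical sketch of the usual cmp/cmove pairing (operand names are illustrative):
       //   lir->cmp(lir_cond_equal, left_opr, right_opr);
       //   lir->cmove(lir_cond_equal, one_opr, zero_opr, result_opr, T_INT);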
2267 
2268   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2269                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2270   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2271                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2272   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2273                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2274 
2275   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2276   void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr)              { append(new LIR_Op2(lir_neg, from, tmp, to)); }
2277   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2278   void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
2279   void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
2280   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2281   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2282   void f2hf(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_f2hf, from, tmp, to)); }
2283   void hf2f(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_hf2f, from, tmp, to)); }
2284 
2285   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2286   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = nullptr) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2287   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2288   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul, left, right, res, tmp)); }
2289   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = nullptr)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2290   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div, left, right, res, tmp)); }
2291   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = nullptr)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2292 
2293   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2294   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2295 
2296   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = nullptr, LIR_PatchCode patch_code = lir_patch_none);
2297 
2298   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2299   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2300   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = nullptr, LIR_PatchCode patch_code = lir_patch_none);
2301   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2302   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2303 
2304   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2305   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2306   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2307   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2308 
2309   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2310   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = true);
2311 
2312   // jump is an unconditional branch
2313   void jump(BlockBegin* block) {
2314     append(new LIR_OpBranch(lir_cond_always, block));
2315   }
2316   void jump(CodeStub* stub) {
2317     append(new LIR_OpBranch(lir_cond_always, stub));
2318   }
2319   void branch(LIR_Condition cond, Label* lbl) {
2320     append(new LIR_OpBranch(cond, lbl));
2321   }
2322   // Should not be used for fp comparisons
2323   void branch(LIR_Condition cond, BlockBegin* block) {
2324     append(new LIR_OpBranch(cond, block));
2325   }
2326   // Should not be used for fp comparisons
2327   void branch(LIR_Condition cond, CodeStub* stub) {
2328     append(new LIR_OpBranch(cond, stub));
2329   }
2330   // Should only be used for fp comparisons
2331   void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
2332     append(new LIR_OpBranch(cond, block, unordered));
2333   }
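
       // A hypothetical sketch of a float comparison followed by an unordered-aware branch
       // (block names are illustrative):
       //   lir->cmp(lir_cond_less, left_f, right_f);
       //   lir->branch(lir_cond_less, taken_block, unordered_block);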
2334 
2335   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2336   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2337   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2338 
2339   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2340   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2341   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2342 
2343   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2344   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2345 
2346   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2347     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2348   }
2349 
2350   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2351                     LIR_OprList* arguments, CodeEmitInfo* info) {
2352     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2353   }
2354 
2355   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2356   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2357   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2358 
2359   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2360 
2361   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2362 
2363   void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2364 
2365   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2366   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2367 
2368   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2369                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2370                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2371                   ciMethod* profiled_method, int profiled_bci);
2372   // MethodData* profiling
2373   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2374     append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2375   }
2376   void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2377     append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2378   }
2379 
2380   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2381   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2382 
2383   void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); }
2384 
2385 #ifdef ASSERT
2386   void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2387 #endif
2388 };
2389 
2390 void print_LIR(BlockList* blocks);
2391 
2392 class LIR_InsertionBuffer : public CompilationResourceObj {
2393  private:
2394   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (null when uninitialized)
2395 
2396   // list of insertion points. index and count are stored alternately:
2397   // _index_and_count[i * 2]:     the index into the lir list where "count" ops should be inserted
2398   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at that index
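       //
       // For example, two insertion points {index 3: 2 ops, index 7: 1 op} would be stored
       // as _index_and_count = [3, 2, 7, 1].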
2399   intStack    _index_and_count;
2400 
2401   // the LIR_Ops to be inserted
2402   LIR_OpList  _ops;
2403 
2404   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2405   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2406   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2407 
2408 #ifdef ASSERT
2409   void verify();
2410 #endif
2411  public:
2412   LIR_InsertionBuffer() : _lir(nullptr), _index_and_count(8), _ops(8) { }
2413 
2414   // must be called before using the insertion buffer
2415   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2416   bool initialized() const  { return _lir != nullptr; }
2417   // called automatically when the buffer is appended to the LIR_List
2418   void finish()             { _lir = nullptr; }
2419 
2420   // accessors
2421   LIR_List*  lir_list() const             { return _lir; }
2422   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2423   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2424   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2425 
2426   int number_of_ops() const               { return _ops.length(); }
2427   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2428 
2429   // append an instruction to the buffer
2430   void append(int index, LIR_Op* op);
2431 
2432   // convenience: append a move instruction
2433   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = nullptr) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2434 };
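
     // A minimal usage sketch, assuming lir is the LIR_List being patched (operand names
     // and indices are illustrative):
     //   LIR_InsertionBuffer buffer;
     //   buffer.init(lir);
     //   buffer.move(2, src_opr, dst_opr);  // queue a move to be inserted at index 2 of lir
     //   lir->append(&buffer);              // splices the buffered ops in and calls finish()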
2435 
2436 
2437 //
2438 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2439 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2440 // information about the input, output and temporaries used by the
2441 // op to be recorded.  It also records whether the op has call semantics
2442 // and all the CodeEmitInfos used by this op.
2443 //
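     // A minimal usage sketch (op is some LIR_Op*; the loop body is illustrative):
     //   LIR_OpVisitState state;
     //   state.visit(op);
     //   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
     //     LIR_Opr in = state.opr_at(LIR_OpVisitState::inputMode, i);
     //     // ... inspect or rewrite the i-th input operand ...
     //   }
     //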
2444 
2445 
2446 class LIR_OpVisitState: public StackObj {
2447  public:
2448   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2449 
2450   enum {
2451     maxNumberOfOperands = 21,
2452     maxNumberOfInfos = 4
2453   };
2454 
2455  private:
2456   LIR_Op*          _op;
2457 
2458   // optimization: the operands and infos are not stored in a variable-length
2459   //               list, but in a fixed-size array to avoid the cost of size checks and resizing
2460   int              _oprs_len[numModes];
2461   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2462   int _info_len;
2463   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2464 
2465   bool             _has_call;
2466   bool             _has_slow_case;
2467 
2468 
2469   // only include register operands
2470   // addresses are decomposed to the base and index registers
2471   // constants and stack operands are ignored
2472   void append(LIR_Opr& opr, OprMode mode) {
2473     assert(opr->is_valid(), "should not call this otherwise");
2474     assert(mode >= 0 && mode < numModes, "bad mode");
2475 
2476     if (opr->is_register()) {
2477       assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2478       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2479 
2480     } else if (opr->is_pointer()) {
2481       LIR_Address* address = opr->as_address_ptr();
2482       if (address != nullptr) {
2483         // special handling for addresses: add the base and index registers of the address;
2484         // both are always input operands, or temps if we want to extend
2485         // their liveness!
2486         if (mode == outputMode) {
2487           mode = inputMode;
2488         }
2489         assert(mode == inputMode || mode == tempMode, "input or temp only for addresses");
2490         if (address->_base->is_valid()) {
2491           assert(address->_base->is_register(), "must be");
2492           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2493           _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2494         }
2495         if (address->_index->is_valid()) {
2496           assert(address->_index->is_register(), "must be");
2497           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2498           _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2499         }
2500 
2501       } else {
2502         assert(opr->is_constant(), "constant operands are not processed");
2503       }
2504     } else {
2505       assert(opr->is_stack(), "stack operands are not processed");
2506     }
2507   }
2508 
2509   void append(CodeEmitInfo* info) {
2510     assert(info != nullptr, "should not call this otherwise");
2511     assert(_info_len < maxNumberOfInfos, "array overflow");
2512     _info_new[_info_len++] = info;
2513   }
2514 
2515  public:
2516   LIR_OpVisitState()         { reset(); }
2517 
2518   LIR_Op* op() const         { return _op; }
2519   void set_op(LIR_Op* op)    { reset(); _op = op; }
2520 
2521   bool has_call() const      { return _has_call; }
2522   bool has_slow_case() const { return _has_slow_case; }
2523 
2524   void reset() {
2525     _op = nullptr;
2526     _has_call = false;
2527     _has_slow_case = false;
2528 
2529     _oprs_len[inputMode] = 0;
2530     _oprs_len[tempMode] = 0;
2531     _oprs_len[outputMode] = 0;
2532     _info_len = 0;
2533   }
2534 
2535 
2536   int opr_count(OprMode mode) const {
2537     assert(mode >= 0 && mode < numModes, "bad mode");
2538     return _oprs_len[mode];
2539   }
2540 
2541   LIR_Opr opr_at(OprMode mode, int index) const {
2542     assert(mode >= 0 && mode < numModes, "bad mode");
2543     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2544     return *_oprs_new[mode][index];
2545   }
2546 
2547   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2548     assert(mode >= 0 && mode < numModes, "bad mode");
2549     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2550     *_oprs_new[mode][index] = opr;
2551   }
2552 
2553   int info_count() const {
2554     return _info_len;
2555   }
2556 
2557   CodeEmitInfo* info_at(int index) const {
2558     assert(index < _info_len, "index out of bounds");
2559     return _info_new[index];
2560   }
2561 
2562   XHandlers* all_xhandler();
2563 
2564   // collects all register operands of the instruction
2565   void visit(LIR_Op* op);
2566 
2567 #ifdef ASSERT
2568   // check that an operation has no operands
2569   bool no_operands(LIR_Op* op);
2570 #endif
2571 
2572   // LIR_Op visitor functions use these to fill in the state
2573   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2574   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2575   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2576   void do_info(CodeEmitInfo* info)        { append(info); }
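
       // A hypothetical sketch of the kind of calls LIR_OpVisitState::visit makes when it
       // dispatches on a typical LIR_Op2 (the friend declarations make the protected fields
       // reachable; the result operand is handled analogously via do_output):
       //   do_input(op2->_opr1);
       //   do_input(op2->_opr2);
       //   do_temp(op2->_tmp1);
       //   if (op2->info() != nullptr) do_info(op2->info());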
2577 
2578   void do_stub(CodeStub* stub);
2579   void do_call()                          { _has_call = true; }
2580   void do_slow_case()                     { _has_slow_case = true; }
2581   void do_slow_case(CodeEmitInfo* info) {
2582     _has_slow_case = true;
2583     append(info);
2584   }
2585 };
2586 
2587 
2588 inline LIR_Opr LIR_Opr::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2589 
2590 inline LIR_Opr LIR_Opr::nullOpr()   { return LIR_OprFact::nullOpr; }
2591 
2592 #endif // SHARE_C1_C1_LIR_HPP