1 /*
   2  * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_C1_C1_LIR_HPP
  26 #define SHARE_C1_C1_LIR_HPP
  27 
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_ValueType.hpp"
  30 #include "oops/method.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 
  33 class BlockBegin;
  34 class BlockList;
  35 class LIR_Assembler;
  36 class CodeEmitInfo;
  37 class CodeStub;
  38 class CodeStubList;
  39 class C1SafepointPollStub;
  40 class ArrayCopyStub;
  41 class LIR_Op;
  42 class ciType;
  43 class ValueType;
  44 class LIR_OpVisitState;
  45 class FpuStackSim;
  46 
  47 //---------------------------------------------------------------------
  48 //                 LIR Operands
  49 //  LIR_OprDesc
  50 //    LIR_OprPtr
  51 //      LIR_Const
  52 //      LIR_Address
  53 //---------------------------------------------------------------------
  54 class LIR_OprDesc;
  55 class LIR_OprPtr;
  56 class LIR_Const;
  57 class LIR_Address;
  58 class LIR_OprVisitor;
  59 
  60 
  61 typedef LIR_OprDesc* LIR_Opr;
  62 typedef int          RegNr;
  63 
  64 typedef GrowableArray<LIR_Opr> LIR_OprList;
  65 typedef GrowableArray<LIR_Op*> LIR_OpArray;
  66 typedef GrowableArray<LIR_Op*> LIR_OpList;
  67 
  68 // define LIR_OprPtr early so LIR_OprDesc can refer to it
  69 class LIR_OprPtr: public CompilationResourceObj {
  70  public:
  71   bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
  72   bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
  73 
  74   virtual LIR_Const*  as_constant()              { return NULL; }
  75   virtual LIR_Address* as_address()              { return NULL; }
  76   virtual BasicType type() const                 = 0;
  77   virtual void print_value_on(outputStream* out) const = 0;
  78 };
  79 
  80 
  81 
  82 // LIR constants
  83 class LIR_Const: public LIR_OprPtr {
  84  private:
  85   JavaValue _value;
  86 
  87   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
  88   void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
  89   void type_check(BasicType t1, BasicType t2, BasicType t3) const   { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
  90 
  91  public:
  92   LIR_Const(jint i, bool is_address=false)       { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
  93   LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
  94   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
  95   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
  96   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
  97   LIR_Const(void* p) {
  98 #ifdef _LP64
  99     assert(sizeof(jlong) >= sizeof(p), "too small");
 100     _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
 101 #else
 102     assert(sizeof(jint) >= sizeof(p), "too small");
 103     _value.set_type(T_INT);     _value.set_jint((jint)p);
 104 #endif
 105   }
 106   LIR_Const(Metadata* m) {
 107     _value.set_type(T_METADATA);
 108 #ifdef _LP64
 109     _value.set_jlong((jlong)m);
 110 #else
 111     _value.set_jint((jint)m);
 112 #endif // _LP64
 113   }
 114 
 115   virtual BasicType type()       const { return _value.get_type(); }
 116   virtual LIR_Const* as_constant()     { return this; }
 117 
 118   jint      as_jint()    const         { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
 119   jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
 120   jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
 121   jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
 122   jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
 123   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
 124   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
 125 
 126 #ifdef _LP64
 127   address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
 128   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
 129 #else
 130   address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
 131   Metadata* as_metadata() const        { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
 132 #endif
 133 
 134 
 135   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
 136   jint      as_jint_lo_bits() const    {
 137     if (type() == T_DOUBLE) {
 138       return low(jlong_cast(_value.get_jdouble()));
 139     } else {
 140       return as_jint_lo();
 141     }
 142   }
 143   jint      as_jint_hi_bits() const    {
 144     if (type() == T_DOUBLE) {
 145       return high(jlong_cast(_value.get_jdouble()));
 146     } else {
 147       return as_jint_hi();
 148     }
 149   }
 150   jlong      as_jlong_bits() const    {
 151     if (type() == T_DOUBLE) {
 152       return jlong_cast(_value.get_jdouble());
 153     } else {
 154       return as_jlong();
 155     }
 156   }
 157 
 158   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 159 
 160 
 161   bool is_zero_float() {
 162     jfloat f = as_jfloat();
 163     jfloat ok = 0.0f;
 164     return jint_cast(f) == jint_cast(ok);
 165   }
 166 
 167   bool is_one_float() {
 168     jfloat f = as_jfloat();
 169     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
 170   }
 171 
 172   bool is_zero_double() {
 173     jdouble d = as_jdouble();
 174     jdouble ok = 0.0;
 175     return jlong_cast(d) == jlong_cast(ok);
 176   }
 177 
 178   bool is_one_double() {
 179     jdouble d = as_jdouble();
 180     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
 181   }
 182 };
 183 
 184 
 185 //---------------------LIR Operand descriptor------------------------------------
 186 //
 187 // The class LIR_OprDesc represents a LIR instruction operand;
 188 // it can be a register (ALU/FPU), a stack location or a constant.
 189 // Constants and addresses are represented as resource-area allocated
 190 // structures (see above), while registers and stack locations are
 191 // encoded directly in the pointer value itself
 192 // (see the value() function below).
 193 
 194 class LIR_OprDesc: public CompilationResourceObj {
 195  public:
 196   // value structure:
 197   //     data       opr-type opr-kind
 198   // +--------------+-------+-------+
 199   // [max...........|7 6 5 4|3 2 1 0]
 200   //                               ^
 201   //                         is_pointer bit
 202   //
 203   // If the lowest bit is cleared, the value is a pointer to a structure (LIR_OprPtr).
 204   // We need 4 bits to represent the operand type.
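  //
  // Illustrative decode (a sketch, not normative): for the operand created by
  // LIR_OprFact::single_cpu(5) the low bits hold, from least significant up,
  //   kind  = cpu_register   (lowest bit set, so not a structure pointer)
  //   type  = int_type
  //   size  = single_size
  //   flag bits (destroys, last_use, is_fpu_stack_offset, virtual, is_xmm) = 0
  // and the register number 5 sits in the data field, i.e. shifted up by
  // data_shift (== reg1_shift).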
 205 
 206  private:
 207   friend class LIR_OprFact;
 208 
 209   // Conversion
 210   intptr_t value() const                         { return (intptr_t) this; }
 211 
 212   bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
 213     return (value() & mask) == masked_value;
 214   }
 215 
 216   enum OprKind {
 217       pointer_value      = 0
 218     , stack_value        = 1
 219     , cpu_register       = 3
 220     , fpu_register       = 5
 221     , illegal_value      = 7
 222   };
 223 
 224   enum OprBits {
 225       pointer_bits   = 1
 226     , kind_bits      = 3
 227     , type_bits      = 4
 228     , size_bits      = 2
 229     , destroys_bits  = 1
 230     , virtual_bits   = 1
 231     , is_xmm_bits    = 1
 232     , last_use_bits  = 1
 233     , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
 234     , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
 235                        + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
 236     , data_bits      = BitsPerInt - non_data_bits
 237     , reg_bits       = data_bits / 2      // for two registers in one value encoding
 238   };
 239 
 240   enum OprShift : uintptr_t {
 241       kind_shift     = 0
 242     , type_shift     = kind_shift     + kind_bits
 243     , size_shift     = type_shift     + type_bits
 244     , destroys_shift = size_shift     + size_bits
 245     , last_use_shift = destroys_shift + destroys_bits
 246     , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
 247     , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
 248     , is_xmm_shift   = virtual_shift + virtual_bits
 249     , data_shift     = is_xmm_shift + is_xmm_bits
 250     , reg1_shift = data_shift
 251     , reg2_shift = data_shift + reg_bits
 252 
 253   };
 254 
 255   enum OprSize {
 256       single_size = 0 << size_shift
 257     , double_size = 1 << size_shift
 258   };
 259 
 260   enum OprMask {
 261       kind_mask      = right_n_bits(kind_bits)
 262     , type_mask      = right_n_bits(type_bits) << type_shift
 263     , size_mask      = right_n_bits(size_bits) << size_shift
 264     , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
 265     , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
 266     , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
 267     , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
 268     , pointer_mask   = right_n_bits(pointer_bits)
 269     , lower_reg_mask = right_n_bits(reg_bits)
 270     , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
 271   };
 272 
 273   uint32_t data() const                          { return (uint32_t)value() >> data_shift; }
 274   int lo_reg_half() const                        { return data() & lower_reg_mask; }
 275   int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
 276   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
 277   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
 278 
 279   static char type_char(BasicType t);
 280 
 281  public:
 282   enum {
 283     vreg_base = ConcreteRegisterImpl::number_of_registers,
 284     data_max = (1 << data_bits) - 1,      // max unsigned value for data bit field
 285     vreg_limit =  10000,                  // choose a reasonable limit,
 286     vreg_max = MIN2(vreg_limit, data_max) // and make sure it fits in the bit field
 287   };
 288 
 289   static inline LIR_Opr illegalOpr();
 290 
 291   enum OprType {
 292       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
 293     , int_type      = 1 << type_shift
 294     , long_type     = 2 << type_shift
 295     , object_type   = 3 << type_shift
 296     , address_type  = 4 << type_shift
 297     , float_type    = 5 << type_shift
 298     , double_type   = 6 << type_shift
 299     , metadata_type = 7 << type_shift
 300   };
 301   friend OprType as_OprType(BasicType t);
 302   friend BasicType as_BasicType(OprType t);
 303 
 304   OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
 305   OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
 306 
 307   static OprSize size_for(BasicType t) {
 308     switch (t) {
 309       case T_LONG:
 310       case T_DOUBLE:
 311         return double_size;
 312         break;
 313 
 314       case T_FLOAT:
 315       case T_BOOLEAN:
 316       case T_CHAR:
 317       case T_BYTE:
 318       case T_SHORT:
 319       case T_INT:
 320       case T_ADDRESS:
 321       case T_OBJECT:
 322       case T_ARRAY:
 323       case T_METADATA:
 324         return single_size;
 325         break;
 326 
 327       default:
 328         ShouldNotReachHere();
 329         return single_size;
 330       }
 331   }
 332 
 333 
 334   void validate_type() const PRODUCT_RETURN;
 335 
 336   BasicType type() const {
 337     if (is_pointer()) {
 338       return pointer()->type();
 339     }
 340     return as_BasicType(type_field());
 341   }
 342 
 343 
 344   ValueType* value_type() const                  { return as_ValueType(type()); }
 345 
 346   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
 347 
 348   bool is_equal(LIR_Opr opr) const         { return this == opr; }
 349   // checks whether the types are the same
 350   bool is_same_type(LIR_Opr opr) const     {
 351     assert(type_field() != unknown_type &&
 352            opr->type_field() != unknown_type, "shouldn't see unknown_type");
 353     return type_field() == opr->type_field();
 354   }
 355   bool is_same_register(LIR_Opr opr) {
 356     return (is_register() && opr->is_register() &&
 357             kind_field() == opr->kind_field() &&
 358             (value() & no_type_mask) == (opr->value() & no_type_mask));
 359   }
 360 
 361   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
 362   bool is_illegal() const      { return kind_field() == illegal_value; }
 363   bool is_valid() const        { return kind_field() != illegal_value; }
 364 
 365   bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
 366   bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
 367 
 368   bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
 369   bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
 370 
 371   bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
 372   bool is_oop() const;
 373 
 374   // Semantics for fpu and xmm registers:
 375   // * the fpu queries also return true for xmm registers
 376   //   (for a single xmm register, both is_single_fpu and is_single_xmm are true),
 377   // * so you must always check is_???_xmm prior to is_???_fpu to
 378   //   distinguish between fpu and xmm registers.
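  //
  // Example of the required check order (illustrative sketch only, on x86
  // where xmm registers exist):
  //
  //   if (opr->is_single_xmm()) {
  //     // an xmm register: use opr->as_xmm_float_reg()
  //   } else if (opr->is_single_fpu()) {
  //     // a plain fpu register: use opr->as_float_reg()
  //   }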
 379 
 380   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
 381   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
 382   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
 383 
 384   bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
 385   bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
 386   bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
 387   bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
 388   bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
 389 
 390   bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
 391   bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
 392   bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
 393   bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
 394   bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
 395 
 396   bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
 397   bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
 398   bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
 399 
 400   // fast accessor functions for special bits that do not work for pointers
 401   // (in these functions, the check for is_pointer() is omitted)
 402   bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
 403   bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
 404   bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
 405   bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
 406   BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
 407 
 408   bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
 409   bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
 410   LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
 411   LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
 412 
 413 
 414   int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
 415   int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
 416   RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 417   RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 418   RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 419   RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
 420   RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 421   RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 422   RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
 423   RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
 424   RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
 425   int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
 426 
 427   LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
 428   LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
 429   LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
 430 
 431   Register as_register()    const;
 432   Register as_register_lo() const;
 433   Register as_register_hi() const;
 434 
 435   Register as_pointer_register() {
 436 #ifdef _LP64
 437     if (is_double_cpu()) {
 438       assert(as_register_lo() == as_register_hi(), "should be a single register");
 439       return as_register_lo();
 440     }
 441 #endif
 442     return as_register();
 443   }
 444 
 445   FloatRegister as_float_reg   () const;
 446   FloatRegister as_double_reg  () const;
 447 #ifdef X86
 448   XMMRegister as_xmm_float_reg () const;
 449   XMMRegister as_xmm_double_reg() const;
 450   // for compatibility with RInfo
 451   int fpu() const { return lo_reg_half(); }
 452 #endif
 453 
 454   jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
 455   jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
 456   jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
 457   jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
 458   jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
 459 
 460   void print() const PRODUCT_RETURN;
 461   void print(outputStream* out) const PRODUCT_RETURN;
 462 };
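
// Illustrative query sequence on a LIR_Opr (a sketch only; assume `opr` is
// some operand obtained from the LIR):
//
//   if (opr->is_constant()) {
//     jint v = opr->as_jint();            // valid for T_INT/T_ADDRESS constants
//   } else if (opr->is_single_cpu()) {
//     Register r = opr->as_register();
//   } else if (opr->is_single_stack()) {
//     int slot = opr->single_stack_ix();
//   }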
 463 
 464 
 465 inline LIR_OprDesc::OprType as_OprType(BasicType type) {
 466   switch (type) {
 467   case T_INT:      return LIR_OprDesc::int_type;
 468   case T_LONG:     return LIR_OprDesc::long_type;
 469   case T_FLOAT:    return LIR_OprDesc::float_type;
 470   case T_DOUBLE:   return LIR_OprDesc::double_type;
 471   case T_OBJECT:
 472   case T_ARRAY:    return LIR_OprDesc::object_type;
 473   case T_ADDRESS:  return LIR_OprDesc::address_type;
 474   case T_METADATA: return LIR_OprDesc::metadata_type;
 475   case T_ILLEGAL:  // fall through
 476   default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
 477   }
 478 }
 479 
 480 inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
 481   switch (t) {
 482   case LIR_OprDesc::int_type:     return T_INT;
 483   case LIR_OprDesc::long_type:    return T_LONG;
 484   case LIR_OprDesc::float_type:   return T_FLOAT;
 485   case LIR_OprDesc::double_type:  return T_DOUBLE;
 486   case LIR_OprDesc::object_type:  return T_OBJECT;
 487   case LIR_OprDesc::address_type: return T_ADDRESS;
 488   case LIR_OprDesc::metadata_type:return T_METADATA;
 489   case LIR_OprDesc::unknown_type: // fall through
 490   default: ShouldNotReachHere();  return T_ILLEGAL;
 491   }
 492 }
 493 
 494 
 495 // LIR_Address
 496 class LIR_Address: public LIR_OprPtr {
 497  friend class LIR_OpVisitState;
 498 
 499  public:
 500   // NOTE: currently these must be the log2 of the scale factor (and
 501   // must also be equivalent to the ScaleFactor enum in
 502   // assembler_x86.hpp)
 503   enum Scale {
 504     times_1  =  0,
 505     times_2  =  1,
 506     times_4  =  2,
 507     times_8  =  3
 508   };
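  //
  // For example (illustrative): times_4 scales the index by 1 << 2 = 4, so an
  // element address is computed as base + (index << scale) + disp; a jint
  // element uses times_4 and a jlong element uses times_8.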
 509 
 510  private:
 511   LIR_Opr   _base;
 512   LIR_Opr   _index;
 513   Scale     _scale;
 514   intx      _disp;
 515   BasicType _type;
 516 
 517  public:
 518   LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
 519        _base(base)
 520      , _index(index)
 521      , _scale(times_1)
 522      , _disp(0)
 523      , _type(type) { verify(); }
 524 
 525   LIR_Address(LIR_Opr base, intx disp, BasicType type):
 526        _base(base)
 527      , _index(LIR_OprDesc::illegalOpr())
 528      , _scale(times_1)
 529      , _disp(disp)
 530      , _type(type) { verify(); }
 531 
 532   LIR_Address(LIR_Opr base, BasicType type):
 533        _base(base)
 534      , _index(LIR_OprDesc::illegalOpr())
 535      , _scale(times_1)
 536      , _disp(0)
 537      , _type(type) { verify(); }
 538 
 539   LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
 540        _base(base)
 541      , _index(index)
 542      , _scale(times_1)
 543      , _disp(disp)
 544      , _type(type) { verify(); }
 545 
 546   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
 547        _base(base)
 548      , _index(index)
 549      , _scale(scale)
 550      , _disp(disp)
 551      , _type(type) { verify(); }
 552 
 553   LIR_Opr base()  const                          { return _base;  }
 554   LIR_Opr index() const                          { return _index; }
 555   Scale   scale() const                          { return _scale; }
 556   intx    disp()  const                          { return _disp;  }
 557 
 558   bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
 559 
 560   virtual LIR_Address* as_address()              { return this;   }
 561   virtual BasicType type() const                 { return _type; }
 562   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
 563 
 564   void verify() const PRODUCT_RETURN;
 565 
 566   static Scale scale(BasicType type);
 567 };
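
// Illustrative construction of a LIR_Address (a sketch only; `array` and
// `index` are assumed to be LIR_Oprs produced elsewhere, e.g. by a
// LIRGenerator):
//
//   // address of element `index` of a T_INT array:
//   //   array + arrayOopDesc::base_offset_in_bytes(T_INT) + (index << times_4)
//   LIR_Address* addr =
//       new LIR_Address(array, index, LIR_Address::times_4,
//                       arrayOopDesc::base_offset_in_bytes(T_INT), T_INT);
//   LIR_Opr opr = LIR_OprFact::address(addr);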
 568 
 569 
 570 // operand factory
 571 class LIR_OprFact: public AllStatic {
 572  public:
 573 
 574   static LIR_Opr illegalOpr;
 575 
 576   static LIR_Opr single_cpu(int reg) {
 577     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 578                                LIR_OprDesc::int_type             |
 579                                LIR_OprDesc::cpu_register         |
 580                                LIR_OprDesc::single_size);
 581   }
 582   static LIR_Opr single_cpu_oop(int reg) {
 583     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 584                                LIR_OprDesc::object_type          |
 585                                LIR_OprDesc::cpu_register         |
 586                                LIR_OprDesc::single_size);
 587   }
 588   static LIR_Opr single_cpu_address(int reg) {
 589     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 590                                LIR_OprDesc::address_type         |
 591                                LIR_OprDesc::cpu_register         |
 592                                LIR_OprDesc::single_size);
 593   }
 594   static LIR_Opr single_cpu_metadata(int reg) {
 595     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 596                                LIR_OprDesc::metadata_type        |
 597                                LIR_OprDesc::cpu_register         |
 598                                LIR_OprDesc::single_size);
 599   }
 600   static LIR_Opr double_cpu(int reg1, int reg2) {
 601     LP64_ONLY(assert(reg1 == reg2, "must be identical"));
 602     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 603                                (reg2 << LIR_OprDesc::reg2_shift) |
 604                                LIR_OprDesc::long_type            |
 605                                LIR_OprDesc::cpu_register         |
 606                                LIR_OprDesc::double_size);
 607   }
 608 
 609   static LIR_Opr single_fpu(int reg) {
 610     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 611                                LIR_OprDesc::float_type           |
 612                                LIR_OprDesc::fpu_register         |
 613                                LIR_OprDesc::single_size);
 614   }
 615 
 616   // Platform dependent.
 617   static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);
 618 
 619 #ifdef ARM32
 620   static LIR_Opr single_softfp(int reg) {
 621     return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
 622                                LIR_OprDesc::float_type           |
 623                                LIR_OprDesc::cpu_register         |
 624                                LIR_OprDesc::single_size);
 625   }
 626   static LIR_Opr double_softfp(int reg1, int reg2) {
 627     return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
 628                                (reg2 << LIR_OprDesc::reg2_shift) |
 629                                LIR_OprDesc::double_type          |
 630                                LIR_OprDesc::cpu_register         |
 631                                LIR_OprDesc::double_size);
 632   }
 633 #endif // ARM32
 634 
 635 #if defined(X86)
 636   static LIR_Opr single_xmm(int reg) {
 637     return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
 638                                LIR_OprDesc::float_type          |
 639                                LIR_OprDesc::fpu_register        |
 640                                LIR_OprDesc::single_size         |
 641                                LIR_OprDesc::is_xmm_mask);
 642   }
 643   static LIR_Opr double_xmm(int reg) {
 644     return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
 645                                (reg << LIR_OprDesc::reg2_shift) |
 646                                LIR_OprDesc::double_type         |
 647                                LIR_OprDesc::fpu_register        |
 648                                LIR_OprDesc::double_size         |
 649                                LIR_OprDesc::is_xmm_mask);
 650   }
 651 #endif // X86
 652 
 653   static LIR_Opr virtual_register(int index, BasicType type) {
 654     if (index > LIR_OprDesc::vreg_max) {
 655       // Running out of virtual registers. Caller should bailout.
 656       return illegalOpr;
 657     }
 658 
 659     LIR_Opr res;
 660     switch (type) {
 661       case T_OBJECT: // fall through
 662       case T_ARRAY:
 663         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 664                                             LIR_OprDesc::object_type  |
 665                                             LIR_OprDesc::cpu_register |
 666                                             LIR_OprDesc::single_size  |
 667                                             LIR_OprDesc::virtual_mask);
 668         break;
 669 
 670       case T_METADATA:
 671         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
 672                                             LIR_OprDesc::metadata_type|
 673                                             LIR_OprDesc::cpu_register |
 674                                             LIR_OprDesc::single_size  |
 675                                             LIR_OprDesc::virtual_mask);
 676         break;
 677 
 678       case T_INT:
 679         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 680                                   LIR_OprDesc::int_type              |
 681                                   LIR_OprDesc::cpu_register          |
 682                                   LIR_OprDesc::single_size           |
 683                                   LIR_OprDesc::virtual_mask);
 684         break;
 685 
 686       case T_ADDRESS:
 687         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 688                                   LIR_OprDesc::address_type          |
 689                                   LIR_OprDesc::cpu_register          |
 690                                   LIR_OprDesc::single_size           |
 691                                   LIR_OprDesc::virtual_mask);
 692         break;
 693 
 694       case T_LONG:
 695         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 696                                   LIR_OprDesc::long_type             |
 697                                   LIR_OprDesc::cpu_register          |
 698                                   LIR_OprDesc::double_size           |
 699                                   LIR_OprDesc::virtual_mask);
 700         break;
 701 
 702 #ifdef __SOFTFP__
 703       case T_FLOAT:
 704         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 705                                   LIR_OprDesc::float_type  |
 706                                   LIR_OprDesc::cpu_register |
 707                                   LIR_OprDesc::single_size |
 708                                   LIR_OprDesc::virtual_mask);
 709         break;
 710       case T_DOUBLE:
 711         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 712                                   LIR_OprDesc::double_type |
 713                                   LIR_OprDesc::cpu_register |
 714                                   LIR_OprDesc::double_size |
 715                                   LIR_OprDesc::virtual_mask);
 716         break;
 717 #else // __SOFTFP__
 718       case T_FLOAT:
 719         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 720                                   LIR_OprDesc::float_type           |
 721                                   LIR_OprDesc::fpu_register         |
 722                                   LIR_OprDesc::single_size          |
 723                                   LIR_OprDesc::virtual_mask);
 724         break;
 725 
 726       case T_DOUBLE:
 727         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 728                                   LIR_OprDesc::double_type          |
 729                                   LIR_OprDesc::fpu_register         |
 730                                   LIR_OprDesc::double_size          |
 731                                   LIR_OprDesc::virtual_mask);
 732         break;
 733 #endif // __SOFTFP__
 734       default:       ShouldNotReachHere(); res = illegalOpr;
 735     }
 736 
 737 #ifdef ASSERT
 738     res->validate_type();
 739     assert(res->vreg_number() == index, "conversion check");
 740     assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
 741 
 742     // old-style calculation; check if old and new method are equal
 743     LIR_OprDesc::OprType t = as_OprType(type);
 744 #ifdef __SOFTFP__
 745     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 746                                t |
 747                                LIR_OprDesc::cpu_register |
 748                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 749 #else // __SOFTFP__
 750     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
 751                                           ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
 752                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
 753     assert(res == old_res, "old and new method not equal");
 754 #endif // __SOFTFP__
 755 #endif // ASSERT
 756 
 757     return res;
 758   }
 759 
 760   // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
 761   // the index is platform independent; a double-word stack operand using indices 2 and 3
 762   // always has index 2.
 763   static LIR_Opr stack(int index, BasicType type) {
 764     LIR_Opr res;
 765     switch (type) {
 766       case T_OBJECT: // fall through
 767       case T_ARRAY:
 768         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 769                                   LIR_OprDesc::object_type           |
 770                                   LIR_OprDesc::stack_value           |
 771                                   LIR_OprDesc::single_size);
 772         break;
 773 
 774       case T_METADATA:
 775         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 776                                   LIR_OprDesc::metadata_type         |
 777                                   LIR_OprDesc::stack_value           |
 778                                   LIR_OprDesc::single_size);
 779         break;
 780       case T_INT:
 781         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 782                                   LIR_OprDesc::int_type              |
 783                                   LIR_OprDesc::stack_value           |
 784                                   LIR_OprDesc::single_size);
 785         break;
 786 
 787       case T_ADDRESS:
 788         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 789                                   LIR_OprDesc::address_type          |
 790                                   LIR_OprDesc::stack_value           |
 791                                   LIR_OprDesc::single_size);
 792         break;
 793 
 794       case T_LONG:
 795         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 796                                   LIR_OprDesc::long_type             |
 797                                   LIR_OprDesc::stack_value           |
 798                                   LIR_OprDesc::double_size);
 799         break;
 800 
 801       case T_FLOAT:
 802         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 803                                   LIR_OprDesc::float_type            |
 804                                   LIR_OprDesc::stack_value           |
 805                                   LIR_OprDesc::single_size);
 806         break;
 807       case T_DOUBLE:
 808         res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 809                                   LIR_OprDesc::double_type           |
 810                                   LIR_OprDesc::stack_value           |
 811                                   LIR_OprDesc::double_size);
 812         break;
 813 
 814       default:       ShouldNotReachHere(); res = illegalOpr;
 815     }
 816 
 817 #ifdef ASSERT
 818     assert(index >= 0, "index must be positive");
 819     assert(index == (int)res->data(), "conversion check");
 820 
 821     LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
 822                                           LIR_OprDesc::stack_value           |
 823                                           as_OprType(type)                   |
 824                                           LIR_OprDesc::size_for(type));
 825     assert(res == old_res, "old and new method not equal");
 826 #endif
 827 
 828     return res;
 829   }
 830 
 831   static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
 832   static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
 833   static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
 834   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
 835   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
 836   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
 837   static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
 838   static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
 839   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
 840   static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
 841   static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }
 842 
 843   static LIR_Opr value_type(ValueType* type);
 844 };
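
// Illustrative LIR_OprFact usage (a sketch only; the virtual register index
// shown is just the first legal value -- real indices are handed out by the
// LIRGenerator):
//
//   LIR_Opr vreg  = LIR_OprFact::virtual_register(LIR_OprDesc::vreg_base, T_INT);
//   LIR_Opr c42   = LIR_OprFact::intConst(42);
//   LIR_Opr slot0 = LIR_OprFact::stack(0, T_OBJECT);   // stack slot 0, holding an oop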
 845 
 846 
 847 //-------------------------------------------------------------------------------
 848 //                   LIR Instructions
 849 //-------------------------------------------------------------------------------
 850 //
 851 // Note:
 852 //  - every instruction has a result operand
 853 //  - every instruction has a CodeEmitInfo operand (can be revisited later)
 854 //  - every instruction has a LIR_Code operand
 855 //  - LIR_OpN means an instruction that has N input operands
 856 //
 857 // class hierarchy:
 858 //
 859 class  LIR_Op;
 860 class    LIR_Op0;
 861 class      LIR_OpLabel;
 862 class    LIR_Op1;
 863 class      LIR_OpBranch;
 864 class      LIR_OpConvert;
 865 class      LIR_OpAllocObj;
 866 class      LIR_OpReturn;
 867 class      LIR_OpRoundFP;
 868 class    LIR_Op2;
 869 class    LIR_OpDelay;
 870 class    LIR_Op3;
 871 class      LIR_OpAllocArray;
 872 #ifdef RISCV
 873 class    LIR_Op4;
 874 #endif
 875 class    LIR_OpCall;
 876 class      LIR_OpJavaCall;
 877 class      LIR_OpRTCall;
 878 class    LIR_OpArrayCopy;
 879 class    LIR_OpUpdateCRC32;
 880 class    LIR_OpLock;
 881 class    LIR_OpTypeCheck;
 882 class    LIR_OpCompareAndSwap;
 883 class    LIR_OpLoadKlass;
 884 class    LIR_OpProfileCall;
 885 class    LIR_OpProfileType;
 886 #ifdef ASSERT
 887 class    LIR_OpAssert;
 888 #endif
 889 
 890 // LIR operation codes
 891 enum LIR_Code {
 892     lir_none
 893   , begin_op0
 894       , lir_label
 895       , lir_nop
 896       , lir_backwardbranch_target
 897       , lir_std_entry
 898       , lir_osr_entry
 899       , lir_fpop_raw
 900       , lir_breakpoint
 901       , lir_rtcall
 902       , lir_membar
 903       , lir_membar_acquire
 904       , lir_membar_release
 905       , lir_membar_loadload
 906       , lir_membar_storestore
 907       , lir_membar_loadstore
 908       , lir_membar_storeload
 909       , lir_get_thread
 910       , lir_on_spin_wait
 911   , end_op0
 912   , begin_op1
 913       , lir_fxch
 914       , lir_fld
 915       , lir_push
 916       , lir_pop
 917       , lir_null_check
 918       , lir_return
 919       , lir_leal
 920 #ifndef RISCV
 921       , lir_branch
 922       , lir_cond_float_branch
 923 #endif
 924       , lir_move
 925       , lir_convert
 926       , lir_alloc_object
 927       , lir_monaddr
 928       , lir_roundfp
 929       , lir_safepoint
 930       , lir_unwind
 931       , lir_load_klass
 932   , end_op1
 933   , begin_op2
 934 #ifdef RISCV
 935       , lir_branch
 936       , lir_cond_float_branch
 937 #endif
 938       , lir_cmp
 939       , lir_cmp_l2i
 940       , lir_ucmp_fd2i
 941       , lir_cmp_fd2i
 942 #ifndef RISCV
 943       , lir_cmove
 944 #endif
 945       , lir_add
 946       , lir_sub
 947       , lir_mul
 948       , lir_div
 949       , lir_rem
 950       , lir_sqrt
 951       , lir_abs
 952       , lir_neg
 953       , lir_tan
 954       , lir_log10
 955       , lir_logic_and
 956       , lir_logic_or
 957       , lir_logic_xor
 958       , lir_shl
 959       , lir_shr
 960       , lir_ushr
 961       , lir_alloc_array
 962       , lir_throw
 963       , lir_xadd
 964       , lir_xchg
 965   , end_op2
 966   , begin_op3
 967       , lir_idiv
 968       , lir_irem
 969       , lir_fmad
 970       , lir_fmaf
 971   , end_op3
 972 #ifdef RISCV
 973   , begin_op4
 974       , lir_cmove
 975   , end_op4
 976 #endif
 977   , begin_opJavaCall
 978       , lir_static_call
 979       , lir_optvirtual_call
 980       , lir_icvirtual_call
 981       , lir_dynamic_call
 982   , end_opJavaCall
 983   , begin_opArrayCopy
 984       , lir_arraycopy
 985   , end_opArrayCopy
 986   , begin_opUpdateCRC32
 987       , lir_updatecrc32
 988   , end_opUpdateCRC32
 989   , begin_opLock
 990     , lir_lock
 991     , lir_unlock
 992   , end_opLock
 993   , begin_delay_slot
 994     , lir_delay_slot
 995   , end_delay_slot
 996   , begin_opTypeCheck
 997     , lir_instanceof
 998     , lir_checkcast
 999     , lir_store_check
1000   , end_opTypeCheck
1001   , begin_opCompareAndSwap
1002     , lir_cas_long
1003     , lir_cas_obj
1004     , lir_cas_int
1005   , end_opCompareAndSwap
1006   , begin_opMDOProfile
1007     , lir_profile_call
1008     , lir_profile_type
1009   , end_opMDOProfile
1010   , begin_opAssert
1011     , lir_assert
1012   , end_opAssert
1013 #if defined(RISCV) && defined(INCLUDE_ZGC)
1014   , begin_opZLoadBarrierTest
1015     , lir_zloadbarrier_test
1016   , end_opZLoadBarrierTest
1017 #endif
1018 };
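
// The begin_* / end_* markers bracket opcode ranges so that
// LIR_Op::is_in_range(code, begin, end) can classify an opcode.
// For example (illustrative):
//   is_in_range(lir_move, begin_op1, end_op1)   // true:  lir_move is a LIR_Op1 code
//   is_in_range(lir_label, begin_op1, end_op1)  // false: lir_label is a LIR_Op0 code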
1019 
1020 
1021 enum LIR_Condition {
1022     lir_cond_equal
1023   , lir_cond_notEqual
1024   , lir_cond_less
1025   , lir_cond_lessEqual
1026   , lir_cond_greaterEqual
1027   , lir_cond_greater
1028   , lir_cond_belowEqual
1029   , lir_cond_aboveEqual
1030   , lir_cond_always
1031   , lir_cond_unknown = -1
1032 };
1033 
1034 
1035 enum LIR_PatchCode {
1036   lir_patch_none,
1037   lir_patch_low,
1038   lir_patch_high,
1039   lir_patch_normal
1040 };
1041 
1042 
1043 enum LIR_MoveKind {
1044   lir_move_normal,
1045   lir_move_volatile,
1046   lir_move_unaligned,
1047   lir_move_wide,
1048   lir_move_max_flag
1049 };
1050 
1051 
1052 // --------------------------------------------------
1053 // LIR_Op
1054 // --------------------------------------------------
1055 class LIR_Op: public CompilationResourceObj {
1056  friend class LIR_OpVisitState;
1057 
1058 #ifdef ASSERT
1059  private:
1060   const char *  _file;
1061   int           _line;
1062 #endif
1063 
1064  protected:
1065   LIR_Opr       _result;
1066   unsigned short _code;
1067   unsigned short _flags;
1068   CodeEmitInfo* _info;
1069   int           _id;     // value id for register allocation
1070   int           _fpu_pop_count;
1071   Instruction*  _source; // for debugging
1072 
1073   static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
1074 
1075  protected:
1076   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
1077 
1078  public:
1079   LIR_Op()
1080     :
1081 #ifdef ASSERT
1082       _file(NULL)
1083     , _line(0),
1084 #endif
1085       _result(LIR_OprFact::illegalOpr)
1086     , _code(lir_none)
1087     , _flags(0)
1088     , _info(NULL)
1089     , _id(-1)
1090     , _fpu_pop_count(0)
1091     , _source(NULL) {}
1092 
1093   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
1094     :
1095 #ifdef ASSERT
1096       _file(NULL)
1097     , _line(0),
1098 #endif
1099       _result(result)
1100     , _code(code)
1101     , _flags(0)
1102     , _info(info)
1103     , _id(-1)
1104     , _fpu_pop_count(0)
1105     , _source(NULL) {}
1106 
1107   CodeEmitInfo* info() const                  { return _info;   }
1108   LIR_Code code()      const                  { return (LIR_Code)_code;   }
1109   LIR_Opr result_opr() const                  { return _result; }
1110   void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
1111 
1112 #ifdef ASSERT
1113   void set_file_and_line(const char * file, int line) {
1114     _file = file;
1115     _line = line;
1116   }
1117 #endif
1118 
1119   virtual const char * name() const PRODUCT_RETURN0;
1120   virtual void visit(LIR_OpVisitState* state);
1121 
1122   int id()             const                  { return _id;     }
1123   void set_id(int id)                         { _id = id; }
1124 
1125   // FPU stack simulation helpers -- only used on Intel
1126   void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
1127   int  fpu_pop_count() const                  { return _fpu_pop_count; }
1128   bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
1129 
1130   Instruction* source() const                 { return _source; }
1131   void set_source(Instruction* ins)           { _source = ins; }
1132 
1133   virtual void emit_code(LIR_Assembler* masm) = 0;
1134   virtual void print_instr(outputStream* out) const   = 0;
1135   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
1136 
1137   virtual bool is_patching() { return false; }
1138   virtual LIR_OpCall* as_OpCall() { return NULL; }
1139   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
1140   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
1141   virtual LIR_OpDelay* as_OpDelay() { return NULL; }
1142   virtual LIR_OpLock* as_OpLock() { return NULL; }
1143   virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
1144   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
1145   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
1146   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
1147   virtual LIR_OpReturn* as_OpReturn() { return NULL; }
1148   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
1149   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
1150   virtual LIR_Op0* as_Op0() { return NULL; }
1151   virtual LIR_Op1* as_Op1() { return NULL; }
1152   virtual LIR_Op2* as_Op2() { return NULL; }
1153   virtual LIR_Op3* as_Op3() { return NULL; }
1154 #ifdef RISCV
1155   virtual LIR_Op4* as_Op4() { return NULL; }
1156 #endif
1157   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
1158   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
1159   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
1160   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
1161   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return NULL; }
1162   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
1163   virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
1164 #ifdef ASSERT
1165   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
1166 #endif
1167 
1168   virtual void verify() const {}
1169 };
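
// The as_OpXxx() virtuals above act as checked downcasts: they return NULL
// unless the op actually is of the requested subtype. Illustrative use (a
// sketch only; the function name is made up):
//
//   static void visit_java_calls(LIR_Op* op) {
//     if (LIR_OpJavaCall* call = op->as_OpJavaCall()) {
//       // call->method(), call->receiver(), call->arguments(), ...
//     }
//   }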
1170 
1171 // for calls
1172 class LIR_OpCall: public LIR_Op {
1173  friend class LIR_OpVisitState;
1174 
1175  protected:
1176   address      _addr;
1177   LIR_OprList* _arguments;
1178  protected:
1179   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
1180              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1181     : LIR_Op(code, result, info)
1182     , _addr(addr)
1183     , _arguments(arguments) {}
1184 
1185  public:
1186   address addr() const                           { return _addr; }
1187   const LIR_OprList* arguments() const           { return _arguments; }
1188   virtual LIR_OpCall* as_OpCall()                { return this; }
1189 };
1190 
1191 
1192 // --------------------------------------------------
1193 // LIR_OpJavaCall
1194 // --------------------------------------------------
1195 class LIR_OpJavaCall: public LIR_OpCall {
1196  friend class LIR_OpVisitState;
1197 
1198  private:
1199   ciMethod* _method;
1200   LIR_Opr   _receiver;
1201   LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
1202 
1203  public:
1204   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1205                  LIR_Opr receiver, LIR_Opr result,
1206                  address addr, LIR_OprList* arguments,
1207                  CodeEmitInfo* info)
1208   : LIR_OpCall(code, addr, result, arguments, info)
1209   , _method(method)
1210   , _receiver(receiver)
1211   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1212   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1213 
1214   LIR_OpJavaCall(LIR_Code code, ciMethod* method,
1215                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
1216                  LIR_OprList* arguments, CodeEmitInfo* info)
1217   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
1218   , _method(method)
1219   , _receiver(receiver)
1220   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
1221   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
1222 
1223   LIR_Opr receiver() const                       { return _receiver; }
1224   ciMethod* method() const                       { return _method;   }
1225 
1226   // JSR 292 support.
1227   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
1228   bool is_method_handle_invoke() const {
1229     return method()->is_compiled_lambda_form() ||   // Java-generated lambda form
1230            method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
1231   }
1232 
1233   virtual void emit_code(LIR_Assembler* masm);
1234   virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
1235   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1236 };
1237 
1238 // --------------------------------------------------
1239 // LIR_OpLabel
1240 // --------------------------------------------------
1241 // Location where a branch can continue
1242 class LIR_OpLabel: public LIR_Op {
1243  friend class LIR_OpVisitState;
1244 
1245  private:
1246   Label* _label;
1247  public:
1248   LIR_OpLabel(Label* lbl)
1249    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
1250    , _label(lbl)                                 {}
1251   Label* label() const                           { return _label; }
1252 
1253   virtual void emit_code(LIR_Assembler* masm);
1254   virtual LIR_OpLabel* as_OpLabel() { return this; }
1255   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1256 };
1257 
1258 // LIR_OpArrayCopy
1259 class LIR_OpArrayCopy: public LIR_Op {
1260  friend class LIR_OpVisitState;
1261 
1262  private:
1263   ArrayCopyStub*  _stub;
1264   LIR_Opr   _src;
1265   LIR_Opr   _src_pos;
1266   LIR_Opr   _dst;
1267   LIR_Opr   _dst_pos;
1268   LIR_Opr   _length;
1269   LIR_Opr   _tmp;
1270   ciArrayKlass* _expected_type;
1271   int       _flags;
1272 
1273 public:
1274   enum Flags {
1275     src_null_check         = 1 << 0,
1276     dst_null_check         = 1 << 1,
1277     src_pos_positive_check = 1 << 2,
1278     dst_pos_positive_check = 1 << 3,
1279     length_positive_check  = 1 << 4,
1280     src_range_check        = 1 << 5,
1281     dst_range_check        = 1 << 6,
1282     type_check             = 1 << 7,
1283     overlapping            = 1 << 8,
1284     unaligned              = 1 << 9,
1285     src_objarray           = 1 << 10,
1286     dst_objarray           = 1 << 11,
1287     all_flags              = (1 << 12) - 1
1288   };
1289 
1290   LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
1291                   ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
1292 
1293   LIR_Opr src() const                            { return _src; }
1294   LIR_Opr src_pos() const                        { return _src_pos; }
1295   LIR_Opr dst() const                            { return _dst; }
1296   LIR_Opr dst_pos() const                        { return _dst_pos; }
1297   LIR_Opr length() const                         { return _length; }
1298   LIR_Opr tmp() const                            { return _tmp; }
1299   int flags() const                              { return _flags; }
1300   ciArrayKlass* expected_type() const            { return _expected_type; }
1301   ArrayCopyStub* stub() const                    { return _stub; }
1302 
1303   virtual void emit_code(LIR_Assembler* masm);
1304   virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
1305   void print_instr(outputStream* out) const PRODUCT_RETURN;
1306 };
1307 
1308 // LIR_OpUpdateCRC32
1309 class LIR_OpUpdateCRC32: public LIR_Op {
1310   friend class LIR_OpVisitState;
1311 
1312 private:
1313   LIR_Opr   _crc;
1314   LIR_Opr   _val;
1315 
1316 public:
1317 
1318   LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
1319 
1320   LIR_Opr crc() const                            { return _crc; }
1321   LIR_Opr val() const                            { return _val; }
1322 
1323   virtual void emit_code(LIR_Assembler* masm);
1324   virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
1325   void print_instr(outputStream* out) const PRODUCT_RETURN;
1326 };
1327 
1328 // --------------------------------------------------
1329 // LIR_Op0
1330 // --------------------------------------------------
1331 class LIR_Op0: public LIR_Op {
1332  friend class LIR_OpVisitState;
1333 
1334  public:
1335   LIR_Op0(LIR_Code code)
1336    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1337   LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
1338    : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
1339 
1340   virtual void emit_code(LIR_Assembler* masm);
1341   virtual LIR_Op0* as_Op0() { return this; }
1342   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1343 };
1344 
1345 
1346 // --------------------------------------------------
1347 // LIR_Op1
1348 // --------------------------------------------------
1349 
1350 class LIR_Op1: public LIR_Op {
1351  friend class LIR_OpVisitState;
1352 
1353  protected:
1354   LIR_Opr         _opr;   // input operand
1355   BasicType       _type;  // Operand types
1356   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
1357 
1358   static void print_patch_code(outputStream* out, LIR_PatchCode code);
1359 
1360   void set_kind(LIR_MoveKind kind) {
1361     assert(code() == lir_move, "must be");
1362     _flags = kind;
1363   }
1364 
1365  public:
1366   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
1367     : LIR_Op(code, result, info)
1368     , _opr(opr)
1369     , _type(type)
1370     , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1371 
1372   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
1373     : LIR_Op(code, result, info)
1374     , _opr(opr)
1375     , _type(type)
1376     , _patch(patch)                    {
1377     assert(code == lir_move, "must be");
1378     set_kind(kind);
1379   }
1380 
1381   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
1382     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1383     , _opr(opr)
1384     , _type(T_ILLEGAL)
1385     , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
1386 
1387   LIR_Opr in_opr()           const               { return _opr;   }
1388   LIR_PatchCode patch_code() const               { return _patch; }
1389   BasicType type()           const               { return _type;  }
1390 
1391   LIR_MoveKind move_kind() const {
1392     assert(code() == lir_move, "must be");
1393     return (LIR_MoveKind)_flags;
1394   }
1395 
1396   virtual bool is_patching() { return _patch != lir_patch_none; }
1397   virtual void emit_code(LIR_Assembler* masm);
1398   virtual LIR_Op1* as_Op1() { return this; }
1399   virtual const char* name() const PRODUCT_RETURN0;
1400 
1401   void set_in_opr(LIR_Opr opr) { _opr = opr; }
1402 
1403   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1404   virtual void verify() const;
1405 };
1406 
1407 
1408 // for runtime calls
1409 class LIR_OpRTCall: public LIR_OpCall {
1410  friend class LIR_OpVisitState;
1411 
1412  private:
1413   LIR_Opr _tmp;
1414  public:
1415   LIR_OpRTCall(address addr, LIR_Opr tmp,
1416                LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
1417     : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
1418     , _tmp(tmp) {}
1419 
1420   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1421   virtual void emit_code(LIR_Assembler* masm);
1422   virtual LIR_OpRTCall* as_OpRTCall() { return this; }
1423 
1424   LIR_Opr tmp() const                            { return _tmp; }
1425 
1426   virtual void verify() const;
1427 };
1428 
1429 class LIR_OpReturn: public LIR_Op1 {
1430  friend class LIR_OpVisitState;
1431 
1432  private:
1433   C1SafepointPollStub* _stub;
1434 
1435  public:
1436   LIR_OpReturn(LIR_Opr opr);
1437 
1438   C1SafepointPollStub* stub() const { return _stub; }
1439   virtual LIR_OpReturn* as_OpReturn() { return this; }
1440 };
1441 
1442 class ConversionStub;
1443 
1444 class LIR_OpConvert: public LIR_Op1 {
1445  friend class LIR_OpVisitState;
1446 
1447  private:
1448    Bytecodes::Code _bytecode;
1449    ConversionStub* _stub;
1450 
1451  public:
1452    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
1453      : LIR_Op1(lir_convert, opr, result)
1454      , _bytecode(code)
1455      , _stub(stub)                               {}
1456 
1457   Bytecodes::Code bytecode() const               { return _bytecode; }
1458   ConversionStub* stub() const                   { return _stub; }
1459 
1460   virtual void emit_code(LIR_Assembler* masm);
1461   virtual LIR_OpConvert* as_OpConvert() { return this; }
1462   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1463 
1464   static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
1465 };
1466 
1467 
1468 // LIR_OpAllocObj
1469 class LIR_OpAllocObj : public LIR_Op1 {
1470  friend class LIR_OpVisitState;
1471 
1472  private:
1473   LIR_Opr _tmp1;
1474   LIR_Opr _tmp2;
1475   LIR_Opr _tmp3;
1476   LIR_Opr _tmp4;
1477   int     _hdr_size;
1478   int     _obj_size;
1479   CodeStub* _stub;
1480   bool    _init_check;
1481 
1482  public:
1483   LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
1484                  LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
1485                  int hdr_size, int obj_size, bool init_check, CodeStub* stub)
1486     : LIR_Op1(lir_alloc_object, klass, result)
1487     , _tmp1(t1)
1488     , _tmp2(t2)
1489     , _tmp3(t3)
1490     , _tmp4(t4)
1491     , _hdr_size(hdr_size)
1492     , _obj_size(obj_size)
1493     , _stub(stub)
1494     , _init_check(init_check)                    { }
1495 
1496   LIR_Opr klass()        const                   { return in_opr();     }
1497   LIR_Opr obj()          const                   { return result_opr(); }
1498   LIR_Opr tmp1()         const                   { return _tmp1;        }
1499   LIR_Opr tmp2()         const                   { return _tmp2;        }
1500   LIR_Opr tmp3()         const                   { return _tmp3;        }
1501   LIR_Opr tmp4()         const                   { return _tmp4;        }
1502   int     header_size()  const                   { return _hdr_size;    }
1503   int     object_size()  const                   { return _obj_size;    }
1504   bool    init_check()   const                   { return _init_check;  }
1505   CodeStub* stub()       const                   { return _stub;        }
1506 
1507   virtual void emit_code(LIR_Assembler* masm);
1508   virtual LIR_OpAllocObj* as_OpAllocObj() { return this; }
1509   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1510 };
1511 
1512 
1513 // LIR_OpRoundFP
1514 class LIR_OpRoundFP : public LIR_Op1 {
1515  friend class LIR_OpVisitState;
1516 
1517  private:
1518   LIR_Opr _tmp;
1519 
1520  public:
1521   LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
1522     : LIR_Op1(lir_roundfp, reg, result)
1523     , _tmp(stack_loc_temp) {}
1524 
1525   LIR_Opr tmp() const                            { return _tmp; }
1526   virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
1527   void print_instr(outputStream* out) const PRODUCT_RETURN;
1528 };
1529 
1530 // LIR_OpTypeCheck
1531 class LIR_OpTypeCheck: public LIR_Op {
1532  friend class LIR_OpVisitState;
1533 
1534  private:
1535   LIR_Opr       _object;
1536   LIR_Opr       _array;
1537   ciKlass*      _klass;
1538   LIR_Opr       _tmp1;
1539   LIR_Opr       _tmp2;
1540   LIR_Opr       _tmp3;
1541   bool          _fast_check;
1542   CodeEmitInfo* _info_for_patch;
1543   CodeEmitInfo* _info_for_exception;
1544   CodeStub*     _stub;
1545   ciMethod*     _profiled_method;
1546   int           _profiled_bci;
1547   bool          _should_profile;
1548 
1549 public:
1550   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
1551                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
1552                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
1553   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
1554                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
1555 
1556   LIR_Opr object() const                         { return _object;         }
1557   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
1558   LIR_Opr tmp1() const                           { return _tmp1;           }
1559   LIR_Opr tmp2() const                           { return _tmp2;           }
1560   LIR_Opr tmp3() const                           { return _tmp3;           }
1561   ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
1562   bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
1563   CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
1564   CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
1565   CodeStub* stub() const                         { return _stub;           }
1566 
1567   // MethodData* profiling
1568   void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
1569   void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
1570   void set_should_profile(bool b)                { _should_profile = b;       }
1571   ciMethod* profiled_method() const              { return _profiled_method;   }
1572   int       profiled_bci() const                 { return _profiled_bci;      }
1573   bool      should_profile() const               { return _should_profile;    }
1574 
1575   virtual bool is_patching() { return _info_for_patch != NULL; }
1576   virtual void emit_code(LIR_Assembler* masm);
1577   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
1578   void print_instr(outputStream* out) const PRODUCT_RETURN;
1579 };
1580 
1581 // LIR_Op2
1582 class LIR_Op2: public LIR_Op {
1583  friend class LIR_OpVisitState;
1584 
1585   int  _fpu_stack_size; // for sin/cos implementation on Intel
1586 
1587  protected:
1588   LIR_Opr   _opr1;
1589   LIR_Opr   _opr2;
1590   BasicType _type;
1591   LIR_Opr   _tmp1;
1592   LIR_Opr   _tmp2;
1593   LIR_Opr   _tmp3;
1594   LIR_Opr   _tmp4;
1595   LIR_Opr   _tmp5;
1596   LIR_Condition _condition;
1597 
1598   void verify() const;
1599 
1600  public:
1601   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1602     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1603     , _fpu_stack_size(0)
1604     , _opr1(opr1)
1605     , _opr2(opr2)
1606     , _type(type)
1607     , _tmp1(LIR_OprFact::illegalOpr)
1608     , _tmp2(LIR_OprFact::illegalOpr)
1609     , _tmp3(LIR_OprFact::illegalOpr)
1610     , _tmp4(LIR_OprFact::illegalOpr)
1611     , _tmp5(LIR_OprFact::illegalOpr)
1612     , _condition(condition) {
1613     assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check");
1614   }
1615 
1616   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
1617     : LIR_Op(code, result, NULL)
1618     , _fpu_stack_size(0)
1619     , _opr1(opr1)
1620     , _opr2(opr2)
1621     , _type(type)
1622     , _tmp1(LIR_OprFact::illegalOpr)
1623     , _tmp2(LIR_OprFact::illegalOpr)
1624     , _tmp3(LIR_OprFact::illegalOpr)
1625     , _tmp4(LIR_OprFact::illegalOpr)
1626     , _tmp5(LIR_OprFact::illegalOpr)
1627     , _condition(condition) {
1628     assert(code == lir_cmove, "code check");
1629     assert(type != T_ILLEGAL, "cmove should have type");
1630   }
1631 
1632   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
1633           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
1634     : LIR_Op(code, result, info)
1635     , _fpu_stack_size(0)
1636     , _opr1(opr1)
1637     , _opr2(opr2)
1638     , _type(type)
1639     , _tmp1(LIR_OprFact::illegalOpr)
1640     , _tmp2(LIR_OprFact::illegalOpr)
1641     , _tmp3(LIR_OprFact::illegalOpr)
1642     , _tmp4(LIR_OprFact::illegalOpr)
1643     , _tmp5(LIR_OprFact::illegalOpr)
1644     , _condition(lir_cond_unknown) {
1645     assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check");
1646   }
1647 
1648   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
1649           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
1650     : LIR_Op(code, result, NULL)
1651     , _fpu_stack_size(0)
1652     , _opr1(opr1)
1653     , _opr2(opr2)
1654     , _type(T_ILLEGAL)
1655     , _tmp1(tmp1)
1656     , _tmp2(tmp2)
1657     , _tmp3(tmp3)
1658     , _tmp4(tmp4)
1659     , _tmp5(tmp5)
1660     , _condition(lir_cond_unknown) {
1661     assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check");
1662   }
1663 
1664   LIR_Opr in_opr1() const                        { return _opr1; }
1665   LIR_Opr in_opr2() const                        { return _opr2; }
1666   BasicType type()  const                        { return _type; }
1667   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1668   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1669   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1670   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1671   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1672   LIR_Condition condition() const  {
1673 #ifdef RISCV
1674     assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch || code() == lir_assert, "only valid for cmp, branch and assert"); return _condition;
1675 #else
1676     assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp, cmove and assert"); return _condition;
1677 #endif
1678   }
1679   void set_condition(LIR_Condition condition) {
1680 #ifdef RISCV
1681     assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch, "only valid for cmp and branch"); _condition = condition;
1682 #else
1683     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
1684 #endif
1685   }
1686 
1687   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
1688   int  fpu_stack_size() const                    { return _fpu_stack_size; }
1689 
1690   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1691   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1692 
1693   virtual void emit_code(LIR_Assembler* masm);
1694   virtual LIR_Op2* as_Op2() { return this; }
1695   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1696 };
1697 
1698 #ifdef RISCV
1699 class LIR_OpBranch: public LIR_Op2 {
1700 #else
1701 class LIR_OpBranch: public LIR_Op {
1702 #endif
1703  friend class LIR_OpVisitState;
1704 
1705  private:
1706 #ifndef RISCV
1707   LIR_Condition _cond;
1708 #endif
1709   Label*        _label;
1710   BlockBegin*   _block;  // if this is a branch to a block, this is the block
1711   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
1712   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
1713 
1714  public:
1715   LIR_OpBranch(LIR_Condition cond, Label* lbl)
1716 #ifdef RISCV
1717     : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1718 #else
1719     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
1720     , _cond(cond)
1721 #endif
1722     , _label(lbl)
1723     , _block(NULL)
1724     , _ublock(NULL)
1725     , _stub(NULL) { }
1726 
1727   LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
1728   LIR_OpBranch(LIR_Condition cond, CodeStub* stub);
1729 
1730   // for unordered comparisons
1731   LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);
1732 
1733 #ifdef RISCV
1734   LIR_Condition cond()        const              { return condition();  }
1735   void set_cond(LIR_Condition cond)              { set_condition(cond); }
1736 #else
1737   LIR_Condition cond()        const              { return _cond;        }
1738   void set_cond(LIR_Condition cond)              { _cond = cond;        }
1739 #endif
1740   Label*        label()       const              { return _label;       }
1741   BlockBegin*   block()       const              { return _block;       }
1742   BlockBegin*   ublock()      const              { return _ublock;      }
1743   CodeStub*     stub()        const              { return _stub;        }
1744 
1745   void          change_block(BlockBegin* b);
1746   void          change_ublock(BlockBegin* b);
1747   void          negate_cond();
1748 
1749   virtual void emit_code(LIR_Assembler* masm);
1750   virtual LIR_OpBranch* as_OpBranch() { return this; }
1751   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1752 };
1753 
1754 class LIR_OpAllocArray : public LIR_Op {
1755  friend class LIR_OpVisitState;
1756 
1757  private:
1758   LIR_Opr   _klass;
1759   LIR_Opr   _len;
1760   LIR_Opr   _tmp1;
1761   LIR_Opr   _tmp2;
1762   LIR_Opr   _tmp3;
1763   LIR_Opr   _tmp4;
1764   BasicType _type;
1765   CodeStub* _stub;
1766 
1767  public:
1768   LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
1769     : LIR_Op(lir_alloc_array, result, NULL)
1770     , _klass(klass)
1771     , _len(len)
1772     , _tmp1(t1)
1773     , _tmp2(t2)
1774     , _tmp3(t3)
1775     , _tmp4(t4)
1776     , _type(type)
1777     , _stub(stub) {}
1778 
1779   LIR_Opr   klass()   const                      { return _klass;       }
1780   LIR_Opr   len()     const                      { return _len;         }
1781   LIR_Opr   obj()     const                      { return result_opr(); }
1782   LIR_Opr   tmp1()    const                      { return _tmp1;        }
1783   LIR_Opr   tmp2()    const                      { return _tmp2;        }
1784   LIR_Opr   tmp3()    const                      { return _tmp3;        }
1785   LIR_Opr   tmp4()    const                      { return _tmp4;        }
1786   BasicType type()    const                      { return _type;        }
1787   CodeStub* stub()    const                      { return _stub;        }
1788 
1789   virtual void emit_code(LIR_Assembler* masm);
1790   virtual LIR_OpAllocArray* as_OpAllocArray() { return this; }
1791   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1792 };
1793 
1794 
1795 class LIR_Op3: public LIR_Op {
1796  friend class LIR_OpVisitState;
1797 
1798  private:
1799   LIR_Opr _opr1;
1800   LIR_Opr _opr2;
1801   LIR_Opr _opr3;
1802  public:
1803   LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
1804     : LIR_Op(code, result, info)
1805     , _opr1(opr1)
1806     , _opr2(opr2)
1807     , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
1808   LIR_Opr in_opr1() const                        { return _opr1; }
1809   LIR_Opr in_opr2() const                        { return _opr2; }
1810   LIR_Opr in_opr3() const                        { return _opr3; }
1811 
1812   virtual void emit_code(LIR_Assembler* masm);
1813   virtual LIR_Op3* as_Op3() { return this; }
1814   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1815 };
1816 
1817 #ifdef RISCV
1818 class LIR_Op4: public LIR_Op {
1819   friend class LIR_OpVisitState;
1820  protected:
1821   LIR_Opr   _opr1;
1822   LIR_Opr   _opr2;
1823   LIR_Opr   _opr3;
1824   LIR_Opr   _opr4;
1825   BasicType _type;
1826   LIR_Opr   _tmp1;
1827   LIR_Opr   _tmp2;
1828   LIR_Opr   _tmp3;
1829   LIR_Opr   _tmp4;
1830   LIR_Opr   _tmp5;
1831   LIR_Condition _condition;
1832 
1833  public:
1834   LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4,
1835           LIR_Opr result, BasicType type)
1836     : LIR_Op(code, result, NULL)
1837     , _opr1(opr1)
1838     , _opr2(opr2)
1839     , _opr3(opr3)
1840     , _opr4(opr4)
1841     , _type(type)
1842     , _tmp1(LIR_OprFact::illegalOpr)
1843     , _tmp2(LIR_OprFact::illegalOpr)
1844     , _tmp3(LIR_OprFact::illegalOpr)
1845     , _tmp4(LIR_OprFact::illegalOpr)
1846     , _tmp5(LIR_OprFact::illegalOpr)
1847     , _condition(condition) {
1848     assert(code == lir_cmove, "code check");
1849     assert(type != T_ILLEGAL, "cmove should have type");
1850   }
1851 
1852   LIR_Opr in_opr1() const                        { return _opr1; }
1853   LIR_Opr in_opr2() const                        { return _opr2; }
1854   LIR_Opr in_opr3() const                        { return _opr3; }
1855   LIR_Opr in_opr4() const                        { return _opr4; }
1856   BasicType type()  const                        { return _type; }
1857   LIR_Opr tmp1_opr() const                       { return _tmp1; }
1858   LIR_Opr tmp2_opr() const                       { return _tmp2; }
1859   LIR_Opr tmp3_opr() const                       { return _tmp3; }
1860   LIR_Opr tmp4_opr() const                       { return _tmp4; }
1861   LIR_Opr tmp5_opr() const                       { return _tmp5; }
1862 
1863   LIR_Condition condition() const                { return _condition; }
1864   void set_condition(LIR_Condition condition)    { _condition = condition; }
1865 
1866   void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
1867   void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
1868   void set_in_opr3(LIR_Opr opr)                  { _opr3 = opr; }
1869   void set_in_opr4(LIR_Opr opr)                  { _opr4 = opr; }
1870   virtual void emit_code(LIR_Assembler* masm);
1871   virtual LIR_Op4* as_Op4() { return this; }
1872 
1873   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1874 };
1875 #endif
1876 
1877 //--------------------------------
1878 class LabelObj: public CompilationResourceObj {
1879  private:
1880   Label _label;
1881  public:
1882   LabelObj()                                     {}
1883   Label* label()                                 { return &_label; }
1884 };
1885 
1886 
1887 class LIR_OpLock: public LIR_Op {
1888  friend class LIR_OpVisitState;
1889 
1890  private:
1891   LIR_Opr _hdr;
1892   LIR_Opr _obj;
1893   LIR_Opr _lock;
1894   LIR_Opr _scratch;
1895   CodeStub* _stub;
1896  public:
1897   LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
1898     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
1899     , _hdr(hdr)
1900     , _obj(obj)
1901     , _lock(lock)
1902     , _scratch(scratch)
1903     , _stub(stub)                      {}
1904 
1905   LIR_Opr hdr_opr() const                        { return _hdr; }
1906   LIR_Opr obj_opr() const                        { return _obj; }
1907   LIR_Opr lock_opr() const                       { return _lock; }
1908   LIR_Opr scratch_opr() const                    { return _scratch; }
1909   CodeStub* stub() const                         { return _stub; }
1910 
1911   virtual void emit_code(LIR_Assembler* masm);
1912   virtual LIR_OpLock* as_OpLock() { return this; }
1913   void print_instr(outputStream* out) const PRODUCT_RETURN;
1914 };
1915 
1916 class LIR_OpLoadKlass: public LIR_Op {
1917   friend class LIR_OpVisitState;
1918 
1919  private:
1920   LIR_Opr _obj;
1921  public:
1922   LIR_OpLoadKlass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info)
1923     : LIR_Op(lir_load_klass, result, info)
1924     , _obj(obj)
1925     {}
1926 
1927   LIR_Opr obj()        const { return _obj;  }
1928 
1929   virtual LIR_OpLoadKlass* as_OpLoadKlass() { return this; }
1930   virtual void emit_code(LIR_Assembler* masm);
1931   void print_instr(outputStream* out) const PRODUCT_RETURN;
1932 };
1933 
1934 class LIR_OpDelay: public LIR_Op {
1935  friend class LIR_OpVisitState;
1936 
1937  private:
1938   LIR_Op* _op;
1939 
1940  public:
1941   LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
1942     LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
1943     _op(op) {
1944     assert(op->code() == lir_nop, "should be filling with nops");
1945   }
1946   virtual void emit_code(LIR_Assembler* masm);
1947   virtual LIR_OpDelay* as_OpDelay() { return this; }
1948   void print_instr(outputStream* out) const PRODUCT_RETURN;
1949   LIR_Op* delay_op() const { return _op; }
1950   CodeEmitInfo* call_info() const { return info(); }
1951 };
1952 
1953 #ifdef ASSERT
1954 // LIR_OpAssert
1955 class LIR_OpAssert : public LIR_Op2 {
1956  friend class LIR_OpVisitState;
1957 
1958  private:
1959   const char* _msg;
1960   bool        _halt;
1961 
1962  public:
1963   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1964     : LIR_Op2(lir_assert, condition, opr1, opr2)
1965     , _msg(msg)
1966     , _halt(halt) {
1967   }
1968 
1969   const char* msg() const                        { return _msg; }
1970   bool        halt() const                       { return _halt; }
1971 
1972   virtual void emit_code(LIR_Assembler* masm);
1973   virtual LIR_OpAssert* as_OpAssert()            { return this; }
1974   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1975 };
1976 #endif
1977 
1978 // LIR_OpCompareAndSwap
1979 class LIR_OpCompareAndSwap : public LIR_Op {
1980  friend class LIR_OpVisitState;
1981 
1982  private:
1983   LIR_Opr _addr;
1984   LIR_Opr _cmp_value;
1985   LIR_Opr _new_value;
1986   LIR_Opr _tmp1;
1987   LIR_Opr _tmp2;
1988 
1989  public:
1990   LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
1991                        LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
1992     : LIR_Op(code, result, NULL)  // no info
1993     , _addr(addr)
1994     , _cmp_value(cmp_value)
1995     , _new_value(new_value)
1996     , _tmp1(t1)
1997     , _tmp2(t2)                                  { }
1998 
1999   LIR_Opr addr()        const                    { return _addr;  }
2000   LIR_Opr cmp_value()   const                    { return _cmp_value; }
2001   LIR_Opr new_value()   const                    { return _new_value; }
2002   LIR_Opr tmp1()        const                    { return _tmp1;      }
2003   LIR_Opr tmp2()        const                    { return _tmp2;      }
2004 
2005   virtual void emit_code(LIR_Assembler* masm);
2006   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return this; }
2007   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2008 };
2009 
2010 // LIR_OpProfileCall
2011 class LIR_OpProfileCall : public LIR_Op {
2012  friend class LIR_OpVisitState;
2013 
2014  private:
2015   ciMethod* _profiled_method;
2016   int       _profiled_bci;
2017   ciMethod* _profiled_callee;
2018   LIR_Opr   _mdo;
2019   LIR_Opr   _recv;
2020   LIR_Opr   _tmp1;
2021   ciKlass*  _known_holder;
2022 
2023  public:
2024   // Destroys recv
2025   LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
2026     : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
2027     , _profiled_method(profiled_method)
2028     , _profiled_bci(profiled_bci)
2029     , _profiled_callee(profiled_callee)
2030     , _mdo(mdo)
2031     , _recv(recv)
2032     , _tmp1(t1)
2033     , _known_holder(known_holder)                { }
2034 
2035   ciMethod* profiled_method() const              { return _profiled_method;  }
2036   int       profiled_bci()    const              { return _profiled_bci;     }
2037   ciMethod* profiled_callee() const              { return _profiled_callee;  }
2038   LIR_Opr   mdo()             const              { return _mdo;              }
2039   LIR_Opr   recv()            const              { return _recv;             }
2040   LIR_Opr   tmp1()            const              { return _tmp1;             }
2041   ciKlass*  known_holder()    const              { return _known_holder;     }
2042 
2043   virtual void emit_code(LIR_Assembler* masm);
2044   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
2045   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
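       // Receiver-type profiling only applies to calls that are actually dispatched
       // dynamically: invokevirtual that cannot be statically bound, or invokeinterface,
       // and only when C1ProfileVirtualCalls is enabled and the callee is not static
       // (see the checks below).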
2046   bool should_profile_receiver_type() const {
2047     bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
2048     Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
2049     bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
2050     return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
2051   }
2052 };
2053 
2054 // LIR_OpProfileType
2055 class LIR_OpProfileType : public LIR_Op {
2056  friend class LIR_OpVisitState;
2057 
2058  private:
2059   LIR_Opr      _mdp;
2060   LIR_Opr      _obj;
2061   LIR_Opr      _tmp;
2062   ciKlass*     _exact_klass;   // non-NULL if we know the klass statically (no need to load it from _obj)
2063   intptr_t     _current_klass; // what the profiling currently reports
2064   bool         _not_null;      // true if we know statically that _obj cannot be null
2065   bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
2066                                // _exact_klass is the only possible type for this parameter in any context.
2067 
2068  public:
2070   LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
2071     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
2072     , _mdp(mdp)
2073     , _obj(obj)
2074     , _tmp(tmp)
2075     , _exact_klass(exact_klass)
2076     , _current_klass(current_klass)
2077     , _not_null(not_null)
2078     , _no_conflict(no_conflict) { }
2079 
2080   LIR_Opr      mdp()              const             { return _mdp;              }
2081   LIR_Opr      obj()              const             { return _obj;              }
2082   LIR_Opr      tmp()              const             { return _tmp;              }
2083   ciKlass*     exact_klass()      const             { return _exact_klass;      }
2084   intptr_t     current_klass()    const             { return _current_klass;    }
2085   bool         not_null()         const             { return _not_null;         }
2086   bool         no_conflict()      const             { return _no_conflict;      }
2087 
2088   virtual void emit_code(LIR_Assembler* masm);
2089   virtual LIR_OpProfileType* as_OpProfileType() { return this; }
2090   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
2091 };
2092 
2093 class LIR_InsertionBuffer;
2094 
2095 //--------------------------------LIR_List---------------------------------------------------
2096 // Maintains a list of LIR instructions (one instance of LIR_List per basic block).
2097 // The LIR instructions are appended by the LIR_List class itself.
2098 //
2099 // Notes:
2100 // - all offsets are (should be) in bytes
2101 // - local positions are specified with an offset, with offset 0 being local 0
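     //
     // Illustrative usage sketch (hypothetical names: a Compilation* "comp", virtual
     // register operands "src_opr"/"dst_opr" and a BlockBegin* "target_block"
     // obtained elsewhere):
     //
     //   LIR_List* lir = new LIR_List(comp);
     //   lir->move(src_opr, dst_opr);                // appends a lir_move
     //   lir->cmp(lir_cond_equal, dst_opr, 0);       // compare against an int constant
     //   lir->branch(lir_cond_equal, target_block);  // conditional branch (non-fp)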
2102 
2103 class LIR_List: public CompilationResourceObj {
2104  private:
2105   LIR_OpList  _operations;
2106 
2107   Compilation*  _compilation;
2108 #ifndef PRODUCT
2109   BlockBegin*   _block;
2110 #endif
2111 #ifdef ASSERT
2112   const char *  _file;
2113   int           _line;
2114 #endif
2115 #ifdef RISCV
2116   LIR_Opr       _cmp_opr1;
2117   LIR_Opr       _cmp_opr2;
2118 #endif
2119 
2120  public:
2121   void append(LIR_Op* op) {
2122     if (op->source() == NULL)
2123       op->set_source(_compilation->current_instruction());
2124 #ifndef PRODUCT
2125     if (PrintIRWithLIR) {
2126       _compilation->maybe_print_current_instruction();
2127       op->print(); tty->cr();
2128     }
2129 #endif // PRODUCT
2130 
2131 #ifdef RISCV
2132     set_cmp_oprs(op);
2133     // on RISCV, lir_cmp only records the cmp operands and is not appended to the list
2134     if (op->code() == lir_cmp) return;
2135 #endif
2136 
2137     _operations.append(op);
2138 
2139 #ifdef ASSERT
2140     op->verify();
2141     op->set_file_and_line(_file, _line);
2142     _file = NULL;
2143     _line = 0;
2144 #endif
2145   }
2146 
2147   LIR_List(Compilation* compilation, BlockBegin* block = NULL);
2148 
2149 #ifdef ASSERT
2150   void set_file_and_line(const char * file, int line);
2151 #endif
2152 
2153 #ifdef RISCV
2154   void set_cmp_oprs(LIR_Op* op);
2155 #endif
2156 
2157   //---------- accessors ---------------
2158   LIR_OpList* instructions_list()                { return &_operations; }
2159   int         length() const                     { return _operations.length(); }
2160   LIR_Op*     at(int i) const                    { return _operations.at(i); }
2161 
2162   NOT_PRODUCT(BlockBegin* block() const          { return _block; });
2163 
2164   // insert LIR_Ops in buffer to right places in LIR_List
2165   void append(LIR_InsertionBuffer* buffer);
2166 
2167   //---------- mutators ---------------
2168   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
2169   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
2170   void remove_at(int i)                          { _operations.remove_at(i); }
2171 
2172   //---------- printing -------------
2173   void print_instructions() PRODUCT_RETURN;
2174 
2175 
2176   //---------- instructions -------------
2177   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2178                         address dest, LIR_OprList* arguments,
2179                         CodeEmitInfo* info) {
2180     append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2181   }
2182   void call_static(ciMethod* method, LIR_Opr result,
2183                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2184     append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2185   }
2186   void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2187                       address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2188     append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2189   }
2190   void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2191                     address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2192     append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2193   }
2194 
2195   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
2196   void membar()                                  { append(new LIR_Op0(lir_membar)); }
2197   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
2198   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
2199   void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
2200   void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
2201   void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
2202   void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
2203 
2204   void nop()                                     { append(new LIR_Op0(lir_nop)); }
2205 
2206   void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
2207   void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2208 
2209   void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }
2210 
2211   void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
2212 
2213   void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
2214 
2215   // result is a stack location for the old backend and a vreg for UseLinearScan
2216   // stack_loc_temp is an illegal register for the old backend
2217   void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2218   void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2219   void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2220   void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2221   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2222   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2223   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2224   void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
2225     if (UseCompressedOops) {
2226       append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2227     } else {
2228       move(src, dst, info);
2229     }
2230   }
2231   void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2232     if (UseCompressedOops) {
2233       append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2234     } else {
2235       move(src, dst, info);
2236     }
2237   }
2238   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2239 
2240   void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
2241   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2242 
2243   void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
2244   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2245 
2246   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2247   void return_op(LIR_Opr result)                   { append(new LIR_OpReturn(result)); }
2248 
2249   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2250 
2251   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
2252   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
2253   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
2254 
2255   void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
2256   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2257     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2258   }
2259   void unwind_exception(LIR_Opr exceptionOop) {
2260     append(new LIR_Op1(lir_unwind, exceptionOop));
2261   }
2262 
2263   void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
2264   void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
2265 
2266   void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2267     append(new LIR_Op2(lir_cmp, condition, left, right, info));
2268   }
2269   void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2270     cmp(condition, left, LIR_OprFact::intConst(right), info);
2271   }
2272 
2273   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2274   void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2275 
2276 #ifdef RISCV
2277   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type,
2278              LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr) {
2279     append(new LIR_Op4(lir_cmove, condition, src1, src2, cmp_opr1, cmp_opr2, dst, type));
2280   }
2281 #else
2282   void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2283     append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2284   }
2285 #endif
2286 
2287   void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2288                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2289   void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2290                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2291   void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2292                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2293 
2294   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2295   void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr)              { append(new LIR_Op2(lir_neg, from, tmp, to)); }
2296   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2297   void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
2298   void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
2299   void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2300   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2301 
2302   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
2303   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2304   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2305   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul, left, right, res, tmp)); }
2306   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
2307   void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div, left, right, res, tmp)); }
2308   void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2309 
2310   void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2311   void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2312 
2313   void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2314 
2315   void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2316   void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2317   void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2318   void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2319   void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2320 
2321   void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2322   void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2323   void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2324   void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2325 
2326   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2327   void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2328 
2329   // jump is an unconditional branch
2330   void jump(BlockBegin* block) {
2331     append(new LIR_OpBranch(lir_cond_always, block));
2332   }
2333   void jump(CodeStub* stub) {
2334     append(new LIR_OpBranch(lir_cond_always, stub));
2335   }
2336   void branch(LIR_Condition cond, Label* lbl) {
2337     append(new LIR_OpBranch(cond, lbl));
2338   }
2339   // Should not be used for fp comparisons
2340   void branch(LIR_Condition cond, BlockBegin* block) {
2341     append(new LIR_OpBranch(cond, block));
2342   }
2343   // Should not be used for fp comparisons
2344   void branch(LIR_Condition cond, CodeStub* stub) {
2345     append(new LIR_OpBranch(cond, stub));
2346   }
2347   // Should only be used for fp comparisons
2348   void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
2349     append(new LIR_OpBranch(cond, block, unordered));
2350   }
2351 
2352   void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2353   void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2354   void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2355 
2356   void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2357   void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2358   void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2359 
2360   void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
2361   void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2362 
2363   void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2364     append(new LIR_OpRTCall(routine, tmp, result, arguments));
2365   }
2366 
2367   void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2368                     LIR_OprList* arguments, CodeEmitInfo* info) {
2369     append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2370   }
2371 
2372   void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2373   void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2374   void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2375 
2376   void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
2377 
2378   void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2379 
2380   void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2381 
2382   void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2383   void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2384 
2385   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2386                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2387                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2388                   ciMethod* profiled_method, int profiled_bci);
2389   // MethodData* profiling
2390   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2391     append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2392   }
2393   void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2394     append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2395   }
2396 
2397   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2398   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2399 
2400   void load_klass(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info) { append(new LIR_OpLoadKlass(obj, result, info)); }
2401 
2402 #ifdef ASSERT
2403   void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2404 #endif
2405 };
2406 
2407 void print_LIR(BlockList* blocks);
2408 
2409 class LIR_InsertionBuffer : public CompilationResourceObj {
2410  private:
2411   LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2412 
2413   // list of insertion points. index and count are stored alternately:
2414   // _index_and_count[i * 2]:     the index into the lir list where "count" ops should be inserted
2415   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at that index
2416   intStack    _index_and_count;
2417 
2418   // the LIR_Ops to be inserted
2419   LIR_OpList  _ops;
2420 
2421   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
2422   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
2423   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
2424 
2425 #ifdef ASSERT
2426   void verify();
2427 #endif
2428  public:
2429   LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2430 
2431   // must be called before using the insertion buffer
2432   void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2433   bool initialized() const  { return _lir != NULL; }
2434   // called automatically when the buffer is appended to the LIR_List
2435   void finish()             { _lir = NULL; }
2436 
2437   // accessors
2438   LIR_List*  lir_list() const             { return _lir; }
2439   int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
2440   int index_at(int i) const               { return _index_and_count.at((i << 1));     }
2441   int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
2442 
2443   int number_of_ops() const               { return _ops.length(); }
2444   LIR_Op* op_at(int i) const              { return _ops.at(i); }
2445 
2446   // append an instruction to the buffer
2447   void append(int index, LIR_Op* op);
2448 
2449   // convenience: append a move instruction at the given index
2450   void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2451 };
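
     // Illustrative usage sketch (hypothetical names: an existing LIR_List* "lir" and
     // operands "src_opr"/"dst_opr" obtained elsewhere): queue a move to be spliced in
     // before the instruction currently at index 3, then let the list do the insertion.
     //
     //   LIR_InsertionBuffer buf;
     //   buf.init(lir);                   // bind the buffer to the target list
     //   buf.move(3, src_opr, dst_opr);   // buffered lir_move to be inserted at index 3
     //   lir->append(&buf);               // splices the buffered ops; finish() is called automatically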
2452 
2453 
2454 //
2455 // LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2456 // Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2457 // information about the input, output and temporaries used by the
2458 // op to be recorded.  It also records whether the op has call semantics
2459 // and also records all the CodeEmitInfos used by this op.
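     //
     // Illustrative usage sketch (hypothetical: a LIR_Op* "op" taken from some LIR_List):
     //
     //   LIR_OpVisitState state;
     //   state.visit(op);   // records the inputs, temps, outputs and infos of "op"
     //   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
     //     LIR_Opr in = state.opr_at(LIR_OpVisitState::inputMode, i);
     //     // ... use "in" ...
     //   }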
2460 //
2461 
2462 
2463 class LIR_OpVisitState: public StackObj {
2464  public:
2465   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2466 
2467   enum {
2468     maxNumberOfOperands = 21,
2469     maxNumberOfInfos = 4
2470   };
2471 
2472  private:
2473   LIR_Op*          _op;
2474 
2475   // optimization: the operands and infos are not stored in a variable-length
2476   //               list, but in a fixed-size array to avoid the cost of size checks and resizing
2477   int              _oprs_len[numModes];
2478   LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
2479   int _info_len;
2480   CodeEmitInfo*    _info_new[maxNumberOfInfos];
2481 
2482   bool             _has_call;
2483   bool             _has_slow_case;
2484 
2485 
2486   // only register operands are included
2487   // addresses are decomposed into their base and index registers
2488   // constants and stack operands are ignored
2489   void append(LIR_Opr& opr, OprMode mode) {
2490     assert(opr->is_valid(), "should not call this otherwise");
2491     assert(mode >= 0 && mode < numModes, "bad mode");
2492 
2493     if (opr->is_register()) {
2494       assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2495       _oprs_new[mode][_oprs_len[mode]++] = &opr;
2496 
2497     } else if (opr->is_pointer()) {
2498       LIR_Address* address = opr->as_address_ptr();
2499       if (address != NULL) {
2500         // special handling for addresses: add the base and index registers of the address;
2501         // both are always input operands, or temps if we want to extend their liveness
2503         if (mode == outputMode) {
2504           mode = inputMode;
2505         }
2506         assert(mode == inputMode || mode == tempMode, "input or temp only for addresses");
2507         if (address->_base->is_valid()) {
2508           assert(address->_base->is_register(), "must be");
2509           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2510           _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2511         }
2512         if (address->_index->is_valid()) {
2513           assert(address->_index->is_register(), "must be");
2514           assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2515           _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2516         }
2517 
2518       } else {
2519         assert(opr->is_constant(), "constant operands are not processed");
2520       }
2521     } else {
2522       assert(opr->is_stack(), "stack operands are not processed");
2523     }
2524   }
2525 
2526   void append(CodeEmitInfo* info) {
2527     assert(info != NULL, "should not call this otherwise");
2528     assert(_info_len < maxNumberOfInfos, "array overflow");
2529     _info_new[_info_len++] = info;
2530   }
2531 
2532  public:
2533   LIR_OpVisitState()         { reset(); }
2534 
2535   LIR_Op* op() const         { return _op; }
2536   void set_op(LIR_Op* op)    { reset(); _op = op; }
2537 
2538   bool has_call() const      { return _has_call; }
2539   bool has_slow_case() const { return _has_slow_case; }
2540 
2541   void reset() {
2542     _op = NULL;
2543     _has_call = false;
2544     _has_slow_case = false;
2545 
2546     _oprs_len[inputMode] = 0;
2547     _oprs_len[tempMode] = 0;
2548     _oprs_len[outputMode] = 0;
2549     _info_len = 0;
2550   }
2551 
2552 
2553   int opr_count(OprMode mode) const {
2554     assert(mode >= 0 && mode < numModes, "bad mode");
2555     return _oprs_len[mode];
2556   }
2557 
2558   LIR_Opr opr_at(OprMode mode, int index) const {
2559     assert(mode >= 0 && mode < numModes, "bad mode");
2560     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2561     return *_oprs_new[mode][index];
2562   }
2563 
2564   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2565     assert(mode >= 0 && mode < numModes, "bad mode");
2566     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2567     *_oprs_new[mode][index] = opr;
2568   }
2569 
2570   int info_count() const {
2571     return _info_len;
2572   }
2573 
2574   CodeEmitInfo* info_at(int index) const {
2575     assert(index < _info_len, "index out of bounds");
2576     return _info_new[index];
2577   }
2578 
2579   XHandlers* all_xhandler();
2580 
2581   // collects all register operands of the instruction
2582   void visit(LIR_Op* op);
2583 
2584 #ifdef ASSERT
2585   // check that an operation has no operands
2586   bool no_operands(LIR_Op* op);
2587 #endif
2588 
2589   // LIR_Op visitor functions use these to fill in the state
2590   void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
2591   void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
2592   void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
2593   void do_info(CodeEmitInfo* info)        { append(info); }
2594 
2595   void do_stub(CodeStub* stub);
2596   void do_call()                          { _has_call = true; }
2597   void do_slow_case()                     { _has_slow_case = true; }
2598   void do_slow_case(CodeEmitInfo* info) {
2599     _has_slow_case = true;
2600     append(info);
2601   }
2602 };
2603 
2604 
2605 inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
2606 
2607 #endif // SHARE_C1_C1_LIR_HPP