/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;     // Access of unsafe origin.
  uint8_t _barrier_data;   // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn                // Value to store
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release,       // Store has to release or be preceded by MemBarRelease.
                 seqcst,        // LoadStore has to have both acquire and release semantics.
                 unset          // The memory ordering is not set (used for testing)
  } MemOrd;
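  // A minimal usage sketch (illustrative only; 'gvn', 'ctl', 'mem', 'adr'
  // and the address type 'at' are assumed to be in scope): a plain Java
  // field load is created 'unordered', while a volatile load needs
  // 'acquire', using the LoadNode::make factory declared below:
  //
  //   Node* plain_ld    = LoadNode::make(gvn, ctl, mem, adr, at,
  //                                      TypeInt::INT, T_INT, MemNode::unordered);
  //   Node* volatile_ld = LoadNode::make(gvn, ctl, mem, adr, at,
  //                                      TypeInt::INT, T_INT, MemNode::acquire);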
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }
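  // For example, a LoadINode reports memory_type() == T_INT, so
  // memory_size() yields type2aelembytes(T_INT) == 4 bytes, while a
  // LoadLNode yields 8.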

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that. Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };
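  // A hedged sketch of how the kinds arise (the surrounding names are
  // assumptions, not from this file): a load lowered from an Unsafe access
  // whose safety does not follow from a dominating null-/range-check is
  // typically created Pinned, e.g.
  //
  //   Node* unsafe_ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT,
  //                                    T_INT, MemNode::unordered,
  //                                    LoadNode::Pinned);
  //
  // whereas an ordinary field load uses the default DependsOnlyOnTest.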

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be freely reordered from those that require acquire
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase, bool fold_for_arrays) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool  has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const  { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const   { return _control_dependency == Pinned; }

  LoadNode* pin_array_access_node() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits, unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits, unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
  bool _fold_for_arrays;

  virtual uint size_of() const { return sizeof(*this); }
  virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
  virtual bool cmp( const Node &n ) const {
    return _fold_for_arrays == ((LoadKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
  }

private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo, bool fold_for_arrays)
    : LoadPNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT, bool fold_for_arrays = true);
};
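
// A hedged usage sketch (names like 'k_adr' are assumptions): loading the
// klass of an object typically goes through this factory with the address at
// the klass offset; depending on VM configuration (e.g. compressed class
// pointers, cf. the LoadNKlassNode friend declaration below) it may return a
// narrow-klass load plus decode rather than a plain LoadKlassNode:
//
//   Node* k = LoadKlassNode::make(gvn, mem, k_adr, TypeInstPtr::KLASS);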

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
  bool _fold_for_arrays;

  virtual uint size_of() const { return sizeof(*this); }
  virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
  virtual bool cmp( const Node &n ) const {
    return _fold_for_arrays == ((LoadNKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
  }

private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*, bool fold_for_arrays);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo, bool fold_for_arrays)
    : LoadNNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be freely reordered from those that require release
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);
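  // A minimal sketch of the intended pattern (illustrative; assumes 'gvn',
  // 'ctl', 'mem', 'adr', 'at' and an oop value 'val' are in scope): pick the
  // memory ordering based on the stored type, then build the store:
  //
  //   MemNode::MemOrd mo = StoreNode::release_if_reference(T_OBJECT);
  //   Node* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_OBJECT, mo);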

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool  has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

// Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }

  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == nullptr)  return nullptr; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
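
// An illustrative sketch (names and the ordering choice are assumptions, not
// taken from this file): a strong CAS of an int compares the expected value
// 'ex' against memory at 'adr' and installs 'val' on success; the node's
// boolean result feeds its users, and the updated memory state is exposed
// through an SCMemProjNode:
//
//   Node* cas  = gvn.transform(new CompareAndSwapINode(ctl, mem, adr, val, ex,
//                                                      MemNode::seqcst));
//   Node* memp = gvn.transform(new SCMemProjNode(cas));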

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
     init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation's input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};
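
// A hedged sketch of typical use during allocation expansion (the names
// 'ctl', 'mem', 'dest', 'header_size_in_bytes', 'size_in_bytes' and 'gvn'
// are assumptions): zero a freshly allocated array's body from the end of
// its header to its full size, threading the new memory state through:
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
//                                      nullptr /* val */, nullptr /* raw_val */,
//                                      header_size_in_bytes /* start */,
//                                      size_in_bytes        /* end   */,
//                                      &gvn);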
1156 
1157 //------------------------------MemBar-----------------------------------------
1158 // There are different flavors of Memory Barriers to match the Java Memory
1159 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1160 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1161 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1162 // preceding ref can be moved to after them.  We insert a MemBar-Release
1163 // before a FastUnlock or volatile-store.  All volatiles need to be
1164 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1165 // separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const;                   // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;     // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingExpandedArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
  bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};
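
// Construction sketch (illustrative, not taken from the source): how a
// volatile store is typically bracketed using the factory above.  'C' is the
// current Compile; the store emission between the two barriers is elided,
// and the exact opcodes chosen can vary by platform and barrier strategy.
//
//   MemBarNode* release = MemBarNode::make(C, Op_MemBarRelease);
//   // ... emit the StoreNode with MemNode::release ordering ...
//   MemBarNode* fence   = MemBarNode::make(C, Op_MemBarVolatile);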

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "StoreStore" - no preceding store can move after a subsequent store.
// Inserted e.g. after the initializing stores of a new object, so they
// become visible no later than the store publishing the object.
class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// "StoreStore" - as above, but inserted independent of any particular store,
// as required for the intrinsic Unsafe.storeStoreFence().
class StoreStoreFenceNode: public MemBarNode {
public:
  StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Spin-wait hint for the intrinsic Thread.onSpinWait().
class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by initialization
  // of the new memory to zero, and then by any initializing stores.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseValues* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else null if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return null if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseValues* phase);

  static intptr_t get_store_offset(Node* st, PhaseValues* phase);

  Node* make_raw_address(intptr_t offset, PhaseGVN* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
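
// Capture sketch (illustrative, not the actual caller): how a store that
// immediately follows an allocation can be folded into its initialization,
// using the two methods documented above.  'init', 'st', 'phase' and
// 'can_reshape' are assumed from the caller's context.
//
//   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
//   if (offset > 0) {
//     // Reformat 'st' to write the new object's raw memory; the original
//     // store then becomes redundant and can be removed by the caller.
//     Node* captured = init->capture_store(st, offset, phase, can_reshape);
//   }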

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;                   // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;     // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" memory supplies the default ("non-finite") support: it stands
  // in for every slice that is not stored explicitly
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = nullptr);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
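
// Usage sketch (illustrative, not taken from the source): reading and
// overwriting one slice of a merged memory state via the sparse accessors
// above.  'all_mem', 'alias_idx' and 'new_st' are assumed from the caller's
// context.
//
//   MergeMemNode* mm = MergeMemNode::make(all_mem);
//   Node* slice = mm->memory_at(alias_idx);   // this slice, or base memory
//   mm->set_memory_at(alias_idx, new_st);     // overwrite just this slice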

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = nullptr;
    _mem2 = nullptr;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  //   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  //   for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
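  //
  // A fuller sketch (illustrative only): visit every non-empty slice of a
  // single merged state.  'mm' is assumed from the caller's context.
  //
  //   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
  //     const TypePtr* tp = mms.adr_type();   // type of the current slice
  //     Node* slice = mms.memory();           // its current memory state
  //     // ... inspect it, or replace it via mms.set_memory(new_slice) ...
  //   }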

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != nullptr, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != nullptr) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
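
// Parallel iteration sketch (illustrative, not taken from the source):
// copy the slices of one merged state into another wherever they differ.
// 'dst_mm' and 'src_mm' are assumed from the caller's context.
//
//   for (MergeMemStream mms(dst_mm, src_mm); mms.next_non_empty2(); ) {
//     Node* new_mem = mms.memory2();          // slice of src_mm
//     if (new_mem != mms.force_memory()) {
//       mms.set_memory(new_mem);              // update the slice in dst_mm
//     }
//   }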

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
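
// Expansion sketch (illustrative, not taken from the source): a memory
// writeback intrinsic is commonly modelled as a pre-sync, one CacheWB per
// cache line, and a post-sync.  'ctl', 'mem', 'line_addr' and 'gvn' are
// assumed from the caller's context.
//
//   Node* pre  = gvn.transform(new CacheWBPreSyncNode(ctl, mem));
//   Node* wb   = gvn.transform(new CacheWBNode(ctl, pre, line_addr));
//   Node* post = gvn.transform(new CacheWBPostSyncNode(ctl, wb));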

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP