/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;  // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;     // Access of unsafe origin.
  uint8_t _barrier_data;   // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn                // Value to store
  };
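
  // A minimal sketch of how these edge indices are read back (illustrative
  // only; 'st' stands for some MemNode* in scope):
  //
  //   Node* ctl = st->in(MemNode::Control);  // null if the access may float
  //   Node* mem = st->in(MemNode::Memory);   // memory state consumed
  //   Node* adr = st->in(MemNode::Address);  // address being accessed
  //   Node* val = st->in(MemNode::ValueIn);  // stored value (stores only)
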
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release,       // Store has to release or be preceded by MemBarRelease.
                 seqcst,        // LoadStore has to have both acquire and release semantics.
                 unset          // The memory ordering is not set (used for testing)
  } MemOrd;
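
  // For example (a sketch, not a prescription): a volatile Java load is
  // typically created with MemNode::acquire and a volatile store with
  // MemNode::release, while plain field accesses use MemNode::unordered:
  //
  //   LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT,
  //                  is_volatile ? MemNode::acquire : MemNode::unordered);
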
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not of the memory that is accessed. For mismatched memory accesses
  // the two might differ. For instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }
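
  // For instance, a LoadSNode reports value_basic_type() == T_SHORT, so its
  // memory_size() is 2 bytes, while a LoadDNode (T_DOUBLE) reports 8.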

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const TypePtr* adr_type, outputStream* st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that. Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't have been safe to have a non-pinned LoadNode
  // with those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode with the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be freely reordered from those that require acquire
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);
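
  // A typical use of the factory (a sketch; assumes 'gvn', 'ctl', 'mem',
  // 'adr' and a matching 'adr_type' are in scope):
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered);
  //   ld = gvn.transform(ld);   // value-number the new node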

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool  has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const  { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const   { return _control_dependency == Pinned; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

private:
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test_impl() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
  virtual LoadNode* pin_node_under_control_impl() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
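
// A sketch of a typical construction (illustrative only; assumes 'adr' already
// points at the array's length field, e.g. at
// arrayOopDesc::length_offset_in_bytes() from the array base):
//
//   Node* len = gvn.transform(new LoadRangeNode(nullptr, mem, adr, TypeInt::POS));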

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (by markWord::klass_shift_at_offset bits)
// to extract the actual class pointer. C2's type system is agnostic on whether
// the input address points directly at the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be freely reordered from those that require release
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
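
  // Worked example: release_if_reference(T_OBJECT) and release_if_reference(T_ARRAY)
  // yield 'release' (except on AArch64, see above), while primitives such as
  // T_INT yield 'unordered', so only reference-like stores pay for the barrier.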

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);
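
  // A sketch of the factory in use (illustrative; assumes 'gvn', 'ctl', 'mem',
  // 'adr', 'val' are in scope and 'bt' is the stored value's BasicType):
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, bt,
  //                                   StoreNode::release_if_reference(bt));
  //   Node* new_mem = gvn.transform(st);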

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the stored value been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool  has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;

private:
  virtual bool depends_only_on_test_impl() const { return false; }
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

// Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }

  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif // ASSERT
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const TypePtr* adr_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

private:
  virtual bool depends_only_on_test_impl() const { return false; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
     init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
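
// For orientation, the conditional load/store nodes above share this input
// layout (indices from MemNode plus the ExpectedIn enums):
//
//   in(Control), in(Memory), in(Address),
//   in(ValueIn)    = the new value to install,
//   in(ExpectedIn) = the value the memory location must currently hold.
//
// CompareAndSwap* nodes produce a boolean success flag, whereas
// CompareAndExchange* nodes produce the value found at the address.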

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
  static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
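
  // A sketch of the constant-offset overload (illustrative only): zeroing a
  // freshly allocated object's payload spanning [start_offset, end_offset):
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, obj, val, raw_val,
  //                                      start_offset, // aligned mod BytesPerInt
  //                                      end_offset,   // aligned mod BytesPerLong
  //                                      true /* raw_base */, &gvn);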
  // Return the allocation's input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);

private:
  virtual bool depends_only_on_test_impl() const { return false; }
};
1155 
1156 //------------------------------MemBar-----------------------------------------
1157 // There are different flavors of Memory Barriers to match the Java Memory
1158 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1159 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1160 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1161 // preceding ref can be moved to after them.  We insert a MemBar-Release
1162 // before a FastUnlock or volatile-store.  All volatiles need to be
1163 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1164 // separate it from any following volatile-load.
1165 class MemBarNode: public MultiNode {
1166   virtual uint hash() const ;                  // { return NO_HASH; }
1167   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
1168 
1169   virtual uint size_of() const { return sizeof(*this); }
1170   // Memory type this node is serializing.  Usually either rawptr or bottom.
1171   const TypePtr* _adr_type;
1172 
1173   // How is this membar related to a nearby memory access?
1174   enum {
1175     Standalone,
1176     TrailingLoad,
1177     TrailingStore,
1178     LeadingStore,
1179     TrailingLoadStore,
1180     LeadingLoadStore,
1181     TrailingExpandedArrayCopy
1182   } _kind;
1183 
1184 #ifdef ASSERT
1185   uint _pair_idx;
1186 #endif
1187 
1188 public:
1189   enum {
1190     Precedent = TypeFunc::Parms  // optional edge to force precedence
1191   };
1192   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1193   virtual int Opcode() const = 0;
1194   virtual const class TypePtr *adr_type() const { return _adr_type; }
1195   virtual const Type* Value(PhaseGVN* phase) const;
1196   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1197   virtual uint match_edge(uint idx) const { return 0; }
1198   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1199   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1200   // Factory method.  Builds a wide or narrow membar.
1201   // Optional 'precedent' becomes an extra edge if not null.
1202   static MemBarNode* make(Compile* C, int opcode,
1203                           int alias_idx = Compile::AliasIdxBot,
1204                           Node* precedent = nullptr);
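
  // e.g. (sketch): a wide acquire barrier with no precedent edge:
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);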
1205 
1206   MemBarNode* trailing_membar() const;
1207   MemBarNode* leading_membar() const;
1208 
1209   void set_trailing_load() { _kind = TrailingLoad; }
1210   bool trailing_load() const { return _kind == TrailingLoad; }
1211   bool trailing_store() const { return _kind == TrailingStore; }
1212   bool leading_store() const { return _kind == LeadingStore; }
1213   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1214   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1215   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1216   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1217   bool standalone() const { return _kind == Standalone; }
1218   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1219   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
1220 
1221   static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
1222   static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
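
  // Pairing sketch (illustrative): after a volatile store is expanded, its
  // leading and trailing barriers can be linked so that one can be found
  // from the other:
  //   MemBarNode::set_store_pair(leading_release, trailing_volatile);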
1223 
1224   void remove(PhaseIterGVN *igvn);
1225 };
1226 
1227 // "Acquire" - no following ref can move before (but earlier refs can
1228 // follow, like an early Load stalled in cache).  Requires multi-cpu
1229 // visibility.  Inserted after a volatile load.
1230 class MemBarAcquireNode: public MemBarNode {
1231 public:
1232   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1233     : MemBarNode(C, alias_idx, precedent) {}
1234   virtual int Opcode() const;
1235 };
1236 
1237 // "Acquire" - no following ref can move before (but earlier refs can
1238 // follow, like an early Load stalled in cache).  Requires multi-cpu
1239 // visibility.  Inserted independent of any load, as required
1240 // for intrinsic Unsafe.loadFence().
1241 class LoadFenceNode: public MemBarNode {
1242 public:
1243   LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1244     : MemBarNode(C, alias_idx, precedent) {}
1245   virtual int Opcode() const;
1246 };
1247 
1248 // "Release" - no earlier ref can move after (but later refs can move
1249 // up, like a speculative pipelined cache-hitting Load).  Requires
1250 // multi-cpu visibility.  Inserted before a volatile store.
1251 class MemBarReleaseNode: public MemBarNode {
1252 public:
1253   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
1254     : MemBarNode(C, alias_idx, precedent) {}
1255   virtual int Opcode() const;
1256 };
1257 
1258 // "Release" - no earlier ref can move after (but later refs can move
1259 // up, like a speculative pipelined cache-hitting Load).  Requires
1260 // multi-cpu visibility.  Inserted independent of any store, as required
1261 // for intrinsic Unsafe.storeFence().
1262 class StoreFenceNode: public MemBarNode {
1263 public:
1264   StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1265     : MemBarNode(C, alias_idx, precedent) {}
1266   virtual int Opcode() const;
1267 };
1268 
1269 // "Acquire" - no following ref can move before (but earlier refs can
1270 // follow, like an early Load stalled in cache).  Requires multi-cpu
1271 // visibility.  Inserted after a FastLock.
1272 class MemBarAcquireLockNode: public MemBarNode {
1273 public:
1274   MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
1275     : MemBarNode(C, alias_idx, precedent) {}
1276   virtual int Opcode() const;
1277 };
1278 
1279 // "Release" - no earlier ref can move after (but later refs can move
1280 // up, like a speculative pipelined cache-hitting Load).  Requires
1281 // multi-cpu visibility.  Inserted before a FastUnLock.
1282 class MemBarReleaseLockNode: public MemBarNode {
1283 public:
1284   MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
1285     : MemBarNode(C, alias_idx, precedent) {}
1286   virtual int Opcode() const;
1287 };
1288 
1289 class MemBarStoreStoreNode: public MemBarNode {
1290 public:
1291   MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
1292     : MemBarNode(C, alias_idx, precedent) {
1293     init_class_id(Class_MemBarStoreStore);
1294   }
1295   virtual int Opcode() const;
1296 };
1297 
1298 class StoreStoreFenceNode: public MemBarNode {
1299 public:
1300   StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1301     : MemBarNode(C, alias_idx, precedent) {}
1302   virtual int Opcode() const;
1303 };
1304 
1305 // Ordering between a volatile store and a following volatile load.
1306 // Requires multi-CPU visibility?
1307 class MemBarVolatileNode: public MemBarNode {
1308 public:
1309   MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
1310     : MemBarNode(C, alias_idx, precedent) {}
1311   virtual int Opcode() const;
1312 };
1313 
1314 // Ordering within the same CPU.  Used to order unsafe memory references
1315 // inside the compiler when we lack alias info.  Not needed "outside" the
1316 // compiler because the CPU does all the ordering for us.
1317 class MemBarCPUOrderNode: public MemBarNode {
1318 public:
1319   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1320     : MemBarNode(C, alias_idx, precedent) {}
1321   virtual int Opcode() const;
1322   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1323 };
1324 
1325 class OnSpinWaitNode: public MemBarNode {
1326 public:
1327   OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
1328     : MemBarNode(C, alias_idx, precedent) {}
1329   virtual int Opcode() const;
1330 };
1331 
1332 // Isolation of object setup after an AllocateNode and before next safepoint.
1333 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1334 class InitializeNode: public MemBarNode {
1335   friend class AllocateNode;
1336 
1337   enum {
1338     Incomplete    = 0,
1339     Complete      = 1,
1340     WithArraycopy = 2
1341   };
1342   int _is_complete;
1343 
1344   bool _does_not_escape;
1345 
1346 public:
1347   enum {
1348     Control    = TypeFunc::Control,
1349     Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
1350     RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
1351     RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
1352   };
1353 
1354   InitializeNode(Compile* C, int adr_type, Node* rawoop);
1355   virtual int Opcode() const;
1356   virtual uint size_of() const { return sizeof(*this); }
1357   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1358   virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
1359 
1360   // Manage incoming memory edges via a MergeMem on in(Memory):
1361   Node* memory(uint alias_idx);
1362 
1363   // The raw memory edge coming directly from the Allocation.
1364   // The contents of this memory are *always* all-zero-bits.
1365   Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
1366 
1367   // Return the corresponding allocation for this initialization (or null if none).
1368   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1369   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1370   AllocateNode* allocation();
1371 
1372   // Anything other than zeroing in this init?
1373   bool is_non_zero();
1374 
  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by
  // initialization of the new memory to zero, then by any initializing stores.
1378   bool is_complete() { return _is_complete != Incomplete; }
1379   bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
1380 
1381   // Mark complete.  (Must not yet be complete.)
1382   void set_complete(PhaseGVN* phase);
1383   void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
1384 
1385   bool does_not_escape() { return _does_not_escape; }
1386   void set_does_not_escape() { _does_not_escape = true; }
1387 
1388 #ifdef ASSERT
1389   // ensure all non-degenerate stores are ordered and non-overlapping
1390   bool stores_are_sane(PhaseValues* phase);
1391 #endif //ASSERT
1392 
1393   // See if this store can be captured; return offset where it initializes.
1394   // Return 0 if the store cannot be moved (any sort of problem).
1395   intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);
1396 
1397   // Capture another store; reformat it to write my internal raw memory.
1398   // Return the captured copy, else null if there is some sort of problem.
1399   Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);
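
  // Typical capture sequence (a sketch following the pattern used by
  // StoreNode::Ideal):
  //   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
  //   if (offset > 0) {
  //     Node* moved = init->capture_store(st, offset, phase, can_reshape);
  //     // 'moved' now writes the initializer's raw memory; the original
  //     // store can fold away.
  //   }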
1400 
1401   // Find captured store which corresponds to the range [start..start+size).
1402   // Return my own memory projection (meaning the initial zero bits)
1403   // if there is no such store.  Return null if there is a problem.
1404   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);
1405 
1406   // Called when the associated AllocateNode is expanded into CFG.
1407   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1408                         intptr_t header_size, Node* size_in_bytes,
1409                         PhaseIterGVN* phase);
1410 
1411   // An Initialize node has multiple memory projections. Helper methods used when the node is removed.
1412   // For use at parse time
1413   void replace_mem_projs_by(Node* mem, Compile* C);
1414   // For use with IGVN
1415   void replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn);
1416 
1417   // Does a NarrowMemProj with this adr_type and this node as input already exist?
1418   bool already_has_narrow_mem_proj_with_adr_type(const TypePtr* adr_type) const;
1419 
  // Used during matching: find the MachProj memory projection, if there is
  // one. There should be at most one.
1422   MachProjNode* mem_mach_proj() const;
1423 
1424 private:
1425   void remove_extra_zeroes();
1426 
1427   // Find out where a captured store should be placed (or already is placed).
1428   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1429                                      PhaseValues* phase);
1430 
1431   static intptr_t get_store_offset(Node* st, PhaseValues* phase);
1432 
1433   Node* make_raw_address(intptr_t offset, PhaseGVN* phase);
1434 
1435   bool detect_init_independence(Node* value, PhaseGVN* phase);
1436 
1437   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1438                                PhaseGVN* phase);
1439 
1440   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1441 
  // Iterate with i over all NarrowMemProj uses, invoking callback on each.
  template <class Callback, class Iterator>
  NarrowMemProjNode* apply_to_narrow_mem_projs_any_iterator(Iterator i, Callback callback) const {
1444     auto filter = [&](ProjNode* proj) {
1445       if (proj->is_NarrowMemProj() && callback(proj->as_NarrowMemProj()) == BREAK_AND_RETURN_CURRENT_PROJ) {
1446         return BREAK_AND_RETURN_CURRENT_PROJ;
1447       }
1448       return CONTINUE;
1449     };
1450     ProjNode* res = apply_to_projs_any_iterator(i, filter);
1451     if (res == nullptr) {
1452       return nullptr;
1453     }
1454     return res->as_NarrowMemProj();
1455   }
1456 
1457 public:
1458 
1459   // callback is allowed to add new uses that will then be iterated over
1460   template <class Callback> void for_each_narrow_mem_proj_with_new_uses(Callback callback) const {
1461     auto callback_always_continue = [&](NarrowMemProjNode* proj) {
1462       callback(proj);
1463       return MultiNode::CONTINUE;
1464     };
1465     DUIterator i = outs();
1466     apply_to_narrow_mem_projs_any_iterator(UsesIterator(i, this), callback_always_continue);
1467   }
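
  // Usage sketch (the callback body is hypothetical): rewire every narrow
  // memory projection to a replacement memory state:
  //   init->for_each_narrow_mem_proj_with_new_uses([&](NarrowMemProjNode* proj) {
  //     igvn->replace_node(proj, new_mem);
  //   });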
1468 };
1469 
1470 //------------------------------MergeMem---------------------------------------
1471 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1472 class MergeMemNode: public Node {
1473   virtual uint hash() const ;                  // { return NO_HASH; }
1474   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
1475   friend class MergeMemStream;
1476   MergeMemNode(Node* def);  // clients use MergeMemNode::make
1477 
1478 public:
1479   // If the input is a whole memory state, clone it with all its slices intact.
1480   // Otherwise, make a new memory state with just that base memory input.
1481   // In either case, the result is a newly created MergeMem.
1482   static MergeMemNode* make(Node* base_memory);
1483 
1484   virtual int Opcode() const;
1485   virtual Node* Identity(PhaseGVN* phase);
1486   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1487   virtual uint ideal_reg() const { return NotAMachineReg; }
1488   virtual uint match_edge(uint idx) const { return 0; }
1489   virtual const RegMask &out_RegMask() const;
1490   virtual const Type *bottom_type() const { return Type::MEMORY; }
1491   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1492   // sparse accessors
1493   // Fetch the previously stored "set_memory_at", or else the base memory.
1494   // (Caller should clone it if it is a phi-nest.)
1495   Node* memory_at(uint alias_idx) const;
1496   // set the memory, regardless of its previous value
1497   void set_memory_at(uint alias_idx, Node* n);
1498   // the "base" is the memory that provides the non-finite support
1499   Node* base_memory() const       { return in(Compile::AliasIdxBot); }
1500   // warning: setting the base can implicitly set any of the other slices too
1501   void set_base_memory(Node* def);
1502   // sentinel value which denotes a copy of the base memory:
1503   Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
1504   static Node* make_empty_memory(); // where the sentinel comes from
1505   bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1506   // hook for the iterator, to perform any necessary setup
1507   void iteration_setup(const MergeMemNode* other = nullptr);
1508   // push sentinels until I am at least as long as the other (semantic no-op)
1509   void grow_to_match(const MergeMemNode* other);
1510   bool verify_sparse() const PRODUCT_RETURN0;
1511 #ifndef PRODUCT
1512   virtual void dump_spec(outputStream *st) const;
1513 #endif
1514 };
1515 
1516 class MergeMemStream : public StackObj {
1517  private:
1518   MergeMemNode*       _mm;
1519   const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
1520   Node*               _mm_base;  // loop-invariant base memory of _mm
1521   int                 _idx;
1522   int                 _cnt;
1523   Node*               _mem;
1524   Node*               _mem2;
1525   int                 _cnt2;
1526 
1527   void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
1528     // subsume_node will break sparseness at times, whenever a memory slice
1529     // folds down to a copy of the base ("fat") memory.  In such a case,
1530     // the raw edge will update to base, although it should be top.
1531     // This iterator will recognize either top or base_memory as an
1532     // "empty" slice.  See is_empty, is_empty2, and next below.
1533     //
1534     // The sparseness property is repaired in MergeMemNode::Ideal.
1535     // As long as access to a MergeMem goes through this iterator
1536     // or the memory_at accessor, flaws in the sparseness will
1537     // never be observed.
1538     //
1539     // Also, iteration_setup repairs sparseness.
1540     assert(mm->verify_sparse(), "please, no dups of base");
1541     assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");
1542 
1543     _mm  = mm;
1544     _mm_base = mm->base_memory();
1545     _mm2 = mm2;
1546     _cnt = mm->req();
1547     _idx = Compile::AliasIdxBot-1; // start at the base memory
1548     _mem = nullptr;
1549     _mem2 = nullptr;
1550   }
1551 
1552 #ifdef ASSERT
1553   Node* check_memory() const {
1554     if (at_base_memory())
1555       return _mm->base_memory();
1556     else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1557       return _mm->memory_at(_idx);
1558     else
1559       return _mm_base;
1560   }
1561   Node* check_memory2() const {
1562     return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1563   }
1564 #endif
1565 
1566   static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1567   void assert_synch() const {
1568     assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1569            "no side-effects except through the stream");
1570   }
1571 
1572  public:
1573 
1574   // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
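  //
  // A fuller sketch (illustrative): mirror the non-empty slices of src
  // into dst:
  //   for (MergeMemStream mms(dst, src); mms.next_non_empty2(); ) {
  //     Node* new_mem = mms.memory2();   // slice from src (or its base)
  //     mms.set_memory(new_mem);         // install into dst at this slice
  //   }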
1577 
1578   // iterate over one merge
1579   MergeMemStream(MergeMemNode* mm) {
1580     mm->iteration_setup();
1581     init(mm);
1582     DEBUG_ONLY(_cnt2 = 999);
1583   }
1584   // iterate in parallel over two merges
1585   // only iterates through non-empty elements of mm2
1586   MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1587     assert(mm2, "second argument must be a MergeMem also");
1588     ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
1589     mm->iteration_setup(mm2);
1590     init(mm, mm2);
1591     _cnt2 = mm2->req();
1592   }
1593 #ifdef ASSERT
1594   ~MergeMemStream() {
1595     assert_synch();
1596   }
1597 #endif
1598 
1599   MergeMemNode* all_memory() const {
1600     return _mm;
1601   }
1602   Node* base_memory() const {
1603     assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1604     return _mm_base;
1605   }
1606   const MergeMemNode* all_memory2() const {
1607     assert(_mm2 != nullptr, "");
1608     return _mm2;
1609   }
1610   bool at_base_memory() const {
1611     return _idx == Compile::AliasIdxBot;
1612   }
1613   int alias_idx() const {
1614     assert(_mem, "must call next 1st");
1615     return _idx;
1616   }
1617 
1618   const TypePtr* adr_type() const {
1619     return Compile::current()->get_adr_type(alias_idx());
1620   }
1621 
1622   const TypePtr* adr_type(Compile* C) const {
1623     return C->get_adr_type(alias_idx());
1624   }
1625   bool is_empty() const {
1626     assert(_mem, "must call next 1st");
1627     assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1628     return _mem->is_top();
1629   }
1630   bool is_empty2() const {
1631     assert(_mem2, "must call next 1st");
1632     assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1633     return _mem2->is_top();
1634   }
1635   Node* memory() const {
1636     assert(!is_empty(), "must not be empty");
1637     assert_synch();
1638     return _mem;
1639   }
1640   // get the current memory, regardless of empty or non-empty status
1641   Node* force_memory() const {
1642     assert(!is_empty() || !at_base_memory(), "");
1643     // Use _mm_base to defend against updates to _mem->base_memory().
1644     Node *mem = _mem->is_top() ? _mm_base : _mem;
1645     assert(mem == check_memory(), "");
1646     return mem;
1647   }
1648   Node* memory2() const {
1649     assert(_mem2 == check_memory2(), "");
1650     return _mem2;
1651   }
1652   void set_memory(Node* mem) {
1653     if (at_base_memory()) {
1654       // Note that this does not change the invariant _mm_base.
1655       _mm->set_base_memory(mem);
1656     } else {
1657       _mm->set_memory_at(_idx, mem);
1658     }
1659     _mem = mem;
1660     assert_synch();
1661   }
1662 
1663   // Recover from a side effect to the MergeMemNode.
1664   void set_memory() {
1665     _mem = _mm->in(_idx);
1666   }
1667 
1668   bool next()  { return next(false); }
1669   bool next2() { return next(true); }
1670 
1671   bool next_non_empty()  { return next_non_empty(false); }
1672   bool next_non_empty2() { return next_non_empty(true); }
1673   // next_non_empty2 can yield states where is_empty() is true
1674 
1675  private:
1676   // find the next item, which might be empty
1677   bool next(bool have_mm2) {
1678     assert((_mm2 != nullptr) == have_mm2, "use other next");
1679     assert_synch();
1680     if (++_idx < _cnt) {
1681       // Note:  This iterator allows _mm to be non-sparse.
1682       // It behaves the same whether _mem is top or base_memory.
1683       _mem = _mm->in(_idx);
1684       if (have_mm2)
1685         _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
1686       return true;
1687     }
1688     return false;
1689   }
1690 
1691   // find the next non-empty item
1692   bool next_non_empty(bool have_mm2) {
1693     while (next(have_mm2)) {
1694       if (!is_empty()) {
1695         // make sure _mem2 is filled in sensibly
1696         if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
1697         return true;
1698       } else if (have_mm2 && !is_empty2()) {
1699         return true;   // is_empty() == true
1700       }
1701     }
1702     return false;
1703   }
1704 };
1705 
1706 // cachewb node for guaranteeing writeback of the cache line at a
1707 // given address to (non-volatile) RAM
1708 class CacheWBNode : public Node {
1709 public:
1710   CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
1711   virtual int Opcode() const;
1712   virtual uint ideal_reg() const { return NotAMachineReg; }
1713   virtual uint match_edge(uint idx) const { return (idx == 2); }
1714   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1715   virtual const Type *bottom_type() const { return Type::MEMORY; }
1716 
1717 private:
1718   virtual bool depends_only_on_test_impl() const { return false; }
1719 };
1720 
1721 // cachewb pre sync node for ensuring that writebacks are serialised
1722 // relative to preceding or following stores
1723 class CacheWBPreSyncNode : public Node {
1724 public:
1725   CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1726   virtual int Opcode() const;
1727   virtual uint ideal_reg() const { return NotAMachineReg; }
1728   virtual uint match_edge(uint idx) const { return false; }
1729   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1730   virtual const Type *bottom_type() const { return Type::MEMORY; }
1731 
1732 private:
1733   virtual bool depends_only_on_test_impl() const { return false; }
1734 };
1735 
// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
1738 class CacheWBPostSyncNode : public Node {
1739 public:
1740   CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1741   virtual int Opcode() const;
1742   virtual uint ideal_reg() const { return NotAMachineReg; }
1743   virtual uint match_edge(uint idx) const { return false; }
1744   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1745   virtual const Type *bottom_type() const { return Type::MEMORY; }
1746 
1747 private:
1748   virtual bool depends_only_on_test_impl() const { return false; }
1749 };
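
// Expected bracketing of a writeback sequence (a sketch of the intended
// ordering, not a normative expansion):
//   CacheWBPreSync -> CacheWB(line0) ... CacheWB(lineN) -> CacheWBPostSync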
1750 
1751 //------------------------------Prefetch---------------------------------------
1752 
// Allocation prefetch which may fault; the TLAB size has to be adjusted.
1754 class PrefetchAllocationNode : public Node {
1755 public:
1756   PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
1757   virtual int Opcode() const;
1758   virtual uint ideal_reg() const { return NotAMachineReg; }
1759   virtual uint match_edge(uint idx) const { return idx==2; }
1760   virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1761 
1762 private:
1763   virtual bool depends_only_on_test_impl() const { return false; }
1764 };
1765 
1766 #endif // SHARE_OPTO_MEMNODE_HPP