/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: e.g., a byte read from an integer array
  bool _unsafe_access;      // Access of unsafe origin.
  uint8_t _barrier_data;    // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,  // When is it safe to do this load?
         Memory,   // Chunk of memory is being loaded from
         Address,  // Actual address, derived from base
         ValueIn   // Value to store
  };
  typedef enum { unordered = 0,
                 acquire,   // Load has to acquire or be succeeded by MemBarAcquire.
                 release,   // Store has to release or be preceded by MemBarRelease.
                 seqcst,    // LoadStore has to have both acquire and release semantics.
                 unset      // The memory ordering is not set (used for testing)
  } MemOrd;
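
  // Illustrative mapping (a sketch, not an exhaustive contract): a Java
  // volatile load is modeled with MemOrd::acquire, a volatile store with
  // MemOrd::release, and plain accesses with MemOrd::unordered. A
  // hypothetical caller with a GVN context 'gvn' and nodes 'ctl', 'mem',
  // 'adr' and address type 'at' could request an acquire load of an int
  // field through LoadNode::make (declared later in this file):
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT,
  //                             T_INT, MemNode::acquire);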
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
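  // Illustrative effect (sketch): in a chain such as
  //   mem = StoreI(ctl, mem0, adr, 42);  ...  LoadI(ctl2, mem, adr)
  // with no aliased store in between, find_previous_store locates the StoreI,
  // and can_see_stored_value (below) then allows the load to be replaced by
  // the stored value 42.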
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that.  Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };
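
  // Worked example (sketch): for
  //   if (p != null) { x = p.f; }
  // the load of p.f is normally created DependsOnlyOnTest, so it may be
  // hoisted to, or commoned with, any point dominated by an equivalent null
  // check. A load of unsafe origin may instead be UnknownControl (it keeps
  // its control edge but may still float across safepoints) or Pinned (it
  // may not cross a safepoint at all).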

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);
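
  // Illustrative shape of the split performed by split_through_phi (sketch):
  //   LoadI(Phi(mem1, mem2), adr)  ==>  Phi(LoadI(mem1, adr), LoadI(mem2, adr))
  // so that each new load can be optimized against its own memory state,
  // e.g. folded into the initializing store of its allocation.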

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }

  LoadNode* pin_array_access_node() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
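  // Concretely (sketch): a raw-pointer load such as a read of the TLS 'top'
  // pointer answers false here even when constructed DependsOnlyOnTest,
  // because its adr_type() is TypeRawPtr::BOTTOM; the load therefore stays
  // anchored to its control input instead of floating above a Safepoint.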
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
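
  // Illustrative call (sketch; 'gvn', 'ctl', 'mem', 'adr', 'at' and 'val'
  // are assumed to come from the surrounding graph-building context):
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));
  // release_if_reference (below) conservatively selects `release' for
  // reference-like types on platforms that need the barrier.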
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == nullptr)  return nullptr; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
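
// Illustrative subgraph shape (sketch; 'gvn', 'ctl', 'mem', 'adr', 'oldval'
// and 'newval' are assumed): a CAS produces both a value and a new memory
// state; the value result feeds its users directly, while the memory effect
// is exposed through an SCMemProjNode:
//   Node* cas  = gvn.transform(new CompareAndSwapINode(ctl, mem, adr,
//                                                      newval, oldval,
//                                                      MemNode::seqcst));
//   Node* memp = gvn.transform(new SCMemProjNode(cas)); // new memory state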

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
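
  // Illustrative call (sketch; 'ctl', 'rawmem' and 'dest' are assumed nodes,
  // 'header_size' an intptr_t byte offset, 'size_in_bytes' a Node*):
  //   Node* mem2 = ClearArrayNode::clear_memory(ctl, rawmem, dest,
  //                                             header_size, size_in_bytes,
  //                                             &gvn);
  // The result is the raw memory state after the zeroing stores.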
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
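
  // Illustrative call (sketch; 'C' is the current Compile object and the
  // opcode is one of the Op_MemBar* constants, e.g. Op_MemBarAcquire):
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);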

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

class StoreStoreFenceNode: public MemBarNode {
public:
  StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by initialization
  // of the new memory to zero, then by any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseValues* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else null if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return null if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseValues* phase);

  static intptr_t get_store_offset(Node* st, PhaseValues* phase);

  Node* make_raw_address(intptr_t offset, PhaseGVN* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
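
// Hedged usage sketch: a store that follows a fresh allocation can be folded
// into the InitializeNode via the capture API above.  'init', 'st', 'phase',
// and 'can_reshape' are assumed locals of a hypothetical caller; the
// authoritative driver lives in memnode.cpp.
//
//   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
//   if (offset > 0) {  // 0 means the store cannot be moved
//     Node* captured = init->capture_store(st, offset, phase, can_reshape);
//     // 'captured' now writes the Initialize's raw memory, or is null
//     // if capture turned out not to be possible after all.
//   }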

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;                // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;  // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }

  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = nullptr);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
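
// Hedged usage sketch of the slice accessors above.  'mem', 'alias_idx', and
// 'new_state' are assumed to come from a hypothetical surrounding pass; this
// illustrates the interface, it is not code from this repository.
//
//   MergeMemNode* mm    = MergeMemNode::make(mem);   // wrap any memory state
//   Node*         slice = mm->memory_at(alias_idx);  // read one alias slice
//   mm->set_memory_at(alias_idx, new_state);         // overwrite that slice
//   Node*         base  = mm->base_memory();         // fallback for all others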

class MergeMemStream : public StackObj {
private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2 == nullptr || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = nullptr;
    _mem2 = nullptr;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory() ? _mm2->base_memory() : _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != nullptr, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }
  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem == _mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2 == _mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node* mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != nullptr) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
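
// Hedged sketch expanding the "expected usages" above: walking two memory
// states slice-by-slice.  'mm' and 'mm2' are assumed MergeMemNode* locals of
// a hypothetical caller, and 'merge_slices' is a hypothetical helper; only
// the MergeMemStream calls themselves are from this header.
//
//   for (MergeMemStream mms(mm, mm2); mms.next_non_empty2(); ) {
//     Node* cur   = mms.force_memory();  // slice of mm (may be its base)
//     Node* other = mms.memory2();       // matching slice of mm2
//     if (cur != other) {
//       mms.set_memory(merge_slices(cur, other));
//     }
//   }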

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
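
// Hedged note (assumption, not stated in this file): a forced writeback of
// an address range is typically emitted as a bracketed sequence, e.g.
//
//   CacheWBPreSync                         // order vs. earlier stores
//   CacheWB(line_0) ... CacheWB(line_n)    // one per cache line in the range
//   CacheWBPostSync                        // order vs. later stores
//
// The authoritative emission logic lives with the intrinsic that creates
// these nodes, not in this header.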

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP