/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;    // Unaligned access from unsafe
  bool _mismatched_access;   // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;       // Access of unsafe origin.
  uint8_t _barrier_data;     // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type;  // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,            // When is it safe to do this load?
         Memory,             // Chunk of memory is being loaded from
         Address,            // Actual address, derived from base
         ValueIn             // Value to store
  };
  typedef enum { unordered = 0,
                 acquire,    // Load has to acquire or be succeeded by MemBarAcquire.
                 release,    // Store has to release or be preceded by MemBarRelease.
                 seqcst,     // LoadStore has to have both acquire and release semantics.
                 unset       // The memory ordering is not set (used for testing)
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that. Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be reordered, and such requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);
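
  // Example (illustrative sketch, not compiled here): a caller such as a
  // GraphKit-style context might use the factory above to build an acquire
  // load of a Java int field. The names gvn, ctl, mem, adr and adr_type stand
  // for caller-supplied values and are only assumed for this sketch:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::acquire);
  //   ld = gvn.transform(ld);
  //
  // Passing MemNode::unordered instead yields a plain load that is free to be
  // reordered; the ControlDependency argument defaults to DependsOnlyOnTest.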

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }

  LoadNode* pin_array_access_node() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
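
  // Example (illustrative sketch, not compiled here): a caller storing an
  // object reference that may have just been allocated would typically pick
  // the memory ordering with the helper above and then go through the
  // StoreNode::make factory declared below. The names gvn, ctl, mem, adr,
  // adr_type and val are caller-supplied and only assumed for this sketch:
  //
  //   MemNode::MemOrd mo = StoreNode::release_if_reference(T_OBJECT);
  //   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT, mo);
  //   st = gvn.transform(st);
  //
  // On AArch64 the helper returns unordered (the allocation path already emits
  // the required barriers); on other platforms it returns release.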

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

// Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }

  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == nullptr) return nullptr; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
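
// Example (illustrative sketch, not compiled here): a LoadStore node such as a
// compare-and-swap produces both a result value and a new memory state; the
// memory side is consumed through an SCMemProjNode. Assuming a caller holds a
// transformed CompareAndSwapINode in 'cas', a MergeMemNode in 'merged_mem' and
// an alias index 'alias_idx':
//
//   Node* mem_proj = gvn.transform(new SCMemProjNode(cas));
//   merged_mem->set_memory_at(alias_idx, mem_proj);
//
// The value result of 'cas' (the success flag) is consumed separately.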

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
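
// Example (illustrative sketch, not compiled here): the concrete subclasses
// below are what callers actually instantiate. A strong int compare-and-swap
// and an int compare-and-exchange could be built roughly as follows; ctl, mem,
// adr, adr_type, oldval and newval are caller-supplied and only assumed here
// ('val' is the new value, 'ex' the expected one):
//
//   Node* cas = gvn.transform(
//       new CompareAndSwapINode(ctl, mem, adr, newval, oldval, MemNode::seqcst));
//   // cas yields an int success flag
//
//   Node* cx = gvn.transform(
//       new CompareAndExchangeINode(ctl, mem, adr, newval, oldval, adr_type,
//                                   MemNode::seqcst));
//   // cx yields the value previously found at adr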

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};
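
// Example (illustrative sketch, not compiled here): macro expansion of an
// allocation can use the first clear_memory() overload above to zero a newly
// allocated object between two constant offsets. The names ctl, mem, obj,
// header_size, size_in_bytes and gvn are caller-supplied and only assumed
// here; the val/raw_val arguments depend on the caller (nullptr stands for
// plain zeroing in this sketch):
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, obj,
//                                      nullptr /* val */, nullptr /* raw_val */,
//                                      header_size, size_in_bytes, &gvn);
//
// The returned node is the new raw-memory state covering the cleared range.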

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
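
  // Example (illustrative sketch, not compiled here): a volatile load is
  // typically followed by an acquire barrier built through the factory above.
  // The names C, gvn and ld (the preceding load) are caller-supplied and only
  // assumed for this sketch:
  //
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire,
  //                                     Compile::AliasIdxBot, ld);
  //   mb = gvn.transform(mb)->as_MemBar();
  //
  // Passing 'ld' as the optional precedent edge keeps the barrier ordered
  // after the load; a narrow barrier would pass a specific alias index
  // instead of Compile::AliasIdxBot.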

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

class StoreStoreFenceNode: public MemBarNode {
public:
  StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
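// Illustrative sketch (hedged): how a compiler-only CPU-order membar is
// typically placed around an unsafe access whose alias category is unknown.
// 'kit' stands for a GraphKit and insert_mem_bar is its usual helper; both
// are mentioned here only for illustration, the real code lives in the
// intrinsics support.
//
//   kit.insert_mem_bar(Op_MemBarCPUOrder);
//   ... emit the unsafe load or store ...
//   kit.insert_mem_bar(Op_MemBarCPUOrder);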
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by initialization
  // of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseValues* phase);
#endif //ASSERT

  // See if this store can be captured; return the offset at which it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else null if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find the captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return null if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseValues* phase);

  static intptr_t get_store_offset(Node* st, PhaseValues* phase);

  Node* make_raw_address(intptr_t offset, PhaseGVN* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
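// Illustrative sketch (hedged): how the store-capture interface above is
// typically driven when a store into a freshly allocated object is folded
// into its InitializeNode.  Local names are illustrative only; the real
// logic lives in memnode.cpp.
//
//   InitializeNode* init = ...;   // reached through the store's memory input
//   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
//   if (offset > 0) {             // 0 means the store cannot be captured
//     Node* captured = init->capture_store(st, offset, phase, can_reshape);
//     // 'st' becomes redundant; the initialization now owns the raw store.
//   }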
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = nullptr);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
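// Illustrative sketch (hedged): the usual way a caller snapshots and updates
// per-slice memory through the sparse accessors above.  'all_mem' and
// 'new_state' are illustrative names only.
//
//   MergeMemNode* mm = MergeMemNode::make(all_mem);  // clone or wrap a state
//   Node* slice = mm->memory_at(alias_idx);          // read one alias slice
//   mm->set_memory_at(alias_idx, new_state);         // overwrite that slice
//   // slices never set explicitly continue to track base_memory()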
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = nullptr;
    _mem2 = nullptr;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != nullptr, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != nullptr) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
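// Illustrative sketch (hedged): the parallel-iteration idiom from the
// "expected usages" comment above, spelled out.  'phi_mem' and 'other_mem'
// are illustrative names only.
//
//   for (MergeMemStream mms(phi_mem, other_mem); mms.next_non_empty2(); ) {
//     // When only the second merge has a non-empty slice, is_empty() may be
//     // true here; force_memory() still yields a usable state for _mm.
//     Node* merged = mms.force_memory();
//     Node* other  = mms.memory2();
//     // ... combine 'merged' and 'other' for alias index mms.alias_idx() ...
//     mms.set_memory(merged);
//   }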
// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
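// Illustrative sketch (hedged): the order in which the three cachewb nodes
// above are typically strung together when a cache-line writeback is lowered.
// The real construction lives in the intrinsic expansion code; this is only
// an outline of the intended fencing.
//
//   ctl/mem -> CacheWBPreSync         // order against preceding stores
//           -> CacheWB(addr)          // force the addressed line to RAM
//           -> CacheWBPostSync        // order against following stores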
//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP