/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;      // Access of unsafe origin.
  uint8_t _barrier_data;    // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,  // When is it safe to do this load?
         Memory,   // Chunk of memory is being loaded from
         Address,  // Actually address, derived from base
         ValueIn,  // Value to store
         OopStore  // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,  // Load has to acquire or be succeeded by MemBarAcquire.
                 release,  // Store has to release or be preceded by MemBarRelease.
                 seqcst,   // LoadStore has to have both acquire and release semantics.
                 unset     // The memory ordering is not set (used for testing)
  } MemOrd;
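
  // Illustrative sketch (not part of the original header; 'gvn', 'ctl',
  // 'mem', 'adr' and 'at' are assumed to be in scope): a client such as the
  // parser might pick a MemOrd by contrasting a Java volatile read, modeled
  // as an acquiring load, with a plain field read:
  //
  //   Node* volatile_ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT,
  //                                      T_INT, MemNode::acquire);
  //   Node* plain_ld    = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT,
  //                                      T_INT, MemNode::unordered);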
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return nullptr;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;
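
  // Illustrative sketch (not in the original header; names assumed): a load
  // whose memory input is a store to the very same address can be replaced
  // by the stored value during GVN, which is what can_see_stored_value()
  // enables:
  //
  //   Node* st = gvn.transform(new StoreINode(ctl, mem, adr, at, val, MemNode::unordered));
  //   Node* ld = gvn.transform(new LoadINode(ctl, st, adr, at, TypeInt::INT, MemNode::unordered));
  //   // LoadNode::Identity() may now fold 'ld' directly to 'val'.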

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that.  Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered, and such requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
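
  // Illustrative sketch (not in the original header; names assumed): the
  // factory below combined with release_if_reference() is how a caller would
  // conservatively order an oop store; on most platforms this yields a
  // releasing StoreP/StoreN:
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));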

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual bool cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && !Compile::current()->do_aliasing()),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == nullptr)  return nullptr; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
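
// Illustrative sketch (not in the original header; names assumed): a strong
// CAS produces both a result value and a new memory state; the memory side
// is consumed through an SCMemProjNode:
//
//   Node* cas  = gvn.transform(new CompareAndSwapPNode(ctl, mem, adr,
//                                                      newval, oldval,
//                                                      MemNode::seqcst));
//   Node* proj = gvn.transform(new SCMemProjNode(cas)); // updated memory state
//   // 'cas' itself is the success flag used by the surrounding control flow.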

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
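
  // Illustrative sketch (not in the original header; names assumed): during
  // macro expansion, the body of a freshly allocated array could be zeroed
  // with the (intptr_t, Node*) overload, where 'hdr_size' is the aligned
  // header size and 'size_in_bytes' is the object size node:
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
  //                                      hdr_size, size_in_bytes, &gvn);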

  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
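
  // Illustrative sketch (not in the original header; 'C', 'ctl' and 'mem'
  // are assumed to be in scope): a trailing acquire barrier for a volatile
  // load would be created through this factory and wired into control and
  // memory roughly as follows:
  //
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
  //   mb->init_req(TypeFunc::Control, ctl);
  //   mb->init_req(TypeFunc::Memory,  mem);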

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

class StoreStoreFenceNode: public MemBarNode {
public:
  StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by zero-initialization
  // of the new memory, and then by any explicit initializing stores.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete. (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseValues* phase);
#endif //ASSERT

  // See if this store can be captured; return the offset where it initializes.
  // Return 0 if the store cannot be moved (for any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else null if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find the captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store. Return null if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseValues* phase);

  static intptr_t get_store_offset(Node* st, PhaseValues* phase);

  Node* make_raw_address(intptr_t offset, PhaseGVN* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;             // { return NO_HASH; }
  virtual bool cmp(const Node &n) const; // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);               // clients use MergeMemNode::make

 public:
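  // Illustrative use (a sketch under assumed names, not code from this file):
  // wrap an existing memory state 'mem' and override a single alias slice:
  //
  //   MergeMemNode* mm = MergeMemNode::make(mem);
  //   mm->set_memory_at(alias_idx, new_slice);  // override one slice
  //   Node* slice = mm->memory_at(alias_idx);   // == new_slice
  //   Node* rest  = mm->base_memory();          // default for all other slices
  //
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.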
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type* bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }

  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // The "base" is the memory that provides the default ("non-finite") support:
  // every alias index without an explicit slice falls back to it.
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = nullptr);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream* st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;     // optional second guy, contributes non-empty iterations
  Node*               _mm_base; // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory. In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice. See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
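    //
    // Concretely (an illustrative picture, not an extra invariant): in a
    // properly sparse MergeMem every narrow input is either a genuine slice
    // or the top sentinel; a "flaw" is a narrow input that duplicates base:
    //
    //   in(AliasIdxBot)=base  in(5)=top   in(7)=slice7  // sparse (good)
    //   in(AliasIdxBot)=base  in(5)=base  in(7)=slice7  // flawed, repaired later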
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2 == nullptr || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = nullptr;
    _mem2 = nullptr;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != nullptr, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem == _mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2 == _mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node* mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != nullptr) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note: This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true; // is_empty() == true
      }
    }
    return false;
  }
};

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node* ctrl, Node* mem, Node* addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node* ctrl, Node* mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node* ctrl, Node* mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* bottom_type() const { return Type::MEMORY; }
};
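// Illustrative ordering (a sketch inferred from the comments above, not code
// from this file): a guaranteed writeback of one cache line is expressed as
//
//   CacheWBPreSync -> CacheWB(addr) -> CacheWBPostSync
//
// with the pre/post sync nodes serialising the writeback against the
// surrounding stores.

//------------------------------Prefetch---------------------------------------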
// Allocation prefetch which may fault; the TLAB size has to be adjusted
// accordingly.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node* mem, Node* adr) : Node(nullptr, mem, adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx == 2; }
  virtual const Type* bottom_type() const { return (AllocatePrefetchStyle == 3) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP