/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,   // When is it safe to do this load?
         Memory,    // Chunk of memory is being loaded from
         Address,   // Actually address, derived from base
         ValueIn,   // Value to store
         OopStore   // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,   // Load has to acquire or be succeeded by MemBarAcquire.
                 release    // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer. Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

#ifdef ASSERT
  void set_raw_adr_type(const TypePtr *t) {
    _adr_type = t;
  }
#endif

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state? (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test. The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // goes easily unnoticed. Using an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
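
  // For example (illustrative only, not a call site in this header): a pinned
  // unsafe load would be requested from the factory roughly as
  //   LoadNode::make(gvn, ctl, mem, adr, adr_type, value_type, T_INT,
  //                  MemNode::unordered, LoadNode::Pinned);
  // where a bare 'true' or 'false' argument could easily be mistaken for one
  // of the neighboring boolean parameters.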
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification. The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here. If we have an identity, return the Node
  // we are equivalent to. We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif

  virtual bool is_g1_marking_load() const {
    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
    return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
           && in(2)->in(3)->is_Con()
           && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset;
  }

  virtual bool is_shenandoah_state_load() const {
    if (!UseShenandoahGC) return false;
    const int state_offset = in_bytes(JavaThread::gc_state_offset());
    return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
           && in(2)->in(3)->is_Con()
           && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
  }

protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations. However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge. (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations). Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification. The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }
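
  // For example (illustrative only): a caller emitting a field store of a
  // possibly-reference value would typically pick the ordering with this
  // helper, roughly as
  //   StoreNode::make(gvn, ctl, mem, adr, adr_type, val, bt,
  //                   StoreNode::release_if_reference(bt));
  // so that only reference-like types pay for release semantics.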

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  void set_adr_type(const TypePtr *t) {
    _adr_type = t;
  }

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
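
// Note (illustrative): the GetAndAdd*/GetAndSet* nodes above model atomic
// read-modify-write operations and are typically created when intrinsifying
// sun.misc.Unsafe getAndAdd/getAndSet calls. Each produces the previous value
// as its data result, while the updated memory state is picked up through an
// SCMemProjNode, just as for the conditional store nodes.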

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
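// For example (illustrative only; the actual barriers are emitted by the
// parser/GraphKit, not by this header), a volatile field store is bracketed
// roughly as
//   MemBarRelease; StoreX(field); MemBarVolatile;
// and a volatile field load is followed by an acquire:
//   LoadX(field); MemBarAcquire;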
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU. Used to order unsafe memory references
// inside the compiler when we lack alias info. Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete. (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store. Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;               // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const; // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.
    // In such a case, the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  //  for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  //  for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP
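
// (Editorial usage sketch, not part of the original header: a transform that
//  rewrites every live slice of a MergeMem via MergeMemStream looks roughly like
//      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
//        Node* old_slice = mms.memory();          // state for mms.alias_idx()
//        Node* new_slice = transform(old_slice);  // 'transform' is a placeholder
//        if (new_slice != old_slice)  mms.set_memory(new_slice);
//      }
//  where 'mm' stands for some MergeMemNode* in hand.)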