/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;      // Access of unsafe origin.
  uint8_t _barrier_data;    // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,  // When is it safe to do this load?
         Memory,   // Chunk of memory is being loaded from
         Address,  // Actual address, derived from base
         ValueIn   // Value to store
       };
  typedef enum { unordered = 0,
                 acquire,  // Load has to acquire or be succeeded by MemBarAcquire.
                 release,  // Store has to release or be preceded by MemBarRelease.
                 seqcst,   // LoadStore has to have both acquire and release semantics.
                 unset     // The memory ordering is not set (used for testing)
  } MemOrd;
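  // Illustrative mapping (a sketch, not a definition from this file): a Java
  // volatile read is typically emitted with MemOrd acquire, a volatile write
  // with release, and a plain field access with unordered.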
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer. Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const; // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }
  // What is the type of the value in memory? (T_VOID means "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not of the memory that is accessed. For mismatched memory accesses
  // they might differ. For instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }
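  // For example, a LoadSNode (T_SHORT) reports memory_size() == 2 via
  // type2aelembytes even when it is a mismatched access into, say, a long[]:
  // the size describes the value moved, not the container element.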

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state? (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const TypePtr* adr_type, outputStream* st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test. The field _control_dependency below records
  // whether this load depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are. Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };
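  // Illustrative (a sketch of typical usage, not a rule stated here): a load
  // emitted for an unsafe access whose control dependency cannot be proven to
  // be just the dominating test would be created Pinned or UnknownControl, so
  // that it cannot float above, e.g., a preceding null check, whereas an
  // ordinary field load can use DependsOnlyOnTest.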

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't have been safe to have a non-pinned LoadNode
  // with those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification. The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type; // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);
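  // Illustrative use of the factory (a sketch; gvn, ctl, mem, adr and
  // adr_type are assumed to be in scope):
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered);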

  virtual uint hash() const; // Check the type

  // Handle algebraic identities here. If we have an identity, return the Node
  // we are equivalent to. We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }

  LoadNode* pin_array_access_node() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations. However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge. (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations.) Basically, it's ugly, but so is the alternative.
  // See the comment in macro.cpp, around line 125, in expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (by markWord::klass_shift_at_offset bits)
// to extract the actual class pointer. C2's type system is agnostic on whether
// the input address directly points at the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification. The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
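  // For instance, release_if_reference(T_OBJECT) and release_if_reference(T_ARRAY)
  // yield release (except on AArch64, where unordered suffices), while a
  // primitive type such as T_INT yields unordered.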

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);
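  // Illustrative use (a sketch; ctl, mem, adr, adr_type and val are assumed
  // to be in scope):
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));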

  virtual uint hash() const; // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const; // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;  // What kind of value is loaded?
  uint8_t _barrier_data;    // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif // ASSERT
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const TypePtr* adr_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
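
// Illustrative shape of a strong CAS (a sketch; the inputs are assumed to be
// in scope):
//   Node* cas  = new CompareAndSwapINode(ctl, mem, adr, newval, expected,
//                                        MemNode::seqcst);
//   Node* memp = new SCMemProjNode(cas); // updated memory state flows out here
// The node's own value is the boolean success result; memory consumers must
// use the SCMemProj.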

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
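  // Illustrative use (a sketch; header_size and end_offset are hypothetical
  // names, and must respect the alignment rules stated above):
  //   Node* new_mem = ClearArrayNode::clear_memory(ctl, mem, dest,
  //                                                header_size, // mod BytesPerInt
  //                                                end_offset,  // Node*, mod BytesPerLong
  //                                                &gvn);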
  // Step *np through the allocation's input memory edge if it belongs to a
  // different instance, or leave it unchanged if it is the one we are
  // looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
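// Illustrative sequences (a sketch of the shapes described above):
//   volatile load:  LoadX(acquire)  followed by a MemBarAcquire
//   volatile store: MemBarRelease, then StoreX(release), then MemBarVolatile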
class MemBarNode: public MultiNode {
  virtual uint hash() const ;               // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ; // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingExpandedArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
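  // e.g. (illustrative): MemBarNode::make(C, Op_MemBarAcquire) builds a wide
  // membar over all of memory (Compile::AliasIdxBot).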
1165
1166 MemBarNode* trailing_membar() const;
1167 MemBarNode* leading_membar() const;
1168
1169 void set_trailing_load() { _kind = TrailingLoad; }
1170 bool trailing_load() const { return _kind == TrailingLoad; }
1171 bool trailing_store() const { return _kind == TrailingStore; }
1172 bool leading_store() const { return _kind == LeadingStore; }
1173 bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1174 bool leading_load_store() const { return _kind == LeadingLoadStore; }
1175 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1176 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1177 bool standalone() const { return _kind == Standalone; }
1178 void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1179 bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
1180
1181 static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
1182 static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
1183
1184 void remove(PhaseIterGVN *igvn);
1185 };
1186
1187 // "Acquire" - no following ref can move before (but earlier refs can
1188 // follow, like an early Load stalled in cache). Requires multi-cpu
1189 // visibility. Inserted after a volatile load.
1190 class MemBarAcquireNode: public MemBarNode {
1191 public:
1192 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1193 : MemBarNode(C, alias_idx, precedent) {}
1194 virtual int Opcode() const;
1195 };
1196
1197 // "Acquire" - no following ref can move before (but earlier refs can
1198 // follow, like an early Load stalled in cache). Requires multi-cpu
1199 // visibility. Inserted independent of any load, as required
1200 // for intrinsic Unsafe.loadFence().
1201 class LoadFenceNode: public MemBarNode {
1202 public:
1203 LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1204 : MemBarNode(C, alias_idx, precedent) {}
1205 virtual int Opcode() const;
1206 };
1207
1208 // "Release" - no earlier ref can move after (but later refs can move
1209 // up, like a speculative pipelined cache-hitting Load). Requires
1210 // multi-cpu visibility. Inserted before a volatile store.
1211 class MemBarReleaseNode: public MemBarNode {
1212 public:
1213 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
1214 : MemBarNode(C, alias_idx, precedent) {}
1215 virtual int Opcode() const;
1216 };
1217
1218 // "Release" - no earlier ref can move after (but later refs can move
1219 // up, like a speculative pipelined cache-hitting Load). Requires
1220 // multi-cpu visibility. Inserted independent of any store, as required
1221 // for intrinsic Unsafe.storeFence().
1222 class StoreFenceNode: public MemBarNode {
1223 public:
1224 StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1225 : MemBarNode(C, alias_idx, precedent) {}
1226 virtual int Opcode() const;
1227 };
1228
1229 // "Acquire" - no following ref can move before (but earlier refs can
1230 // follow, like an early Load stalled in cache). Requires multi-cpu
1231 // visibility. Inserted after a FastLock.
1232 class MemBarAcquireLockNode: public MemBarNode {
1233 public:
1234 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
1235 : MemBarNode(C, alias_idx, precedent) {}
1236 virtual int Opcode() const;
1237 };
1238
1239 // "Release" - no earlier ref can move after (but later refs can move
1240 // up, like a speculative pipelined cache-hitting Load). Requires
1241 // multi-cpu visibility. Inserted before a FastUnLock.
1242 class MemBarReleaseLockNode: public MemBarNode {
1243 public:
1244 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
1245 : MemBarNode(C, alias_idx, precedent) {}
1246 virtual int Opcode() const;
1247 };
1248
1249 class MemBarStoreStoreNode: public MemBarNode {
1250 public:
1251 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
1252 : MemBarNode(C, alias_idx, precedent) {
1253 init_class_id(Class_MemBarStoreStore);
1254 }
1255 virtual int Opcode() const;
1256 };
1257
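// "StoreStore" fence inserted independent of any particular store, as
// required for the intrinsic Unsafe.storeStoreFence().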
1258 class StoreStoreFenceNode: public MemBarNode {
1259 public:
1260 StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1261 : MemBarNode(C, alias_idx, precedent) {}
1262 virtual int Opcode() const;
1263 };
1264
1265 // Ordering between a volatile store and a following volatile load.
1266 // Requires multi-cpu visibility.
1267 class MemBarVolatileNode: public MemBarNode {
1268 public:
1269 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
1270 : MemBarNode(C, alias_idx, precedent) {}
1271 virtual int Opcode() const;
1272 };
1273
1274 // Ordering within the same CPU. Used to order unsafe memory references
1275 // inside the compiler when we lack alias info. Not needed "outside" the
1276 // compiler because the CPU does all the ordering for us.
1277 class MemBarCPUOrderNode: public MemBarNode {
1278 public:
1279 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1280 : MemBarNode(C, alias_idx, precedent) {}
1281 virtual int Opcode() const;
1282 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1283 };
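
// A hedged sketch of the usual bracketing of an unsafe access when alias
// information is too weak to order it against surrounding memory operations
// (illustrative only):
//
//   insert_mem_bar(Op_MemBarCPUOrder);
//   // ... emit the unsafe load or store ...
//   insert_mem_bar(Op_MemBarCPUOrder);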
1284
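// Node for the Thread.onSpinWait() intrinsic; typically lowered to a
// CPU-specific spin-wait hint (e.g. PAUSE on x86), if one exists.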
1285 class OnSpinWaitNode: public MemBarNode {
1286 public:
1287 OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
1288 : MemBarNode(C, alias_idx, precedent) {}
1289 virtual int Opcode() const;
1290 };
1291
1292 // Isolation of object setup after an AllocateNode and before next safepoint.
1293 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1294 class InitializeNode: public MemBarNode {
1295 friend class AllocateNode;
1296
1297 enum {
1298 Incomplete = 0,
1299 Complete = 1,
1300 WithArraycopy = 2
1301 };
1302 int _is_complete;
1303
1304 bool _does_not_escape;
1305
1306 public:
1307 enum {
1308 Control = TypeFunc::Control,
1309 Memory = TypeFunc::Memory, // MergeMem for states affected by this op
1310 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address
1311 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP)
1312 };
1313
1314 InitializeNode(Compile* C, int adr_type, Node* rawoop);
1315 virtual int Opcode() const;
1316 virtual uint size_of() const { return sizeof(*this); }
1317 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1318 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress
1319
1320 // Manage incoming memory edges via a MergeMem on in(Memory):
1321 Node* memory(uint alias_idx);
1322
1323 // The raw memory edge coming directly from the Allocation.
1324 // The contents of this memory are *always* all-zero-bits.
1325 Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
1326
1327 // Return the corresponding allocation for this initialization (or null if none).
1328 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1329 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1330 AllocateNode* allocation();
1331
1332 // Anything other than zeroing in this init?
1333 bool is_non_zero();
1334
1335 // An InitializeNode must be completed before macro expansion is done.
1336 // Completion requires that the AllocateNode be followed by initialization
1337 // of the new memory to zero, then by any captured initializing stores.
1338 bool is_complete() { return _is_complete != Incomplete; }
1339 bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
1340
1341 // Mark complete. (Must not yet be complete.)
1342 void set_complete(PhaseGVN* phase);
1343 void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
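
// A hedged sketch of the completion step once all initializing stores have
// been captured (illustrative only):
//
//   if (!init->is_complete()) {
//     init->set_complete(phase);  // no further stores may be captured
//   }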
1344
1345 bool does_not_escape() { return _does_not_escape; }
1346 void set_does_not_escape() { _does_not_escape = true; }
1347
1348 #ifdef ASSERT
1349 // ensure all non-degenerate stores are ordered and non-overlapping
1350 bool stores_are_sane(PhaseValues* phase);
1351 #endif //ASSERT
1352
1353 // See if this store can be captured; return offset where it initializes.
1354 // Return 0 if the store cannot be moved (any sort of problem).
1355 intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);
1356
1357 // Capture another store; reformat it to write my internal raw memory.
1358 // Return the captured copy, or null if there is some sort of problem.
1359 Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);
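
// A hedged sketch of the capture protocol as driven from a store's Ideal
// transformation (variable names illustrative):
//
//   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
//   if (offset > 0) {  // 0 means the store cannot be moved
//     Node* moved = init->capture_store(st, offset, phase, can_reshape);
//     // on success, 'st' is subsumed by 'moved' inside the initialization
//   }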
1360
1361 // Find the captured store which corresponds to the range [start..start+size).
1362 // Return my own memory projection (meaning the initial zero bits)
1363 // if there is no such store. Return null if there is a problem.
1364 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);
1365
1366 // Called when the associated AllocateNode is expanded into the CFG.
1367 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1368 intptr_t header_size, Node* size_in_bytes,
1369 PhaseIterGVN* phase);
1370
1371 // An Initialize node has multiple memory projections. Helper methods used when the node is removed.
1372 // For use at parse time
1373 void replace_mem_projs_by(Node* mem, Compile* C);
1374 // For use with IGVN
1375 void replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn);
1376
1377 // Does a NarrowMemProj with this adr_type and this node as input already exist?
1378 bool already_has_narrow_mem_proj_with_adr_type(const TypePtr* adr_type) const;
1379
1380 // Used during matching: find the MachProj memory projection, if there is
1381 // one. There should be at most one.
1382 MachProjNode* mem_mach_proj() const;
1383
1384 private:
1385 void remove_extra_zeroes();
1386
1387 // Find out where a captured store should be placed (or already is placed).
1388 int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1389 PhaseValues* phase);
1390
1391 static intptr_t get_store_offset(Node* st, PhaseValues* phase);
1392
1393 Node* make_raw_address(intptr_t offset, PhaseGVN* phase);
1394
1395 bool detect_init_independence(Node* value, PhaseGVN* phase);
1396
1397 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1398 PhaseGVN* phase);
1399
1400 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1401
1402 // Iterate with i over all NarrowMemProj uses, calling callback on each
1403 template <class Callback, class Iterator> NarrowMemProjNode* apply_to_narrow_mem_projs_any_iterator(Iterator i, Callback callback) const {
1404 auto filter = [&](ProjNode* proj) {
1405 if (proj->is_NarrowMemProj() && callback(proj->as_NarrowMemProj()) == BREAK_AND_RETURN_CURRENT_PROJ) {
1406 return BREAK_AND_RETURN_CURRENT_PROJ;
1407 }
1408 return CONTINUE;
1409 };
1410 ProjNode* res = apply_to_projs_any_iterator(i, filter);
1411 if (res == nullptr) {
1412 return nullptr;
1413 }
1414 return res->as_NarrowMemProj();
1415 }
1416
1417 public:
1418
1419 // The callback is allowed to add new uses, which will then also be iterated over
1420 template <class Callback> void for_each_narrow_mem_proj_with_new_uses(Callback callback) const {
1421 auto callback_always_continue = [&](NarrowMemProjNode* proj) {
1422 callback(proj);
1423 return MultiNode::CONTINUE;
1424 };
1425 DUIterator i = outs();
1426 apply_to_narrow_mem_projs_any_iterator(UsesIterator(i, this), callback_always_continue);
1427 }
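
// A hedged usage sketch of the iterator above (the callback body is
// illustrative only):
//
//   init->for_each_narrow_mem_proj_with_new_uses([&](NarrowMemProjNode* proj) {
//     igvn->replace_node(proj, mem);  // e.g. rewire every projection to 'mem'
//   });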
1428 };
1429
1430 //------------------------------MergeMem---------------------------------------
1431 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1432 class MergeMemNode: public Node {
1433 virtual uint hash() const; // { return NO_HASH; }
1434 virtual bool cmp( const Node &n ) const; // Always fail, except on self
1435 friend class MergeMemStream;
1436 MergeMemNode(Node* def); // clients use MergeMemNode::make
1437
1438 public:
1439 // If the input is a whole memory state, clone it with all its slices intact.
1440 // Otherwise, make a new memory state with just that base memory input.
1441 // In either case, the result is a newly created MergeMem.
1442 static MergeMemNode* make(Node* base_memory);
1443
1444 virtual int Opcode() const;
1445 virtual Node* Identity(PhaseGVN* phase);
1446 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1447 virtual uint ideal_reg() const { return NotAMachineReg; }
1448 virtual uint match_edge(uint idx) const { return 0; }
1449 virtual const RegMask &out_RegMask() const;
1450 virtual const Type *bottom_type() const { return Type::MEMORY; }
1451 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1452 // sparse accessors
1453 // Fetch the previously stored "set_memory_at", or else the base memory.
1454 // (Caller should clone it if it is a phi-nest.)
1455 Node* memory_at(uint alias_idx) const;
1456 // set the memory, regardless of its previous value
1457 void set_memory_at(uint alias_idx, Node* n);
1458 // the "base" is the memory that provides the non-finite support
1459 Node* base_memory() const { return in(Compile::AliasIdxBot); }
1460 // warning: setting the base can implicitly set any of the other slices too
1461 void set_base_memory(Node* def);
1462 // sentinel value which denotes a copy of the base memory:
1463 Node* empty_memory() const { return in(Compile::AliasIdxTop); }
1464 static Node* make_empty_memory(); // where the sentinel comes from
1465 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1466 // hook for the iterator, to perform any necessary setup
1467 void iteration_setup(const MergeMemNode* other = nullptr);
1468 // push sentinels until I am at least as long as the other (semantic no-op)
1469 void grow_to_match(const MergeMemNode* other);
1470 bool verify_sparse() const PRODUCT_RETURN0;
1471 #ifndef PRODUCT
1472 virtual void dump_spec(outputStream *st) const;
1473 #endif
1474 };
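
// A hedged sketch of the sparse accessors in use (illustrative only):
//
//   MergeMemNode* mm = MergeMemNode::make(mem);  // wrap a memory state
//   Node* slice = mm->memory_at(alias_idx);      // prior setting, or base
//   mm->set_memory_at(alias_idx, new_slice);     // overwrite one slice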
1475
1476 class MergeMemStream : public StackObj {
1477 private:
1478 MergeMemNode* _mm;
1479 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
1480 Node* _mm_base; // loop-invariant base memory of _mm
1481 int _idx;
1482 int _cnt;
1483 Node* _mem;
1484 Node* _mem2;
1485 int _cnt2;
1486
1487 void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
1488 // subsume_node will break sparseness at times, whenever a memory slice
1489 // folds down to a copy of the base ("fat") memory. In such a case,
1490 // the raw edge will update to base, although it should be top.
1491 // This iterator will recognize either top or base_memory as an
1492 // "empty" slice. See is_empty, is_empty2, and next below.
1493 //
1494 // The sparseness property is repaired in MergeMemNode::Ideal.
1495 // As long as access to a MergeMem goes through this iterator
1496 // or the memory_at accessor, flaws in the sparseness will
1497 // never be observed.
1498 //
1499 // Also, iteration_setup repairs sparseness.
1500 assert(mm->verify_sparse(), "please, no dups of base");
1501 assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");
1502
1503 _mm = mm;
1504 _mm_base = mm->base_memory();
1505 _mm2 = mm2;
1506 _cnt = mm->req();
1507 _idx = Compile::AliasIdxBot-1; // start at the base memory
1508 _mem = nullptr;
1509 _mem2 = nullptr;
1510 }
1511
1512 #ifdef ASSERT
1513 Node* check_memory() const {
1514 if (at_base_memory())
1515 return _mm->base_memory();
1516 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1517 return _mm->memory_at(_idx);
1518 else
1519 return _mm_base;
1520 }
1521 Node* check_memory2() const {
1522 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1523 }
1524 #endif
1525
1526 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1527 void assert_synch() const {
1528 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1529 "no side-effects except through the stream");
1530 }
1531
1532 public:
1533
1534 // expected usages:
1535 // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
1536 // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
1537
1538 // iterate over one merge
1539 MergeMemStream(MergeMemNode* mm) {
1540 mm->iteration_setup();
1541 init(mm);
1542 DEBUG_ONLY(_cnt2 = 999);
1543 }
1544 // iterate in parallel over two merges
1545 // only iterates through non-empty elements of mm2
1546 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1547 assert(mm2, "second argument must be a MergeMem also");
1548 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
1549 mm->iteration_setup(mm2);
1550 init(mm, mm2);
1551 _cnt2 = mm2->req();
1552 }
1553 #ifdef ASSERT
1554 ~MergeMemStream() {
1555 assert_synch();
1556 }
1557 #endif
1558
1559 MergeMemNode* all_memory() const {
1560 return _mm;
1561 }
1562 Node* base_memory() const {
1563 assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1564 return _mm_base;
1565 }
1566 const MergeMemNode* all_memory2() const {
1567 assert(_mm2 != nullptr, "");
1568 return _mm2;
1569 }
1570 bool at_base_memory() const {
1571 return _idx == Compile::AliasIdxBot;
1572 }
1573 int alias_idx() const {
1574 assert(_mem, "must call next 1st");
1575 return _idx;
1576 }
1577
1578 const TypePtr* adr_type() const {
1579 return Compile::current()->get_adr_type(alias_idx());
1580 }
1581
1582 const TypePtr* adr_type(Compile* C) const {
1583 return C->get_adr_type(alias_idx());
1584 }
1585 bool is_empty() const {
1586 assert(_mem, "must call next 1st");
1587 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1588 return _mem->is_top();
1589 }
1590 bool is_empty2() const {
1591 assert(_mem2, "must call next 1st");
1592 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1593 return _mem2->is_top();
1594 }
1595 Node* memory() const {
1596 assert(!is_empty(), "must not be empty");
1597 assert_synch();
1598 return _mem;
1599 }
1600 // get the current memory, regardless of empty or non-empty status
1601 Node* force_memory() const {
1602 assert(!is_empty() || !at_base_memory(), "");
1603 // Use _mm_base to defend against updates to _mem->base_memory().
1604 Node *mem = _mem->is_top() ? _mm_base : _mem;
1605 assert(mem == check_memory(), "");
1606 return mem;
1607 }
1608 Node* memory2() const {
1609 assert(_mem2 == check_memory2(), "");
1610 return _mem2;
1611 }
1612 void set_memory(Node* mem) {
1613 if (at_base_memory()) {
1614 // Note that this does not change the invariant _mm_base.
1615 _mm->set_base_memory(mem);
1616 } else {
1617 _mm->set_memory_at(_idx, mem);
1618 }
1619 _mem = mem;
1620 assert_synch();
1621 }
1622
1623 // Recover from a side effect to the MergeMemNode.
1624 void set_memory() {
1625 _mem = _mm->in(_idx);
1626 }
1627
1628 bool next() { return next(false); }
1629 bool next2() { return next(true); }
1630
1631 bool next_non_empty() { return next_non_empty(false); }
1632 bool next_non_empty2() { return next_non_empty(true); }
1633 // next_non_empty2 can yield states where is_empty() is true
1634
1635 private:
1636 // find the next item, which might be empty
1637 bool next(bool have_mm2) {
1638 assert((_mm2 != nullptr) == have_mm2, "use other next");
1639 assert_synch();
1640 if (++_idx < _cnt) {
1641 // Note: This iterator allows _mm to be non-sparse.
1642 // It behaves the same whether _mem is top or base_memory.
1643 _mem = _mm->in(_idx);
1644 if (have_mm2)
1645 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
1646 return true;
1647 }
1648 return false;
1649 }
1650
1651 // find the next non-empty item
1652 bool next_non_empty(bool have_mm2) {
1653 while (next(have_mm2)) {
1654 if (!is_empty()) {
1655 // make sure _mem2 is filled in sensibly
1656 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
1657 return true;
1658 } else if (have_mm2 && !is_empty2()) {
1659 return true; // is_empty() == true
1660 }
1661 }
1662 return false;
1663 }
1664 };
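
// A hedged example of the parallel iteration pattern, copying the non-empty
// slices of one merge into another (variable names illustrative):
//
//   for (MergeMemStream mms(dst_merge, src_merge); mms.next_non_empty2(); ) {
//     Node* new_mem = mms.memory2();        // current slice of src_merge
//     if (mms.force_memory() != new_mem) {  // safe even if dst slice is empty
//       mms.set_memory(new_mem);            // install it into dst_merge
//     }
//   }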
1665
1666 // cachewb node for guaranteeing writeback of the cache line at a
1667 // given address to (non-volatile) RAM
1668 class CacheWBNode : public Node {
1669 public:
1670 CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
1671 virtual int Opcode() const;
1672 virtual uint ideal_reg() const { return NotAMachineReg; }
1673 virtual uint match_edge(uint idx) const { return (idx == 2); }
1674 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1675 virtual const Type *bottom_type() const { return Type::MEMORY; }
1676 };
1677
1678 // cachewb pre sync node for ensuring that writebacks are serialised
1679 // relative to preceding or following stores
1680 class CacheWBPreSyncNode : public Node {
1681 public:
1682 CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1683 virtual int Opcode() const;
1684 virtual uint ideal_reg() const { return NotAMachineReg; }
1685 virtual uint match_edge(uint idx) const { return false; }
1686 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1687 virtual const Type *bottom_type() const { return Type::MEMORY; }
1688 };
1689
1690 // cachewb post sync node for ensuring that writebacks are serialised
1691 // relative to preceding or following stores
1692 class CacheWBPostSyncNode : public Node {
1693 public:
1694 CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1695 virtual int Opcode() const;
1696 virtual uint ideal_reg() const { return NotAMachineReg; }
1697 virtual uint match_edge(uint idx) const { return false; }
1698 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1699 virtual const Type *bottom_type() const { return Type::MEMORY; }
1700 };
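
// A hedged sketch of the emission order for a cache-line writeback (cf. the
// Unsafe.writebackMemory intrinsic; control wiring simplified, illustrative
// only):
//
//   Node* pre  = new CacheWBPreSyncNode(ctrl, mem);  // order vs. earlier stores
//   Node* wb   = new CacheWBNode(pre, mem, addr);    // write back the line
//   Node* post = new CacheWBPostSyncNode(wb, mem);   // order vs. later stores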
1701
1702 //------------------------------Prefetch---------------------------------------
1703
1704 // Allocation prefetch which may fault; the TLAB size has to be adjusted.
1705 class PrefetchAllocationNode : public Node {
1706 public:
1707 PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
1708 virtual int Opcode() const;
1709 virtual uint ideal_reg() const { return NotAMachineReg; }
1710 virtual uint match_edge(uint idx) const { return idx==2; }
1711 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1712 };
1713
1714 #endif // SHARE_OPTO_MEMNODE_HPP