/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;      // Access of unsafe origin.
  uint8_t _barrier_data;    // Bit field with barrier information

protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,  // When is it safe to do this load?
         Memory,   // Chunk of memory is being loaded from
         Address,  // The actual address, derived from the base
         ValueIn   // Value to store
  };
  typedef enum { unordered = 0,
                 acquire,  // Load has to acquire or be succeeded by MemBarAcquire.
                 release,  // Store has to release or be preceded by MemBarRelease.
                 seqcst,   // LoadStore has to have both acquire and release semantics.
                 unset     // The memory ordering is not set (used for testing)
  } MemOrd;
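  // Illustrative mapping (a sketch, not an exhaustive list): a Java volatile
  // read needs 'acquire', a Java volatile write needs 'release', and a plain
  // field or array access is 'unordered'.  On strongly ordered hardware the
  // flag may cost nothing at matching time, but the IR still records the
  // requirement so the optimizer cannot reorder the access illegally.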
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
      Node(c0,c1,c2),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
      Node(c0,c1,c2,c3),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
      Node(c0,c1,c2,c3,c4),
      _unaligned_access(false),
      _mismatched_access(false),
      _unsafe_access(false),
      _barrier_data(0) {
    init_class_id(Class_Mem);
    DEBUG_ONLY(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
  ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // The following two should probably be phase-specific functions:
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Returns NodeSentinel ((Node*)-1) to make the caller's Ideal short-circuit to null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not of the memory that is accessed.  For mismatched memory accesses the
  // two may differ: for instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }
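  // For example, a LoadUSNode below reports value_basic_type() == T_CHAR and
  // hence memory_size() == 2 (type2aelembytes(T_CHAR)), regardless of the
  // type of the array the 16-bit value happens to live in.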

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseValues* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseValues* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether this node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that.  Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };
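
  // An illustrative (not exhaustive) mapping: an ordinary field load guarded
  // by a null check is DependsOnlyOnTest and may be freely hoisted or
  // commoned; a load created for an Unsafe access whose guarding condition
  // C2 cannot see is UnknownControl; a load that additionally must not float
  // across a safepoint (e.g. one produced by pin_array_access_node() below)
  // is Pinned.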

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't have been safe to have a non-pinned LoadNode
  // with those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be reordered from those that require acquire semantics
  // to adhere to the Java specification.  The required behaviour is stored
  // in this field.
  const MemOrd _mo;

  AllocateNode* is_new_object_mark_load() const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                    const TypePtr* at, const Type* rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                    uint8_t barrier_data = 0);
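
  // A minimal usage sketch (names are illustrative): build a plain int load
  // from address 'adr' with alias type 'adr_type', under control 'ctl' and
  // memory state 'mem':
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered);
  // The factory selects the concrete subclass (here a LoadINode) from 'bt'.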

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Return true if it's possible to split the Load through a Phi merging the bases
  bool can_split_through_phi_base(PhaseGVN *phase);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseIterGVN *igvn);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // The following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != nullptr, "sanity");
    DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  bool has_reinterpret_variant(const Type* rt);
  Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);

  ControlDependency control_dependency() const { return _control_dependency; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
  bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }

  LoadNode* pin_array_access_node() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations.)  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }

  LoadNode* clone_pinned() const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType value_basic_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// header position where the (narrow) class pointer is located, but into the
// middle of the mark word (see oopDesc::klass_offset_in_bytes()).  This node
// implicitly shifts the loaded value (by markWord::klass_shift_at_offset bits)
// to extract the actual class pointer.  C2's type system is agnostic about
// whether the input address points directly at the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered from those that require release semantics
  // to adhere to the Java specification.  The required behaviour is stored
  // in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
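
  // For example: release_if_reference(T_OBJECT) and release_if_reference(T_ARRAY)
  // yield 'release' (except on AArch64), while release_if_reference(T_INT)
  // yields 'unordered'.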

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
                         const TypePtr* at, Node* val, BasicType bt,
                         MemOrd mo, bool require_atomic_access = false);
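
  // A minimal usage sketch (names are illustrative): store an oop 'val' with
  // the conservative ordering chosen by release_if_reference():
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));
  // The factory selects the concrete subclass from 'bt' (for T_OBJECT, a
  // StorePNode, or a StoreNNode when compressed oops are in use).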

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseValues* phase) const;

  bool has_reinterpret_variant(const Type* vt);
  Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType value_basic_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

// Special StoreL for flat stores that emits GC barriers for the field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }

  virtual uint match_edge(uint idx) const {
    return idx == MemNode::Address ||
           idx == MemNode::ValueIn ||
           idx == MemNode::ValueIn + 1;
  }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum { SCMEMPROJCON = (uint)-2 };
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == nullptr) return nullptr; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {}
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier_data;        // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
  virtual const Type* Value(PhaseGVN* phase) const;

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual const Type* Value(PhaseGVN* phase) const;
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};
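
// A sketch of the input layout shared by the conditional nodes above and
// below: in(Control), in(Memory), in(Address), in(ValueIn) holds the new
// value, and in(ExpectedIn) (== MemNode::ValueIn + 1) holds the value the
// location must contain for the operation to succeed.  CompareAndSwap*
// nodes produce a success flag; CompareAndExchange* nodes produce the value
// previously found at the address.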

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() &&
                      (!val->bottom_type()->is_long()->is_con() ||
                       val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
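
  // A hedged usage sketch: to zero a fresh allocation's body, a caller passes
  // the raw destination, a start offset aligned mod BytesPerInt (typically
  // just past the object header) and an end offset aligned mod BytesPerLong,
  // then threads the returned memory state into the object's initialization.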
  // Return the allocation's input memory edge if it is a different instance,
  // or the node itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate the store from any following volatile-load.
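//
// A sketch of the shape this produces around a Java volatile store (exact
// barrier placement varies by platform and GC, so treat it as illustrative):
//
//   ... MemBarRelease -> StoreX (MemOrd::release) -> MemBarVolatile ...
//
// while a volatile load is followed by a MemBarAcquire.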
1159 class MemBarNode: public MultiNode {
1160 virtual uint hash() const ; // { return NO_HASH; }
1161 virtual bool cmp( const Node &n ) const ; // Always fail, except on self
1162
1163 virtual uint size_of() const { return sizeof(*this); }
1164 // Memory type this node is serializing. Usually either rawptr or bottom.
1165 const TypePtr* _adr_type;
1166
1167 // How is this membar related to a nearby memory access?
1168 enum {
1169 Standalone,
1170 TrailingLoad,
1171 TrailingStore,
1172 LeadingStore,
1173 TrailingLoadStore,
1174 LeadingLoadStore,
1175 TrailingExpandedArrayCopy
1176 } _kind;
1177
1178 #ifdef ASSERT
1179 uint _pair_idx;
1180 #endif
1181
1182 public:
1183 enum {
1184 Precedent = TypeFunc::Parms // optional edge to force precedence
1185 };
1186 MemBarNode(Compile* C, int alias_idx, Node* precedent);
1187 virtual int Opcode() const = 0;
1188 virtual const class TypePtr *adr_type() const { return _adr_type; }
1189 virtual const Type* Value(PhaseGVN* phase) const;
1190 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1191 virtual uint match_edge(uint idx) const { return 0; }
1192 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1193 virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1194 // Factory method. Builds a wide or narrow membar.
1195 // Optional 'precedent' becomes an extra edge if not null.
1196 static MemBarNode* make(Compile* C, int opcode,
1197 int alias_idx = Compile::AliasIdxBot,
1198 Node* precedent = nullptr);
1199
1200 MemBarNode* trailing_membar() const;
1201 MemBarNode* leading_membar() const;
1202
1203 void set_trailing_load() { _kind = TrailingLoad; }
1204 bool trailing_load() const { return _kind == TrailingLoad; }
1205 bool trailing_store() const { return _kind == TrailingStore; }
1206 bool leading_store() const { return _kind == LeadingStore; }
1207 bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1208 bool leading_load_store() const { return _kind == LeadingLoadStore; }
1209 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1210 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1211 bool standalone() const { return _kind == Standalone; }
1212 void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1213 bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
1214
1215 static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
1216 static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
1217
1218 void remove(PhaseIterGVN *igvn);
1219 };
1220
1221 // "Acquire" - no following ref can move before (but earlier refs can
1222 // follow, like an early Load stalled in cache). Requires multi-cpu
1223 // visibility. Inserted after a volatile load.
1224 class MemBarAcquireNode: public MemBarNode {
1225 public:
1226 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1227 : MemBarNode(C, alias_idx, precedent) {}
1228 virtual int Opcode() const;
1229 };

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

class StoreStoreFenceNode: public MemBarNode {
public:
  StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// Ordering between a volatile store and a following volatile load;
// acts as a StoreLoad barrier, so it requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU. Used to order unsafe memory references
// inside the compiler when we lack alias info. Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};
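
// Illustrative call-site shape (insert_mem_bar is a GraphKit helper, named
// here as an assumption about the caller): unsafe accesses with imprecise
// alias info are typically bracketed so the compiler cannot reorder them
// with surrounding memory operations:
//   insert_mem_bar(Op_MemBarCPUOrder);
//   ... the unsafe load or store ...
//   insert_mem_bar(Op_MemBarCPUOrder);
// The node emits no machine code (ideal_reg() == 0); it only constrains
// the ideal graph.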

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };
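
  // Graph-shape sketch (illustrative):
  //   alloc  = Allocate(...)
  //   rawoop = Proj(alloc, TypeFunc::Parms)   // the raw oop
  //   init   = InitializeNode(C, Compile::AliasIdxRaw, rawoop)
  // with any captured field stores hanging off in(RawStores).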

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();
  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed first by
  // zero-initialization of the new memory, and then by any initializing stores.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete. (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseValues* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else null if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);
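
  // Capture sketch (illustrative): given
  //   alloc = Allocate(...);  init = alloc->initialization();
  //   StoreI(ctl, mem, oop + offset, value)   // a field init from the bytecode
  // a successful can_capture_store()/capture_store() pair rewrites the store
  // to address init's raw memory and hangs it off in(RawStores), so it can
  // later be coalesced with, or folded into, the zeroing done at expansion.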

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store. Return null if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseValues* phase);

  static intptr_t get_store_offset(Node* st, PhaseValues* phase);

  Node* make_raw_address(intptr_t offset, PhaseGVN* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;             // { return NO_HASH; }
  virtual bool cmp(const Node &n) const; // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def); // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);
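
  // Typical use (illustrative; all_mem, alias_idx and new_slice are
  // placeholders for the caller's state): clone the current memory state,
  // update one slice, and re-transform:
  //   MergeMemNode* mm = MergeMemNode::make(all_mem);
  //   mm->set_memory_at(alias_idx, new_slice);
  //   Node* merged = gvn.transform(mm);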

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support, i.e. the
  // default slice for every alias index that has not been set explicitly
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = nullptr);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
private:
  MergeMemNode* _mm;
  const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
  Node* _mm_base;           // loop-invariant base memory of _mm
  int _idx;                 // current alias index (AliasIdxBot denotes the base memory)
  int _cnt;                 // number of memory edges in _mm
  Node* _mem;               // current memory slice of _mm
  Node* _mem2;              // current memory slice of _mm2
  int _cnt2;                // number of memory edges in _mm2

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory. In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice. See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2 == nullptr || mm2->verify_sparse(), "please, no dups of base");

    _mm = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = nullptr;
    _mem2 = nullptr;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory() ? _mm2->base_memory() : _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

public:

  // expected usages:
  //   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  //   for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
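
  // A fuller sketch (illustrative; transform_slice is a hypothetical helper):
  //   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
  //     Node* slice = mms.memory();             // current non-empty slice
  //     mms.set_memory(transform_slice(slice)); // write it back in place
  //   }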

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    DEBUG_ONLY(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must also be a MergeMem");
    ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != nullptr, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem == _mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2 == _mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next() { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty() { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != nullptr) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note: This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true; // is_empty() == true
      }
    }
    return false;
  }
};

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
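
// These three node types back the cache-writeback intrinsics (used, for
// example, by MappedByteBuffer::force on persistent memory). A plausible
// expansion, subject to the actual intrinsic inliner, is
//   CacheWBPreSync;  CacheWB(line) for each cache line in range;  CacheWBPostSync;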

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr, mem, adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx == 2; }
  virtual const Type *bottom_type() const { return (AllocatePrefetchStyle == 3) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP