1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_OPTO_MEMNODE_HPP
27 #define SHARE_OPTO_MEMNODE_HPP
28
29 #include "opto/multnode.hpp"
30 #include "opto/node.hpp"
31 #include "opto/opcodes.hpp"
32 #include "opto/type.hpp"
33
34 // Portions of code courtesy of Clifford Click
35
36 class MultiNode;
37 class PhaseCCP;
38 class PhaseTransform;
39
40 //------------------------------MemNode----------------------------------------
41 // Load or Store, possibly throwing a null pointer exception
42 class MemNode : public Node {
43 private:
44 bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: e.g., a byte read from an integer array
46 bool _unsafe_access; // Access of unsafe origin.
47 uint8_t _barrier_data; // Bit field with barrier information
48
49 protected:
50 #ifdef ASSERT
51 const TypePtr* _adr_type; // What kind of memory is being addressed?
52 #endif
53 virtual uint size_of() const;
54 public:
55 enum { Control, // When is it safe to do this load?
56 Memory, // Chunk of memory is being loaded from
         Address,               // Actual address, derived from the base
58 ValueIn // Value to store
59 };
60 typedef enum { unordered = 0,
61 acquire, // Load has to acquire or be succeeded by MemBarAcquire.
62 release, // Store has to release or be preceded by MemBarRelease.
63 seqcst, // LoadStore has to have both acquire and release semantics.
64 unset // The memory ordering is not set (used for testing)
65 } MemOrd;
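  // For example (illustrative, not an exhaustive rule): a load for a Java
  // volatile read carries acquire, a store for a Java volatile write carries
  // release, and plain field accesses carry unordered; seqcst is used by
  // two-way LoadStore nodes such as compare-and-swap. The exact barrier
  // placement around such accesses is platform-dependent (see the MemBar
  // classes below).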
66 protected:
67 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
68 Node(c0,c1,c2),
69 _unaligned_access(false),
70 _mismatched_access(false),
71 _unsafe_access(false),
72 _barrier_data(0) {
73 init_class_id(Class_Mem);
74 DEBUG_ONLY(_adr_type=at; adr_type();)
75 }
76 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
77 Node(c0,c1,c2,c3),
78 _unaligned_access(false),
79 _mismatched_access(false),
80 _unsafe_access(false),
81 _barrier_data(0) {
82 init_class_id(Class_Mem);
83 DEBUG_ONLY(_adr_type=at; adr_type();)
84 }
85 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
86 Node(c0,c1,c2,c3,c4),
87 _unaligned_access(false),
88 _mismatched_access(false),
89 _unsafe_access(false),
90 _barrier_data(0) {
91 init_class_id(Class_Mem);
92 DEBUG_ONLY(_adr_type=at; adr_type();)
93 }
94
95 virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
96 ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
97 static bool check_if_adr_maybe_raw(Node* adr);
98
99 public:
100 // Helpers for the optimizer. Documented in memnode.cpp.
101 static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
102 Node* p2, AllocateNode* a2,
103 PhaseTransform* phase);
104 static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
105
106 static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
107 static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
108 // The following two should probably be phase-specific functions:
109 static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
110 static bool all_controls_dominate(Node* dom, Node* sub) {
111 DomResult dom_result = maybe_all_controls_dominate(dom, sub);
112 return dom_result == DomResult::Dominate;
113 }
114
115 virtual const class TypePtr *adr_type() const; // returns bottom_type of address
116
117 // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Returns NodeSentinel ((Node*)-1) to tell the caller to return null.
119
120 // Helper function for adr_type() implementations.
121 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
122
  // Raw access function, to allow copying of adr_type efficiently in
  // product builds while retaining the debug info for debug builds.
125 const TypePtr *raw_adr_type() const {
126 return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
127 }
128
129 #ifdef ASSERT
130 void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
131 #endif
132
133 // Return the barrier data of n, if available, or 0 otherwise.
134 static uint8_t barrier_data(const Node* n);
135
136 // Map a load or store opcode to its corresponding store opcode.
137 // (Return -1 if unknown.)
138 virtual int store_Opcode() const { return -1; }
139
  // What is the type of the value in memory? (T_VOID means "unspecified".)
141 // The returned type is a property of the value that is loaded/stored and
142 // not the memory that is accessed. For mismatched memory accesses
143 // they might differ. For instance, a value of type 'short' may be stored
144 // into an array of elements of type 'long'.
145 virtual BasicType value_basic_type() const = 0;
146 virtual int memory_size() const {
147 #ifdef ASSERT
148 return type2aelembytes(value_basic_type(), true);
149 #else
150 return type2aelembytes(value_basic_type());
151 #endif
152 }
153
154 uint8_t barrier_data() { return _barrier_data; }
155 void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
156
157 // Search through memory states which precede this node (load or store).
158 // Look for an exact match for the address, with no intervening
159 // aliased stores.
160 Node* find_previous_store(PhaseValues* phase);
161
162 // Can this node (load or store) accurately see a stored value in
163 // the given memory state? (The state may or may not be in(Memory).)
164 Node* can_see_stored_value(Node* st, PhaseValues* phase) const;
165
166 void set_unaligned_access() { _unaligned_access = true; }
167 bool is_unaligned_access() const { return _unaligned_access; }
168 void set_mismatched_access() { _mismatched_access = true; }
169 bool is_mismatched_access() const { return _mismatched_access; }
170 void set_unsafe_access() { _unsafe_access = true; }
171 bool is_unsafe_access() const { return _unsafe_access; }
172
173 #ifndef PRODUCT
174 static void dump_adr_type(const TypePtr* adr_type, outputStream* st);
175 virtual void dump_spec(outputStream *st) const;
176 #endif
177 };
178
179 //------------------------------LoadNode---------------------------------------
180 // Load value; requires Memory and Address
181 class LoadNode : public MemNode {
182 public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test. The field _control_dependency below records
  // whether this load depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are. Therefore, Pinned is stricter.
189 enum ControlDependency {
190 Pinned,
191 UnknownControl,
192 DependsOnlyOnTest
193 };
194
195 private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned one; otherwise
  // it wouldn't have been safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode with the pinned one.
205 ControlDependency _control_dependency;
206
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // loads that can be freely reordered from those that require acquire
  // semantics to adhere to the Java specification. The required behaviour
  // is stored in this field.
211 const MemOrd _mo;
212
213 AllocateNode* is_new_object_mark_load() const;
214
215 protected:
216 virtual bool cmp(const Node &n) const;
217 virtual uint size_of() const; // Size is bigger
218 // Should LoadNode::Ideal() attempt to remove control edges?
219 virtual bool can_remove_control() const;
220 const Type* const _type; // What kind of value is loaded?
221
222 virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
223 public:
224
225 LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
226 : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
227 init_class_id(Class_Load);
228 }
229 inline bool is_unordered() const { return !is_acquire(); }
230 inline bool is_acquire() const {
231 assert(_mo == unordered || _mo == acquire, "unexpected");
232 return _mo == acquire;
233 }
234 inline bool is_unsigned() const {
235 int lop = Opcode();
236 return (lop == Op_LoadUB) || (lop == Op_LoadUS);
237 }
238
239 // Polymorphic factory method:
240 static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
241 const TypePtr* at, const Type* rt, BasicType bt,
242 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
243 bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
244 uint8_t barrier_data = 0);
245
246 virtual uint hash() const; // Check the type
247
248 // Handle algebraic identities here. If we have an identity, return the Node
249 // we are equivalent to. We look for Load of a Store.
250 virtual Node* Identity(PhaseGVN* phase);
251
252 // If the load is from Field memory and the pointer is non-null, it might be possible to
253 // zero out the control input.
254 // If the offset is constant and the base is an object allocation,
255 // try to hook me up to the exact initializing store.
256 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
257
258 // Return true if it's possible to split the Load through a Phi merging the bases
259 bool can_split_through_phi_base(PhaseGVN *phase);
260
261 // Split instance field load through Phi.
262 Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);
263
264 // Recover original value from boxed values
265 Node *eliminate_autobox(PhaseIterGVN *igvn);
266
267 // Compute a new Type for this node. Basically we just do the pre-check,
268 // then call the virtual add() to set the type.
269 virtual const Type* Value(PhaseGVN* phase) const;
270
271 // Common methods for LoadKlass and LoadNKlass nodes.
272 const Type* klass_value_common(PhaseGVN* phase) const;
273 Node* klass_identity_common(PhaseGVN* phase);
274
275 virtual uint ideal_reg() const;
276 virtual const Type *bottom_type() const;
  // The following method is copied from TypeNode:
278 void set_type(const Type* t) {
279 assert(t != nullptr, "sanity");
280 DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
281 *(const Type**)&_type = t; // cast away const-ness
282 // If this node is in the hash table, make sure it doesn't need a rehash.
283 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
284 }
  const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }
286
287 // Do not match memory edge
288 virtual uint match_edge(uint idx) const;
289
290 // Map a load opcode to its corresponding store opcode.
291 virtual int store_Opcode() const = 0;
292
293 // Check if the load's memory input is a Phi node with the same control.
294 bool is_instance_field_load_with_local_phi(Node* ctrl);
295
296 Node* convert_to_unsigned_load(PhaseGVN& gvn);
297 Node* convert_to_signed_load(PhaseGVN& gvn);
298
299 bool has_reinterpret_variant(const Type* rt);
300 Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);
301
302 ControlDependency control_dependency() const { return _control_dependency; }
303 bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
304 bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }
305
306 LoadNode* pin_array_access_node() const;
307
308 #ifndef PRODUCT
309 virtual void dump_spec(outputStream *st) const;
310 #endif
311 #ifdef ASSERT
312 // Helper function to allow a raw load without control edge for some cases
313 static bool is_immutable_value(Node* adr);
314 #endif
315 protected:
316 const Type* load_array_final_field(const TypeKlassPtr *tkls,
317 ciKlass* klass) const;
318
319 Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;
320
321 // depends_only_on_test is almost always true, and needs to be almost always
322 // true to enable key hoisting & commoning optimizations. However, for the
323 // special case of RawPtr loads from TLS top & end, and other loads performed by
324 // GC barriers, the control edge carries the dependence preventing hoisting past
325 // a Safepoint instead of the memory edge. (An unfortunate consequence of having
326 // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
327 // which produce results (new raw memory state) inside of loops preventing all
328 // manner of other optimizations). Basically, it's ugly but so is the alternative.
329 // See comment in macro.cpp, around line 125 expand_allocate_common().
330 virtual bool depends_only_on_test() const {
331 return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
332 }
333
334 LoadNode* clone_pinned() const;
335 };
336
337 //------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
339 class LoadBNode : public LoadNode {
340 public:
341 LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
342 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
343 virtual int Opcode() const;
344 virtual uint ideal_reg() const { return Op_RegI; }
345 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
346 virtual const Type* Value(PhaseGVN* phase) const;
347 virtual int store_Opcode() const { return Op_StoreB; }
348 virtual BasicType value_basic_type() const { return T_BYTE; }
349 };
350
351 //------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits) from memory
353 class LoadUBNode : public LoadNode {
354 public:
355 LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
356 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
357 virtual int Opcode() const;
358 virtual uint ideal_reg() const { return Op_RegI; }
359 virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
360 virtual const Type* Value(PhaseGVN* phase) const;
361 virtual int store_Opcode() const { return Op_StoreB; }
362 virtual BasicType value_basic_type() const { return T_BYTE; }
363 };
364
365 //------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits) from memory
367 class LoadUSNode : public LoadNode {
368 public:
369 LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
370 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
371 virtual int Opcode() const;
372 virtual uint ideal_reg() const { return Op_RegI; }
373 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
374 virtual const Type* Value(PhaseGVN* phase) const;
375 virtual int store_Opcode() const { return Op_StoreC; }
376 virtual BasicType value_basic_type() const { return T_CHAR; }
377 };
378
379 //------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
381 class LoadSNode : public LoadNode {
382 public:
383 LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
384 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
385 virtual int Opcode() const;
386 virtual uint ideal_reg() const { return Op_RegI; }
387 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
388 virtual const Type* Value(PhaseGVN* phase) const;
389 virtual int store_Opcode() const { return Op_StoreC; }
390 virtual BasicType value_basic_type() const { return T_SHORT; }
391 };
392
393 //------------------------------LoadINode--------------------------------------
394 // Load an integer from memory
395 class LoadINode : public LoadNode {
396 public:
397 LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
398 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
399 virtual int Opcode() const;
400 virtual uint ideal_reg() const { return Op_RegI; }
401 virtual int store_Opcode() const { return Op_StoreI; }
402 virtual BasicType value_basic_type() const { return T_INT; }
403 };
404
405 //------------------------------LoadRangeNode----------------------------------
406 // Load an array length from the array
407 class LoadRangeNode : public LoadINode {
408 public:
409 LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
410 : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
411 virtual int Opcode() const;
412 virtual const Type* Value(PhaseGVN* phase) const;
413 virtual Node* Identity(PhaseGVN* phase);
414 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
415 };
416
417 //------------------------------LoadLNode--------------------------------------
418 // Load a long from memory
419 class LoadLNode : public LoadNode {
420 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
421 virtual bool cmp( const Node &n ) const {
422 return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
423 && LoadNode::cmp(n);
424 }
425 virtual uint size_of() const { return sizeof(*this); }
426 const bool _require_atomic_access; // is piecewise load forbidden?
427
428 public:
429 LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
430 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
431 : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
432 virtual int Opcode() const;
433 virtual uint ideal_reg() const { return Op_RegL; }
434 virtual int store_Opcode() const { return Op_StoreL; }
435 virtual BasicType value_basic_type() const { return T_LONG; }
436 bool require_atomic_access() const { return _require_atomic_access; }
437
438 #ifndef PRODUCT
439 virtual void dump_spec(outputStream *st) const {
440 LoadNode::dump_spec(st);
441 if (_require_atomic_access) st->print(" Atomic!");
442 }
443 #endif
444 };
445
446 //------------------------------LoadL_unalignedNode----------------------------
447 // Load a long from unaligned memory
448 class LoadL_unalignedNode : public LoadLNode {
449 public:
450 LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
451 : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
452 virtual int Opcode() const;
453 };
454
455 //------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
457 class LoadFNode : public LoadNode {
458 public:
459 LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
460 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
461 virtual int Opcode() const;
462 virtual uint ideal_reg() const { return Op_RegF; }
463 virtual int store_Opcode() const { return Op_StoreF; }
464 virtual BasicType value_basic_type() const { return T_FLOAT; }
465 };
466
467 //------------------------------LoadDNode--------------------------------------
468 // Load a double (64 bits) from memory
469 class LoadDNode : public LoadNode {
470 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
471 virtual bool cmp( const Node &n ) const {
472 return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
473 && LoadNode::cmp(n);
474 }
475 virtual uint size_of() const { return sizeof(*this); }
476 const bool _require_atomic_access; // is piecewise load forbidden?
477
478 public:
479 LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
480 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
481 : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
482 virtual int Opcode() const;
483 virtual uint ideal_reg() const { return Op_RegD; }
484 virtual int store_Opcode() const { return Op_StoreD; }
485 virtual BasicType value_basic_type() const { return T_DOUBLE; }
486 bool require_atomic_access() const { return _require_atomic_access; }
487
488 #ifndef PRODUCT
489 virtual void dump_spec(outputStream *st) const {
490 LoadNode::dump_spec(st);
491 if (_require_atomic_access) st->print(" Atomic!");
492 }
493 #endif
494 };
495
496 //------------------------------LoadD_unalignedNode----------------------------
497 // Load a double from unaligned memory
498 class LoadD_unalignedNode : public LoadDNode {
499 public:
500 LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
501 : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
502 virtual int Opcode() const;
503 };
504
505 //------------------------------LoadPNode--------------------------------------
506 // Load a pointer from memory (either object or array)
507 class LoadPNode : public LoadNode {
508 public:
509 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
510 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
511 virtual int Opcode() const;
512 virtual uint ideal_reg() const { return Op_RegP; }
513 virtual int store_Opcode() const { return Op_StoreP; }
514 virtual BasicType value_basic_type() const { return T_ADDRESS; }
515 };
516
517
518 //------------------------------LoadNNode--------------------------------------
519 // Load a narrow oop from memory (either object or array)
520 class LoadNNode : public LoadNode {
521 public:
522 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
523 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
524 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
525 virtual int Opcode() const;
526 virtual uint ideal_reg() const { return Op_RegN; }
527 virtual int store_Opcode() const { return Op_StoreN; }
528 virtual BasicType value_basic_type() const { return T_NARROWOOP; }
529 };
530
531 //------------------------------LoadKlassNode----------------------------------
532 // Load a Klass from an object
533 class LoadKlassNode : public LoadPNode {
534 private:
535 LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
536 : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
537
538 public:
539 virtual int Opcode() const;
540 virtual const Type* Value(PhaseGVN* phase) const;
541 virtual Node* Identity(PhaseGVN* phase);
542 virtual bool depends_only_on_test() const { return true; }
543
544 // Polymorphic factory method:
545 static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
546 const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
547 };
548
549 //------------------------------LoadNKlassNode---------------------------------
550 // Load a narrow Klass from an object.
551 // With compact headers, the input address (adr) does not point at the exact
552 // header position where the (narrow) class pointer is located, but into the
553 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
554 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
555 // extract the actual class pointer. C2's type system is agnostic on whether the
556 // input address directly points into the class pointer.
557 class LoadNKlassNode : public LoadNNode {
558 private:
559 friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
560 LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
561 : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
562
563 public:
564 virtual int Opcode() const;
565 virtual uint ideal_reg() const { return Op_RegN; }
566 virtual int store_Opcode() const { return Op_StoreNKlass; }
567 virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
568
569 virtual const Type* Value(PhaseGVN* phase) const;
570 virtual Node* Identity(PhaseGVN* phase);
571 virtual bool depends_only_on_test() const { return true; }
572 };
573
574 //------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
576 class StoreNode : public MemNode {
577 private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be freely reordered from those that require release
  // semantics to adhere to the Java specification. The required behaviour
  // is stored in this field.
582 const MemOrd _mo;
583 // Needed for proper cloning.
584 virtual uint size_of() const { return sizeof(*this); }
585 protected:
586 virtual bool cmp( const Node &n ) const;
587 virtual bool depends_only_on_test() const { return false; }
588
  Node *Ideal_masked_input(PhaseGVN *phase, uint mask);
590 Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
591
592 public:
593 // We must ensure that stores of object references will be visible
594 // only after the object's initialization. So the callers of this
595 // procedure must indicate that the store requires `release'
596 // semantics, if the stored value is an object reference that might
597 // point to a new object and may become externally visible.
598 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
599 : MemNode(c, mem, adr, at, val), _mo(mo) {
600 init_class_id(Class_Store);
601 }
602 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
603 : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
604 init_class_id(Class_Store);
605 }
606
607 inline bool is_unordered() const { return !is_release(); }
608 inline bool is_release() const {
609 assert((_mo == unordered || _mo == release), "unexpected");
610 return _mo == release;
611 }
612
613 // Conservatively release stores of object references in order to
614 // ensure visibility of object initialization.
615 static inline MemOrd release_if_reference(const BasicType t) {
616 #ifdef AARCH64
617 // AArch64 doesn't need a release store here because object
618 // initialization contains the necessary barriers.
619 return unordered;
620 #else
621 const MemOrd mo = (t == T_ARRAY ||
622 t == T_ADDRESS || // Might be the address of an object reference (`boxing').
623 t == T_OBJECT) ? release : unordered;
624 return mo;
625 #endif
626 }
627
628 // Polymorphic factory method
629 //
630 // We must ensure that stores of object references will be visible
631 // only after the object's initialization. So the callers of this
632 // procedure must indicate that the store requires `release'
633 // semantics, if the stored value is an object reference that might
634 // point to a new object and may become externally visible.
635 static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
636 const TypePtr* at, Node* val, BasicType bt,
637 MemOrd mo, bool require_atomic_access = false);
638
639 virtual uint hash() const; // Check the type
640
641 // If the store is to Field memory and the pointer is non-null, we can
642 // zero out the control input.
643 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
644
645 // Compute a new Type for this node. Basically we just do the pre-check,
646 // then call the virtual add() to set the type.
647 virtual const Type* Value(PhaseGVN* phase) const;
648
649 // Check for identity function on memory (Load then Store at same address)
650 virtual Node* Identity(PhaseGVN* phase);
651
652 // Do not match memory edge
653 virtual uint match_edge(uint idx) const;
654
655 virtual const Type *bottom_type() const; // returns Type::MEMORY
656
  // Map a store opcode to its corresponding store opcode: trivially, itself.
658 virtual int store_Opcode() const { return Opcode(); }
659
  // Have all possible loads of the value stored been optimized away?
661 bool value_never_loaded(PhaseValues* phase) const;
662
663 bool has_reinterpret_variant(const Type* vt);
664 Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);
665
666 MemBarNode* trailing_membar() const;
667 };
668
669 //------------------------------StoreBNode-------------------------------------
670 // Store byte to memory
671 class StoreBNode : public StoreNode {
672 public:
673 StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
674 : StoreNode(c, mem, adr, at, val, mo) {}
675 virtual int Opcode() const;
676 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
677 virtual BasicType value_basic_type() const { return T_BYTE; }
678 };
679
680 //------------------------------StoreCNode-------------------------------------
681 // Store char/short to memory
682 class StoreCNode : public StoreNode {
683 public:
684 StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
685 : StoreNode(c, mem, adr, at, val, mo) {}
686 virtual int Opcode() const;
687 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
688 virtual BasicType value_basic_type() const { return T_CHAR; }
689 };
690
691 //------------------------------StoreINode-------------------------------------
692 // Store int to memory
693 class StoreINode : public StoreNode {
694 public:
695 StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
696 : StoreNode(c, mem, adr, at, val, mo) {}
697 virtual int Opcode() const;
698 virtual BasicType value_basic_type() const { return T_INT; }
699 };
700
701 //------------------------------StoreLNode-------------------------------------
702 // Store long to memory
703 class StoreLNode : public StoreNode {
704 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
705 virtual bool cmp( const Node &n ) const {
706 return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
707 && StoreNode::cmp(n);
708 }
709 virtual uint size_of() const { return sizeof(*this); }
710 const bool _require_atomic_access; // is piecewise store forbidden?
711
712 public:
713 StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
714 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
715 virtual int Opcode() const;
716 virtual BasicType value_basic_type() const { return T_LONG; }
717 bool require_atomic_access() const { return _require_atomic_access; }
718
719 #ifndef PRODUCT
720 virtual void dump_spec(outputStream *st) const {
721 StoreNode::dump_spec(st);
722 if (_require_atomic_access) st->print(" Atomic!");
723 }
724 #endif
725 };
726
// Special StoreL for flat stores that emits GC barriers for the field at 'oop_off' in the backend
728 class StoreLSpecialNode : public StoreNode {
729
730 public:
731 StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
732 : StoreNode(c, mem, adr, at, val, mo) {
733 set_mismatched_access();
734 if (oop_off != nullptr) {
735 add_req(oop_off);
736 }
737 }
738 virtual int Opcode() const;
739 virtual BasicType value_basic_type() const { return T_LONG; }
740
741 virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
742 idx == MemNode::ValueIn ||
743 idx == MemNode::ValueIn + 1; }
744 };
745
746 //------------------------------StoreFNode-------------------------------------
747 // Store float to memory
748 class StoreFNode : public StoreNode {
749 public:
750 StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
751 : StoreNode(c, mem, adr, at, val, mo) {}
752 virtual int Opcode() const;
753 virtual BasicType value_basic_type() const { return T_FLOAT; }
754 };
755
756 //------------------------------StoreDNode-------------------------------------
757 // Store double to memory
758 class StoreDNode : public StoreNode {
759 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
760 virtual bool cmp( const Node &n ) const {
761 return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
762 && StoreNode::cmp(n);
763 }
764 virtual uint size_of() const { return sizeof(*this); }
765 const bool _require_atomic_access; // is piecewise store forbidden?
766 public:
767 StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
768 MemOrd mo, bool require_atomic_access = false)
769 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
770 virtual int Opcode() const;
771 virtual BasicType value_basic_type() const { return T_DOUBLE; }
772 bool require_atomic_access() const { return _require_atomic_access; }
773
774 #ifndef PRODUCT
775 virtual void dump_spec(outputStream *st) const {
776 StoreNode::dump_spec(st);
777 if (_require_atomic_access) st->print(" Atomic!");
778 }
779 #endif
780
781 };
782
783 //------------------------------StorePNode-------------------------------------
784 // Store pointer to memory
785 class StorePNode : public StoreNode {
786 public:
787 StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
788 : StoreNode(c, mem, adr, at, val, mo) {}
789 virtual int Opcode() const;
790 virtual BasicType value_basic_type() const { return T_ADDRESS; }
791 };
792
793 //------------------------------StoreNNode-------------------------------------
794 // Store narrow oop to memory
795 class StoreNNode : public StoreNode {
796 public:
797 StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
798 : StoreNode(c, mem, adr, at, val, mo) {}
799 virtual int Opcode() const;
800 virtual BasicType value_basic_type() const { return T_NARROWOOP; }
801 };
802
803 //------------------------------StoreNKlassNode--------------------------------------
804 // Store narrow klass to memory
805 class StoreNKlassNode : public StoreNNode {
806 public:
807 StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
808 : StoreNNode(c, mem, adr, at, val, mo) {}
809 virtual int Opcode() const;
810 virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
811 };
812
813 //------------------------------SCMemProjNode---------------------------------------
814 // This class defines a projection of the memory state of a store conditional node.
815 // These nodes return a value, but also update memory.
816 class SCMemProjNode : public ProjNode {
817 public:
818 enum {SCMEMPROJCON = (uint)-2};
819 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
820 virtual int Opcode() const;
821 virtual bool is_CFG() const { return false; }
822 virtual const Type *bottom_type() const {return Type::MEMORY;}
823 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
824 virtual const Type* Value(PhaseGVN* phase) const;
825 #ifndef PRODUCT
826 virtual void dump_spec(outputStream *st) const {};
827 #endif
828 };
829
830 //------------------------------LoadStoreNode---------------------------
831 // Note: is_Mem() method returns 'true' for this class.
832 class LoadStoreNode : public Node {
833 private:
834 const Type* const _type; // What kind of value is loaded?
835 uint8_t _barrier_data; // Bit field with barrier information
836 virtual uint size_of() const; // Size is bigger
837 #ifdef ASSERT
838 const TypePtr* _adr_type; // What kind of memory is being addressed?
839 #endif // ASSERT
840 public:
841 LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
842 virtual bool depends_only_on_test() const { return false; }
843 virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
844
845 virtual const Type *bottom_type() const { return _type; }
846 virtual uint ideal_reg() const;
847 virtual const TypePtr* adr_type() const;
848 virtual const Type* Value(PhaseGVN* phase) const;
849
850 bool result_not_used() const;
851 MemBarNode* trailing_membar() const;
852
853 uint8_t barrier_data() { return _barrier_data; }
854 void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
855 };
856
857 class LoadStoreConditionalNode : public LoadStoreNode {
858 public:
859 enum {
860 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
861 };
862 LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
863 virtual const Type* Value(PhaseGVN* phase) const;
864 };
865
866 class CompareAndSwapNode : public LoadStoreConditionalNode {
867 private:
868 const MemNode::MemOrd _mem_ord;
869 public:
870 CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
871 MemNode::MemOrd order() const {
872 return _mem_ord;
873 }
874 virtual uint size_of() const { return sizeof(*this); }
875 };
876
877 class CompareAndExchangeNode : public LoadStoreNode {
878 private:
879 const MemNode::MemOrd _mem_ord;
880 public:
881 enum {
882 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
883 };
884 CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
885 LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
886 init_req(ExpectedIn, ex );
887 }
888
889 MemNode::MemOrd order() const {
890 return _mem_ord;
891 }
892 virtual uint size_of() const { return sizeof(*this); }
893 };
894
895 //------------------------------CompareAndSwapBNode---------------------------
896 class CompareAndSwapBNode : public CompareAndSwapNode {
897 public:
898 CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
899 virtual int Opcode() const;
900 };
901
902 //------------------------------CompareAndSwapSNode---------------------------
903 class CompareAndSwapSNode : public CompareAndSwapNode {
904 public:
905 CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
906 virtual int Opcode() const;
907 };
908
909 //------------------------------CompareAndSwapINode---------------------------
910 class CompareAndSwapINode : public CompareAndSwapNode {
911 public:
912 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
913 virtual int Opcode() const;
914 };
915
916 //------------------------------CompareAndSwapLNode---------------------------
917 class CompareAndSwapLNode : public CompareAndSwapNode {
918 public:
919 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
920 virtual int Opcode() const;
921 };
922
923 //------------------------------CompareAndSwapPNode---------------------------
924 class CompareAndSwapPNode : public CompareAndSwapNode {
925 public:
926 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
927 virtual int Opcode() const;
928 };
929
930 //------------------------------CompareAndSwapNNode---------------------------
931 class CompareAndSwapNNode : public CompareAndSwapNode {
932 public:
933 CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
934 virtual int Opcode() const;
935 };
936
937 //------------------------------WeakCompareAndSwapBNode---------------------------
938 class WeakCompareAndSwapBNode : public CompareAndSwapNode {
939 public:
940 WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
941 virtual int Opcode() const;
942 };
943
944 //------------------------------WeakCompareAndSwapSNode---------------------------
945 class WeakCompareAndSwapSNode : public CompareAndSwapNode {
946 public:
947 WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
948 virtual int Opcode() const;
949 };
950
951 //------------------------------WeakCompareAndSwapINode---------------------------
952 class WeakCompareAndSwapINode : public CompareAndSwapNode {
953 public:
954 WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
955 virtual int Opcode() const;
956 };
957
958 //------------------------------WeakCompareAndSwapLNode---------------------------
959 class WeakCompareAndSwapLNode : public CompareAndSwapNode {
960 public:
961 WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
962 virtual int Opcode() const;
963 };
964
965 //------------------------------WeakCompareAndSwapPNode---------------------------
966 class WeakCompareAndSwapPNode : public CompareAndSwapNode {
967 public:
968 WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
969 virtual int Opcode() const;
970 };
971
972 //------------------------------WeakCompareAndSwapNNode---------------------------
973 class WeakCompareAndSwapNNode : public CompareAndSwapNode {
974 public:
975 WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
976 virtual int Opcode() const;
977 };
978
979 //------------------------------CompareAndExchangeBNode---------------------------
980 class CompareAndExchangeBNode : public CompareAndExchangeNode {
981 public:
982 CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
983 virtual int Opcode() const;
984 };
985
986
987 //------------------------------CompareAndExchangeSNode---------------------------
988 class CompareAndExchangeSNode : public CompareAndExchangeNode {
989 public:
990 CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
991 virtual int Opcode() const;
992 };
993
994 //------------------------------CompareAndExchangeLNode---------------------------
995 class CompareAndExchangeLNode : public CompareAndExchangeNode {
996 public:
997 CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
998 virtual int Opcode() const;
999 };
1000
1001
1002 //------------------------------CompareAndExchangeINode---------------------------
1003 class CompareAndExchangeINode : public CompareAndExchangeNode {
1004 public:
1005 CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
1006 virtual int Opcode() const;
1007 };
1008
1009
1010 //------------------------------CompareAndExchangePNode---------------------------
1011 class CompareAndExchangePNode : public CompareAndExchangeNode {
1012 public:
1013 CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
1014 virtual int Opcode() const;
1015 };
1016
1017 //------------------------------CompareAndExchangeNNode---------------------------
1018 class CompareAndExchangeNNode : public CompareAndExchangeNode {
1019 public:
1020 CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
1021 virtual int Opcode() const;
1022 };
1023
1024 //------------------------------GetAndAddBNode---------------------------
1025 class GetAndAddBNode : public LoadStoreNode {
1026 public:
1027 GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
1028 virtual int Opcode() const;
1029 };
1030
1031 //------------------------------GetAndAddSNode---------------------------
1032 class GetAndAddSNode : public LoadStoreNode {
1033 public:
1034 GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
1035 virtual int Opcode() const;
1036 };
1037
1038 //------------------------------GetAndAddINode---------------------------
1039 class GetAndAddINode : public LoadStoreNode {
1040 public:
1041 GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
1042 virtual int Opcode() const;
1043 };
1044
1045 //------------------------------GetAndAddLNode---------------------------
1046 class GetAndAddLNode : public LoadStoreNode {
1047 public:
1048 GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
1049 virtual int Opcode() const;
1050 };
1051
1052 //------------------------------GetAndSetBNode---------------------------
1053 class GetAndSetBNode : public LoadStoreNode {
1054 public:
1055 GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
1056 virtual int Opcode() const;
1057 };
1058
1059 //------------------------------GetAndSetSNode---------------------------
1060 class GetAndSetSNode : public LoadStoreNode {
1061 public:
1062 GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
1063 virtual int Opcode() const;
1064 };
1065
1066 //------------------------------GetAndSetINode---------------------------
1067 class GetAndSetINode : public LoadStoreNode {
1068 public:
1069 GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
1070 virtual int Opcode() const;
1071 };
1072
1073 //------------------------------GetAndSetLNode---------------------------
1074 class GetAndSetLNode : public LoadStoreNode {
1075 public:
1076 GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
1077 virtual int Opcode() const;
1078 };
1079
1080 //------------------------------GetAndSetPNode---------------------------
1081 class GetAndSetPNode : public LoadStoreNode {
1082 public:
1083 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1084 virtual int Opcode() const;
1085 };
1086
1087 //------------------------------GetAndSetNNode---------------------------
1088 class GetAndSetNNode : public LoadStoreNode {
1089 public:
1090 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1091 virtual int Opcode() const;
1092 };
1093
1094 //------------------------------ClearArray-------------------------------------
1095 class ClearArrayNode: public Node {
1096 private:
1097 bool _is_large;
1098 bool _word_copy_only;
1099 public:
1100 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1101 : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1102 _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1103 init_class_id(Class_ClearArray);
1104 }
1105 virtual int Opcode() const;
1106 virtual const Type *bottom_type() const { return Type::MEMORY; }
1107 // ClearArray modifies array elements, and so affects only the
1108 // array memory addressed by the bottom_type of its base address.
1109 virtual const class TypePtr *adr_type() const;
1110 virtual Node* Identity(PhaseGVN* phase);
1111 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1112 virtual uint match_edge(uint idx) const;
1113 bool is_large() const { return _is_large; }
1114 bool word_copy_only() const { return _word_copy_only; }
1115 virtual uint size_of() const { return sizeof(ClearArrayNode); }
1116 virtual uint hash() const { return Node::hash() + _is_large; }
1117 virtual bool cmp(const Node& n) const {
1118 return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
1119 }
1120
1121 // Clear the given area of an object or array.
1122 // The start offset must always be aligned mod BytesPerInt.
1123 // The end offset must always be aligned mod BytesPerLong.
1124 // Return the new memory.
1125 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1126 Node* val,
1127 Node* raw_val,
1128 intptr_t start_offset,
1129 intptr_t end_offset,
1130 PhaseGVN* phase);
1131 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1132 Node* val,
1133 Node* raw_val,
1134 intptr_t start_offset,
1135 Node* end_offset,
1136 PhaseGVN* phase);
1137 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1138 Node* raw_val,
1139 Node* start_offset,
1140 Node* end_offset,
1141 PhaseGVN* phase);
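
  // Worked example (illustrative, assuming a 16-byte object header on a
  // 64-bit VM): zeroing the body of a new int[4] clears bytes [16, 32);
  // start_offset == 16 is a multiple of BytesPerInt (4) and end_offset == 32
  // is a multiple of BytesPerLong (8), satisfying the alignment rules above.
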
  // Return the allocation's input memory edge if it is a different instance,
  // or the node itself if it is the one we are looking for.
1144 static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1145 };
1146
1147 //------------------------------MemBar-----------------------------------------
1148 // There are different flavors of Memory Barriers to match the Java Memory
1149 // Model. Monitor-enter and volatile-load act as Acquires: no following ref
1150 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or
1151 // volatile-load. Monitor-exit and volatile-store act as Release: no
1152 // preceding ref can be moved to after them. We insert a MemBar-Release
1153 // before a FastUnlock or volatile-store. All volatiles need to be
1154 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
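
// Illustrative (simplified, platform-dependent) shapes for volatile accesses
// (a sketch; the exact barriers vary by CPU and GC):
//
//   volatile store:  MemBarRelease -> StoreX(mo=release) -> MemBarVolatile
//   volatile load:   LoadX(mo=acquire) -> MemBarAcquire
//
// On strongly ordered hardware several of these collapse to nothing in the
// backend.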
1156 class MemBarNode: public MultiNode {
1157 virtual uint hash() const ; // { return NO_HASH; }
1158 virtual bool cmp( const Node &n ) const ; // Always fail, except on self
1159
1160 virtual uint size_of() const { return sizeof(*this); }
1161 // Memory type this node is serializing. Usually either rawptr or bottom.
1162 const TypePtr* _adr_type;
1163
1164 // How is this membar related to a nearby memory access?
1165 enum {
1166 Standalone,
1167 TrailingLoad,
1168 TrailingStore,
1169 LeadingStore,
1170 TrailingLoadStore,
1171 LeadingLoadStore,
1172 TrailingExpandedArrayCopy
1173 } _kind;
1174
1175 #ifdef ASSERT
1176 uint _pair_idx;
1177 #endif
1178
1179 public:
1180 enum {
1181 Precedent = TypeFunc::Parms // optional edge to force precedence
1182 };
1183 MemBarNode(Compile* C, int alias_idx, Node* precedent);
1184 virtual int Opcode() const = 0;
1185 virtual const class TypePtr *adr_type() const { return _adr_type; }
1186 virtual const Type* Value(PhaseGVN* phase) const;
1187 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1188 virtual uint match_edge(uint idx) const { return 0; }
1189 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1190 virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1191 // Factory method. Builds a wide or narrow membar.
1192 // Optional 'precedent' becomes an extra edge if not null.
1193 static MemBarNode* make(Compile* C, int opcode,
1194 int alias_idx = Compile::AliasIdxBot,
1195 Node* precedent = nullptr);
1196
1197 MemBarNode* trailing_membar() const;
1198 MemBarNode* leading_membar() const;
1199
1200 void set_trailing_load() { _kind = TrailingLoad; }
1201 bool trailing_load() const { return _kind == TrailingLoad; }
1202 bool trailing_store() const { return _kind == TrailingStore; }
1203 bool leading_store() const { return _kind == LeadingStore; }
1204 bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1205 bool leading_load_store() const { return _kind == LeadingLoadStore; }
1206 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1207 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1208 bool standalone() const { return _kind == Standalone; }
1209 void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1210 bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
1211
1212 static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
1213 static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
1214
1215 void remove(PhaseIterGVN *igvn);
1216 };
1217
1218 // "Acquire" - no following ref can move before (but earlier refs can
1219 // follow, like an early Load stalled in cache). Requires multi-cpu
1220 // visibility. Inserted after a volatile load.
1221 class MemBarAcquireNode: public MemBarNode {
1222 public:
1223 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1224 : MemBarNode(C, alias_idx, precedent) {}
1225 virtual int Opcode() const;
1226 };
1227
1228 // "Acquire" - no following ref can move before (but earlier refs can
1229 // follow, like an early Load stalled in cache). Requires multi-cpu
1230 // visibility. Inserted independent of any load, as required
1231 // for intrinsic Unsafe.loadFence().
1232 class LoadFenceNode: public MemBarNode {
1233 public:
1234 LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1235 : MemBarNode(C, alias_idx, precedent) {}
1236 virtual int Opcode() const;
1237 };
1238
1239 // "Release" - no earlier ref can move after (but later refs can move
1240 // up, like a speculative pipelined cache-hitting Load). Requires
1241 // multi-cpu visibility. Inserted before a volatile store.
1242 class MemBarReleaseNode: public MemBarNode {
1243 public:
1244 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
1245 : MemBarNode(C, alias_idx, precedent) {}
1246 virtual int Opcode() const;
1247 };
1248
1249 // "Release" - no earlier ref can move after (but later refs can move
1250 // up, like a speculative pipelined cache-hitting Load). Requires
1251 // multi-cpu visibility. Inserted independent of any store, as required
1252 // for intrinsic Unsafe.storeFence().
1253 class StoreFenceNode: public MemBarNode {
1254 public:
1255 StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1256 : MemBarNode(C, alias_idx, precedent) {}
1257 virtual int Opcode() const;
1258 };
1259
1260 // "Acquire" - no following ref can move before (but earlier refs can
1261 // follow, like an early Load stalled in cache). Requires multi-cpu
1262 // visibility. Inserted after a FastLock.
1263 class MemBarAcquireLockNode: public MemBarNode {
1264 public:
1265 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
1266 : MemBarNode(C, alias_idx, precedent) {}
1267 virtual int Opcode() const;
1268 };
1269
1270 // "Release" - no earlier ref can move after (but later refs can move
1271 // up, like a speculative pipelined cache-hitting Load). Requires
1272 // multi-cpu visibility. Inserted before a FastUnLock.
1273 class MemBarReleaseLockNode: public MemBarNode {
1274 public:
1275 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
1276 : MemBarNode(C, alias_idx, precedent) {}
1277 virtual int Opcode() const;
1278 };
1279
1280 class MemBarStoreStoreNode: public MemBarNode {
1281 public:
1282 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
1283 : MemBarNode(C, alias_idx, precedent) {
1284 init_class_id(Class_MemBarStoreStore);
1285 }
1286 virtual int Opcode() const;
1287 };
1288
1289 class StoreStoreFenceNode: public MemBarNode {
1290 public:
1291 StoreStoreFenceNode(Compile* C, int alias_idx, Node* precedent)
1292 : MemBarNode(C, alias_idx, precedent) {}
1293 virtual int Opcode() const;
1294 };
1295
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
1298 class MemBarVolatileNode: public MemBarNode {
1299 public:
1300 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
1301 : MemBarNode(C, alias_idx, precedent) {}
1302 virtual int Opcode() const;
1303 };
1304
1305 // Ordering within the same CPU. Used to order unsafe memory references
1306 // inside the compiler when we lack alias info. Not needed "outside" the
1307 // compiler because the CPU does all the ordering for us.
1308 class MemBarCPUOrderNode: public MemBarNode {
1309 public:
1310 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1311 : MemBarNode(C, alias_idx, precedent) {}
1312 virtual int Opcode() const;
1313 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1314 };
1315
1316 class OnSpinWaitNode: public MemBarNode {
1317 public:
1318 OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
1319 : MemBarNode(C, alias_idx, precedent) {}
1320 virtual int Opcode() const;
1321 };
1322
// Isolation of object setup after an AllocateNode and before the next safepoint.
1324 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1325 class InitializeNode: public MemBarNode {
1326 friend class AllocateNode;
1327
1328 enum {
1329 Incomplete = 0,
1330 Complete = 1,
1331 WithArraycopy = 2
1332 };
1333 int _is_complete;
1334
1335 bool _does_not_escape;
1336
1337 public:
1338 enum {
1339 Control = TypeFunc::Control,
1340 Memory = TypeFunc::Memory, // MergeMem for states affected by this op
1341 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address
1342 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP)
1343 };
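
  // Illustrative shape after parsing (a sketch; memnode.cpp has the
  // authoritative description):
  //   rawoop = the TypeFunc::Parms projection of the AllocateNode
  //   init   = new InitializeNode(C, Compile::AliasIdxRaw, rawoop)
  // Captured stores are chained through their Memory edges on the raw slice;
  // in(RawStores) is the most recently captured store, or top if none.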
1344
1345 InitializeNode(Compile* C, int adr_type, Node* rawoop);
1346 virtual int Opcode() const;
1347 virtual uint size_of() const { return sizeof(*this); }
1348 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1349 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress
1350
1351 // Manage incoming memory edges via a MergeMem on in(Memory):
1352 Node* memory(uint alias_idx);
1353
1354 // The raw memory edge coming directly from the Allocation.
1355 // The contents of this memory are *always* all-zero-bits.
1356 Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
1357
1358 // Return the corresponding allocation for this initialization (or null if none).
1359 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1360 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1361 AllocateNode* allocation();
1362
1363 // Anything other than zeroing in this init?
1364 bool is_non_zero();
1365
  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by initialization
  // of the new memory to zero, then by any explicit initializing stores.
1369 bool is_complete() { return _is_complete != Incomplete; }
1370 bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
1371
1372 // Mark complete. (Must not yet be complete.)
1373 void set_complete(PhaseGVN* phase);
1374 void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
1375
1376 bool does_not_escape() { return _does_not_escape; }
1377 void set_does_not_escape() { _does_not_escape = true; }
1378
1379 #ifdef ASSERT
1380 // ensure all non-degenerate stores are ordered and non-overlapping
1381 bool stores_are_sane(PhaseValues* phase);
1382 #endif //ASSERT
1383
1384 // See if this store can be captured; return offset where it initializes.
1385 // Return 0 if the store cannot be moved (any sort of problem).
1386 intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);
1387
1388 // Capture another store; reformat it to write my internal raw memory.
1389 // Return the captured copy, else null if there is some sort of problem.
1390 Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);
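
  // Illustrative effect of capture (a sketch): a store to the fresh object,
  //   StoreI(ctl, mem, AddP(oop, off), val)
  // is re-expressed against this node's raw control and raw memory, roughly
  //   StoreI(init_ctl, init_raw_mem, AddP(rawoop, off), val)
  // and linked into the RawStores chain, folding it into the initialization.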
1391
1392 // Find captured store which corresponds to the range [start..start+size).
1393 // Return my own memory projection (meaning the initial zero bits)
1394 // if there is no such store. Return null if there is a problem.
1395 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseValues* phase);
1396
1397 // Called when the associated AllocateNode is expanded into CFG.
1398 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1399 intptr_t header_size, Node* size_in_bytes,
1400 PhaseIterGVN* phase);
1401
1402 // An Initialize node has multiple memory projections. Helper methods used when the node is removed.
1403 // For use at parse time
1404 void replace_mem_projs_by(Node* mem, Compile* C);
1405 // For use with IGVN
1406 void replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn);
1407
1408 // Does a NarrowMemProj with this adr_type and this node as input already exist?
1409 bool already_has_narrow_mem_proj_with_adr_type(const TypePtr* adr_type) const;
1410
  // Used during matching: find the MachProj memory projection, if there is one.
  // There should be at most one.
1413 MachProjNode* mem_mach_proj() const;
1414
1415 private:
1416 void remove_extra_zeroes();
1417
1418 // Find out where a captured store should be placed (or already is placed).
1419 int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1420 PhaseValues* phase);
1421
1422 static intptr_t get_store_offset(Node* st, PhaseValues* phase);
1423
1424 Node* make_raw_address(intptr_t offset, PhaseGVN* phase);
1425
1426 bool detect_init_independence(Node* value, PhaseGVN* phase);
1427
1428 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1429 PhaseGVN* phase);
1430
1431 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1432
  // Iterate with i over all NarrowMemProj uses, calling callback for each.
1434 template <class Callback, class Iterator> NarrowMemProjNode* apply_to_narrow_mem_projs_any_iterator(Iterator i, Callback callback) const {
1435 auto filter = [&](ProjNode* proj) {
1436 if (proj->is_NarrowMemProj() && callback(proj->as_NarrowMemProj()) == BREAK_AND_RETURN_CURRENT_PROJ) {
1437 return BREAK_AND_RETURN_CURRENT_PROJ;
1438 }
1439 return CONTINUE;
1440 };
1441 ProjNode* res = apply_to_projs_any_iterator(i, filter);
1442 if (res == nullptr) {
1443 return nullptr;
1444 }
1445 return res->as_NarrowMemProj();
1446 }
1447
1448 public:
1449
  // The callback is allowed to add new uses, which will then also be iterated over.
1451 template <class Callback> void for_each_narrow_mem_proj_with_new_uses(Callback callback) const {
1452 auto callback_always_continue = [&](NarrowMemProjNode* proj) {
1453 callback(proj);
1454 return MultiNode::CONTINUE;
1455 };
1456 DUIterator i = outs();
1457 apply_to_narrow_mem_projs_any_iterator(UsesIterator(i, this), callback_always_continue);
1458 }
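
  // Illustrative use (a hypothetical call site; 'igvn' and 'new_mem' are
  // assumptions for exposition):
  //   init->for_each_narrow_mem_proj_with_new_uses([&](NarrowMemProjNode* proj) {
  //     igvn->replace_node(proj, new_mem);  // retire each narrow memory proj
  //   });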
1459 };
1460
1461 //------------------------------MergeMem---------------------------------------
1462 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1463 class MergeMemNode: public Node {
  virtual uint hash() const;                // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;  // Always fail, except on self
1466 friend class MergeMemStream;
1467 MergeMemNode(Node* def); // clients use MergeMemNode::make
1468
1469 public:
1470 // If the input is a whole memory state, clone it with all its slices intact.
1471 // Otherwise, make a new memory state with just that base memory input.
1472 // In either case, the result is a newly created MergeMem.
1473 static MergeMemNode* make(Node* base_memory);
1474
1475 virtual int Opcode() const;
1476 virtual Node* Identity(PhaseGVN* phase);
1477 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1478 virtual uint ideal_reg() const { return NotAMachineReg; }
1479 virtual uint match_edge(uint idx) const { return 0; }
1480 virtual const RegMask &out_RegMask() const;
1481 virtual const Type *bottom_type() const { return Type::MEMORY; }
1482 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1483 // sparse accessors
1484 // Fetch the previously stored "set_memory_at", or else the base memory.
1485 // (Caller should clone it if it is a phi-nest.)
1486 Node* memory_at(uint alias_idx) const;
1487 // set the memory, regardless of its previous value
1488 void set_memory_at(uint alias_idx, Node* n);
  // The "base" memory backs every alias category that has no explicit edge
  // (it provides the non-finite part of the mapping).
1490 Node* base_memory() const { return in(Compile::AliasIdxBot); }
1491 // warning: setting the base can implicitly set any of the other slices too
1492 void set_base_memory(Node* def);
1493 // sentinel value which denotes a copy of the base memory:
1494 Node* empty_memory() const { return in(Compile::AliasIdxTop); }
1495 static Node* make_empty_memory(); // where the sentinel comes from
1496 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1497 // hook for the iterator, to perform any necessary setup
1498 void iteration_setup(const MergeMemNode* other = nullptr);
1499 // push sentinels until I am at least as long as the other (semantic no-op)
1500 void grow_to_match(const MergeMemNode* other);
1501 bool verify_sparse() const PRODUCT_RETURN0;
1502 #ifndef PRODUCT
1503 virtual void dump_spec(outputStream *st) const;
1504 #endif
1505 };
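
// Illustrative use of the sparse accessors (a sketch; 'gvn', 'ctl', 'adr',
// 'val' and 'alias_idx' are assumptions for exposition, and StoreINode is
// declared earlier in this header):
//   MergeMemNode* mm = MergeMemNode::make(all_mem);
//   Node* slice = mm->memory_at(alias_idx);      // prior state of one slice
//   Node* st = gvn.transform(new StoreINode(ctl, slice, adr, adr_type, val,
//                                           MemNode::unordered));
//   mm->set_memory_at(alias_idx, st);            // other slices are untouched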
1506
1507 class MergeMemStream : public StackObj {
1508 private:
1509 MergeMemNode* _mm;
1510 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
1511 Node* _mm_base; // loop-invariant base memory of _mm
1512 int _idx;
1513 int _cnt;
1514 Node* _mem;
1515 Node* _mem2;
1516 int _cnt2;
1517
1518 void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
1519 // subsume_node will break sparseness at times, whenever a memory slice
1520 // folds down to a copy of the base ("fat") memory. In such a case,
1521 // the raw edge will update to base, although it should be top.
1522 // This iterator will recognize either top or base_memory as an
1523 // "empty" slice. See is_empty, is_empty2, and next below.
1524 //
1525 // The sparseness property is repaired in MergeMemNode::Ideal.
1526 // As long as access to a MergeMem goes through this iterator
1527 // or the memory_at accessor, flaws in the sparseness will
1528 // never be observed.
1529 //
1530 // Also, iteration_setup repairs sparseness.
1531 assert(mm->verify_sparse(), "please, no dups of base");
1532 assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");
1533
1534 _mm = mm;
1535 _mm_base = mm->base_memory();
1536 _mm2 = mm2;
1537 _cnt = mm->req();
1538 _idx = Compile::AliasIdxBot-1; // start at the base memory
1539 _mem = nullptr;
1540 _mem2 = nullptr;
1541 }
1542
1543 #ifdef ASSERT
1544 Node* check_memory() const {
1545 if (at_base_memory())
1546 return _mm->base_memory();
1547 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1548 return _mm->memory_at(_idx);
1549 else
1550 return _mm_base;
1551 }
1552 Node* check_memory2() const {
    return at_base_memory() ? _mm2->base_memory() : _mm2->memory_at(_idx);
1554 }
1555 #endif
1556
1557 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1558 void assert_synch() const {
1559 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1560 "no side-effects except through the stream");
1561 }
1562
1563 public:
1564
1565 // expected usages:
1566 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
1567 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
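  //
  // e.g. (illustrative sketch) rewrite every non-empty slice of one merge,
  // where 'improve' is a hypothetical per-slice transformation:
  //   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
  //     mms.set_memory(improve(mms.memory()));
  //   }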
1568
1569 // iterate over one merge
1570 MergeMemStream(MergeMemNode* mm) {
1571 mm->iteration_setup();
1572 init(mm);
1573 DEBUG_ONLY(_cnt2 = 999);
1574 }
1575 // iterate in parallel over two merges
1576 // only iterates through non-empty elements of mm2
1577 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1578 assert(mm2, "second argument must be a MergeMem also");
1579 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
1580 mm->iteration_setup(mm2);
1581 init(mm, mm2);
1582 _cnt2 = mm2->req();
1583 }
1584 #ifdef ASSERT
1585 ~MergeMemStream() {
1586 assert_synch();
1587 }
1588 #endif
1589
1590 MergeMemNode* all_memory() const {
1591 return _mm;
1592 }
1593 Node* base_memory() const {
1594 assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1595 return _mm_base;
1596 }
1597 const MergeMemNode* all_memory2() const {
1598 assert(_mm2 != nullptr, "");
1599 return _mm2;
1600 }
1601 bool at_base_memory() const {
1602 return _idx == Compile::AliasIdxBot;
1603 }
1604 int alias_idx() const {
1605 assert(_mem, "must call next 1st");
1606 return _idx;
1607 }
1608
1609 const TypePtr* adr_type() const {
1610 return Compile::current()->get_adr_type(alias_idx());
1611 }
1612
1613 const TypePtr* adr_type(Compile* C) const {
1614 return C->get_adr_type(alias_idx());
1615 }
1616 bool is_empty() const {
1617 assert(_mem, "must call next 1st");
1618 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1619 return _mem->is_top();
1620 }
1621 bool is_empty2() const {
1622 assert(_mem2, "must call next 1st");
1623 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1624 return _mem2->is_top();
1625 }
1626 Node* memory() const {
1627 assert(!is_empty(), "must not be empty");
1628 assert_synch();
1629 return _mem;
1630 }
1631 // get the current memory, regardless of empty or non-empty status
1632 Node* force_memory() const {
1633 assert(!is_empty() || !at_base_memory(), "");
1634 // Use _mm_base to defend against updates to _mem->base_memory().
1635 Node *mem = _mem->is_top() ? _mm_base : _mem;
1636 assert(mem == check_memory(), "");
1637 return mem;
1638 }
1639 Node* memory2() const {
1640 assert(_mem2 == check_memory2(), "");
1641 return _mem2;
1642 }
1643 void set_memory(Node* mem) {
1644 if (at_base_memory()) {
1645 // Note that this does not change the invariant _mm_base.
1646 _mm->set_base_memory(mem);
1647 } else {
1648 _mm->set_memory_at(_idx, mem);
1649 }
1650 _mem = mem;
1651 assert_synch();
1652 }
1653
1654 // Recover from a side effect to the MergeMemNode.
1655 void set_memory() {
1656 _mem = _mm->in(_idx);
1657 }
1658
1659 bool next() { return next(false); }
1660 bool next2() { return next(true); }
1661
1662 bool next_non_empty() { return next_non_empty(false); }
1663 bool next_non_empty2() { return next_non_empty(true); }
1664 // next_non_empty2 can yield states where is_empty() is true
1665
1666 private:
1667 // find the next item, which might be empty
1668 bool next(bool have_mm2) {
1669 assert((_mm2 != nullptr) == have_mm2, "use other next");
1670 assert_synch();
1671 if (++_idx < _cnt) {
1672 // Note: This iterator allows _mm to be non-sparse.
1673 // It behaves the same whether _mem is top or base_memory.
1674 _mem = _mm->in(_idx);
1675 if (have_mm2)
1676 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
1677 return true;
1678 }
1679 return false;
1680 }
1681
1682 // find the next non-empty item
1683 bool next_non_empty(bool have_mm2) {
1684 while (next(have_mm2)) {
1685 if (!is_empty()) {
1686 // make sure _mem2 is filled in sensibly
1687 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
1688 return true;
1689 } else if (have_mm2 && !is_empty2()) {
1690 return true; // is_empty() == true
1691 }
1692 }
1693 return false;
1694 }
1695 };
1696
1697 // cachewb node for guaranteeing writeback of the cache line at a
1698 // given address to (non-volatile) RAM
1699 class CacheWBNode : public Node {
1700 public:
1701 CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
1702 virtual int Opcode() const;
1703 virtual uint ideal_reg() const { return NotAMachineReg; }
1704 virtual uint match_edge(uint idx) const { return (idx == 2); }
1705 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1706 virtual const Type *bottom_type() const { return Type::MEMORY; }
1707 };
1708
1709 // cachewb pre sync node for ensuring that writebacks are serialised
1710 // relative to preceding or following stores
1711 class CacheWBPreSyncNode : public Node {
1712 public:
1713 CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1714 virtual int Opcode() const;
1715 virtual uint ideal_reg() const { return NotAMachineReg; }
1716 virtual uint match_edge(uint idx) const { return false; }
1717 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1718 virtual const Type *bottom_type() const { return Type::MEMORY; }
1719 };
1720
// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
1723 class CacheWBPostSyncNode : public Node {
1724 public:
1725 CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
1726 virtual int Opcode() const;
1727 virtual uint ideal_reg() const { return NotAMachineReg; }
1728 virtual uint match_edge(uint idx) const { return false; }
1729 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1730 virtual const Type *bottom_type() const { return Type::MEMORY; }
1731 };
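
// Illustrative emission order for a cache-writeback intrinsic (a sketch):
//   CacheWBPreSync; CacheWB(line_0); ...; CacheWB(line_n); CacheWBPostSync
// The pre/post sync nodes bracket the CacheWB nodes so that the writeback
// is ordered with respect to surrounding stores.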
1732
1733 //------------------------------Prefetch---------------------------------------
1734
// Allocation prefetch which may fault; the TLAB size has to be adjusted.
1736 class PrefetchAllocationNode : public Node {
1737 public:
1738 PrefetchAllocationNode(Node *mem, Node *adr) : Node(nullptr,mem,adr) {}
1739 virtual int Opcode() const;
1740 virtual uint ideal_reg() const { return NotAMachineReg; }
1741 virtual uint match_edge(uint idx) const { return idx==2; }
1742 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1743 };
1744
1745 #endif // SHARE_OPTO_MEMNODE_HPP