< prev index next > src/hotspot/share/opto/memnode.hpp
Print this page
// product builds and retain the debug info for debug builds.
// Returns the raw address type recorded at construction: _adr_type in
// debug builds, nullptr in product builds (DEBUG_ONLY/NOT_DEBUG select
// the expression per build flavor).
const TypePtr *raw_adr_type() const {
return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
}
+ #ifdef ASSERT
+ // Debug-only setter for the raw address type above; _adr_type is only
+ // present in debug builds, so the setter is compiled under ASSERT.
+ void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
+ #endif
+
// Return the barrier data of n, if available, or 0 otherwise.
static uint8_t barrier_data(const Node* n);
// Map a load or store opcode to its corresponding store opcode.
// (Return -1 if unknown.)
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
virtual const Type* Value(PhaseGVN* phase) const;
// Common methods for LoadKlass and LoadNKlass nodes.
! const Type* klass_value_common(PhaseGVN* phase) const;
Node* klass_identity_common(PhaseGVN* phase);
virtual uint ideal_reg() const;
virtual const Type *bottom_type() const;
// Following method is copied from TypeNode:
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
virtual const Type* Value(PhaseGVN* phase) const;
// Common methods for LoadKlass and LoadNKlass nodes.
! const Type* klass_value_common(PhaseGVN* phase, bool fold_for_arrays) const;
Node* klass_identity_common(PhaseGVN* phase);
virtual uint ideal_reg() const;
virtual const Type *bottom_type() const;
// Following method is copied from TypeNode:
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
// c = control, mem = memory, adr = address, at = address type,
// t = value type, mo = memory ordering; the control dependency defaults
// to DependsOnlyOnTest like other loads.
LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
: LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
// Narrow oops are matched into narrow-oop registers.
virtual uint ideal_reg() const { return Op_RegN; }
// Corresponding store opcode for this load.
virtual int store_Opcode() const { return Op_StoreN; }
virtual BasicType memory_type() const { return T_NARROWOOP; }
};
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
// Private ctor: klass loads have no control input (nullptr); callers
// must go through the make() factory below.
! LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
! : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
public:
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
// A klass load is immutable data, so it only depends on its test.
virtual bool depends_only_on_test() const { return true; }
// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
! const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
// c = control, mem = memory, adr = address, at = address type,
// t = value type, mo = memory ordering; the control dependency defaults
// to DependsOnlyOnTest like other loads.
LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
: LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
+ // New in this change: LoadN gains its own Ideal() transformation
+ // (implementation in memnode.cpp -- confirm there what it rewrites).
+ virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
virtual int Opcode() const;
// Narrow oops are matched into narrow-oop registers.
virtual uint ideal_reg() const { return Op_RegN; }
// Corresponding store opcode for this load.
virtual int store_Opcode() const { return Op_StoreN; }
virtual BasicType memory_type() const { return T_NARROWOOP; }
};
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
+ // Flag forwarded to klass_value_common(); presumably controls whether
+ // Value() may constant-fold klass loads from arrays -- confirm in
+ // memnode.cpp. Nodes differing only in this flag must not be merged by
+ // GVN, hence the size_of/hash/cmp overrides below.
+ bool _fold_for_arrays;
+
+ virtual uint size_of() const { return sizeof(*this); }
+ // Fold the flag into the hash so GVN keeps differing nodes distinct.
+ virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
+ virtual bool cmp( const Node &n ) const {
+ return _fold_for_arrays == ((LoadKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
+ }
+
private:
// Private ctor: klass loads have no control input (nullptr); callers
// must go through the make() factory below.
! LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo, bool fold_for_arrays)
! : LoadPNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
public:
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
// A klass load is immutable data, so it only depends on its test.
virtual bool depends_only_on_test() const { return true; }
// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
! const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT, bool fold_for_arrays = true);
};
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
// With compact headers, the input address (adr) does not point at the exact
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
! friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
! LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
! : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
public:
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegN; }
virtual int store_Opcode() const { return Op_StoreNKlass; }
// middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
+ // Mirrors LoadKlassNode::_fold_for_arrays for the narrow-klass variant;
+ // kept out of GVN merging via the size_of/hash/cmp overrides below.
+ bool _fold_for_arrays;
+
+ virtual uint size_of() const { return sizeof(*this); }
+ // Fold the flag into the hash so GVN keeps differing nodes distinct.
+ virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
+ virtual bool cmp( const Node &n ) const {
+ return _fold_for_arrays == ((LoadNKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
+ }
+
private:
// Only LoadKlassNode::make() may construct this node (it decides between
// the wide and narrow klass-load variants), hence the friend + private ctor.
! friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*, bool fold_for_arrays);
! LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo, bool fold_for_arrays)
! : LoadNNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
public:
virtual int Opcode() const;
// Narrow klass pointers are matched into narrow-oop registers.
virtual uint ideal_reg() const { return Op_RegN; }
virtual int store_Opcode() const { return Op_StoreNKlass; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
// A klass load is immutable data, so it only depends on its test.
virtual bool depends_only_on_test() const { return true; }
};
-
//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
// On platforms with weak memory ordering (e.g., PPC) we distinguish
if (_require_atomic_access) st->print(" Atomic!");
}
#endif
};
+ // Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
+ class StoreLSpecialNode : public StoreNode {
+
+ public:
+ // oop_off, when non-null, is added as an extra input edge (ValueIn + 1)
+ // and matched in the backend (see match_edge below). The access is
+ // always marked mismatched: a long is stored over memory whose declared
+ // layout differs (flat field containing an oop at 'oop_off').
+ StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
+ : StoreNode(c, mem, adr, at, val, mo) {
+ set_mismatched_access();
+ if (oop_off != nullptr) {
+ add_req(oop_off);
+ }
+ }
+ virtual int Opcode() const;
+ virtual BasicType memory_type() const { return T_LONG; }
+
+ // Address, value, and the optional oop-offset edge all participate in
+ // instruction matching.
+ virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
+ idx == MemNode::ValueIn ||
+ idx == MemNode::ValueIn + 1; }
+ };
+
+
//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
bool _is_large;
public:
// Ctor (old version, before the fill-value input was added):
// control, array memory, word count, base address, large-array flag.
! ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
! : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
init_class_id(Class_ClearArray);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::MEMORY; }
// ClearArray modifies array elements, and so affects only the
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
bool _is_large;
+ bool _word_copy_only;
public:
// Ctor (new version): takes an explicit fill value 'val' as a fifth input.
! ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
! : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
+ // _word_copy_only is true iff val's type is a long that is not the
+ // constant zero -- presumably a non-zero fill cannot use byte-wise
+ // clearing and must copy whole words; confirm against the users of
+ // word_copy_only() in memnode.cpp/macro.cpp.
+ _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
init_class_id(Class_ClearArray);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::MEMORY; }
// ClearArray modifies array elements, and so affects only the
virtual const class TypePtr *adr_type() const;
virtual Node* Identity(PhaseGVN* phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint match_edge(uint idx) const;
bool is_large() const { return _is_large; }
+ bool word_copy_only() const { return _word_copy_only; }
// Clear the given area of an object or array.
// The start offset must always be aligned mod BytesPerInt.
// The end offset must always be aligned mod BytesPerLong.
// Return the new memory.
static Node* clear_memory(Node* control, Node* mem, Node* dest,
+ Node* val,
+ Node* raw_val,
intptr_t start_offset,
intptr_t end_offset,
PhaseGVN* phase);
static Node* clear_memory(Node* control, Node* mem, Node* dest,
+ Node* val,
+ Node* raw_val,
intptr_t start_offset,
Node* end_offset,
PhaseGVN* phase);
static Node* clear_memory(Node* control, Node* mem, Node* dest,
+ Node* raw_val,
Node* start_offset,
Node* end_offset,
PhaseGVN* phase);
// Return allocation input memory edge if it is different instance
// or itself if it is the one we are looking for.
virtual const class TypePtr *adr_type() const { return _adr_type; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint match_edge(uint idx) const { return 0; }
virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
! virtual Node *match( const ProjNode *proj, const Matcher *m );
// Factory method. Builds a wide or narrow membar.
// Optional 'precedent' becomes an extra edge if not null.
static MemBarNode* make(Compile* C, int opcode,
int alias_idx = Compile::AliasIdxBot,
Node* precedent = nullptr);
virtual const class TypePtr *adr_type() const { return _adr_type; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint match_edge(uint idx) const { return 0; }
virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
! virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
// Factory method. Builds a wide or narrow membar.
// Optional 'precedent' becomes an extra edge if not null.
static MemBarNode* make(Compile* C, int opcode,
int alias_idx = Compile::AliasIdxBot,
Node* precedent = nullptr);
< prev index next >