src/hotspot/share/opto/callnode.hpp
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
virtual const RegMask &in_RegMask(uint) const;
- virtual Node *match( const ProjNode *proj, const Matcher *m );
+ virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
virtual void dump_compact_spec(outputStream *st) const;
#endif
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
virtual int Opcode() const;
- static const TypeTuple *osr_domain();
};
//------------------------------ParmNode---------------------------------------
// Incoming parameters
#endif
};
// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
- class CallProjections : public StackObj {
+ class CallProjections {
public:
Node* fallthrough_proj;
Node* fallthrough_catchproj;
Node* fallthrough_memproj;
Node* fallthrough_ioproj;
Node* catchall_catchproj;
Node* catchall_memproj;
Node* catchall_ioproj;
- Node* resproj;
Node* exobj;
+ uint nb_resproj;
+ Node* resproj[1]; // at least one projection
+
+ CallProjections(uint nbres) {
+ fallthrough_proj = nullptr;
+ fallthrough_catchproj = nullptr;
+ fallthrough_memproj = nullptr;
+ fallthrough_ioproj = nullptr;
+ catchall_catchproj = nullptr;
+ catchall_memproj = nullptr;
+ catchall_ioproj = nullptr;
+ exobj = nullptr;
+ nb_resproj = nbres;
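+ // Slot 0 always exists (the array is declared with one element), so null it even when nbres == 0.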
+ resproj[0] = nullptr;
+ for (uint i = 1; i < nb_resproj; i++) {
+ resproj[i] = nullptr;
+ }
+ }
+
};
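// A minimal sketch (editorial, not part of this change) of how such an
// object can be created: since resproj[] is a trailing array declared with
// one element, the caller over-allocates so the array can hold nbres slots.
// NEW_RESOURCE_ARRAY and the placement new are illustrative assumptions:
//
//   size_t size = sizeof(CallProjections);
//   if (nbres > 1) {
//     size += (nbres - 1) * sizeof(Node*);
//   }
//   char* storage = NEW_RESOURCE_ARRAY(char, size);
//   CallProjections* projs = new (storage) CallProjections(nbres);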
class CallGenerator;
//------------------------------CallNode---------------------------------------
float _cnt; // Estimate of number of times called
CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
const char* _name; // Printable name, if _method is null
CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
- : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
+ : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
_tf(tf),
_entry_point(addr),
_cnt(COUNT_UNKNOWN),
_generator(nullptr),
_name(nullptr)
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
virtual Node* Identity(PhaseGVN* phase) { return this; }
virtual bool cmp(const Node &n) const;
virtual uint size_of() const = 0;
virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
- virtual Node* match(const ProjNode* proj, const Matcher* m);
+ virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
virtual uint ideal_reg() const { return NotAMachineReg; }
// Are we guaranteed that this node is a safepoint? Not true for leaf calls and
// for some macro nodes whose expansion does not have a safepoint on the fast path.
virtual bool guaranteed_safepoint() { return true; }
// For macro nodes, the JVMState gets modified during expansion. If calls
// Returns true if the call may modify n
virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
// Does this node have a use of n other than in debug information?
bool has_non_debug_use(Node* n);
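+ // Does this node have a use of n in debug information?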
+ bool has_debug_use(Node* n);
// Returns the unique CheckCastPP of a call
// or result projection, 'this' if there are several CheckCastPPs,
// or null if there is none.
Node* result_cast();
// Does this node return a pointer?
bool returns_pointer() const {
- const TypeTuple* r = tf()->range();
- return (r->cnt() > TypeFunc::Parms &&
+ const TypeTuple* r = tf()->range_sig();
+ return (!tf()->returns_inline_type_as_fields() &&
+ r->cnt() > TypeFunc::Parms &&
r->field_at(TypeFunc::Parms)->isa_ptr());
}
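// Editorial illustration (the tuple layouts below are an assumption): when a
// call returns an inline type as fields, the result is scattered across
// several range_cc() slots instead of a single oop, e.g.
//
//   range_sig(): ( ..., MyValue* )           -- one oop result
//   range_cc():  ( ..., oop, int x, int y )  -- result fields in registers
//
// so returns_pointer() answers false for such calls even though range_sig()
// still types the result as a pointer.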
// Collect all the interesting edges from a call for use in
// replacing the call by something else. Used by macro expansion
// and the late inlining support.
- void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
+ CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
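// Hypothetical usage sketch (editorial): the callee now allocates and
// returns the container instead of filling in a caller-owned StackObj, and
// clients walk the variable number of result projections. "igvn", "new_mem"
// and "results" are placeholders:
//
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/ true);
//   igvn.replace_node(projs->fallthrough_memproj, new_mem);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     if (projs->resproj[i] != nullptr) {
//       igvn.replace_node(projs->resproj[i], results[i]);
//     }
//   }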
virtual uint match_edge(uint idx) const;
bool is_call_to_arraycopystub() const;
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
virtual bool cmp( const Node &n ) const;
virtual uint size_of() const; // Size is bigger
+
+ bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
+
public:
CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
: CallJavaNode(tf, addr, method) {
init_class_id(Class_CallStaticJava);
if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
+ const TypeTuple *r = tf->range_sig();
+ if (InlineTypeReturnedAsFields &&
+ method != nullptr &&
+ method->is_method_handle_intrinsic() &&
+ r->cnt() > TypeFunc::Parms &&
+ r->field_at(TypeFunc::Parms)->isa_oopptr() &&
+ r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
+ // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
+ init_flags(Flag_is_macro);
+ C->add_macro_node(this);
+ }
}
CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
: CallJavaNode(tf, addr, nullptr) {
init_class_id(Class_CallStaticJava);
// This node calls a runtime stub, which often has narrow memory effects.
: CallLeafNode(tf, addr, name, adr_type)
{
init_class_id(Class_CallLeafNoFP);
}
virtual int Opcode() const;
+ virtual uint match_edge(uint idx) const;
};
//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
KlassNode, // type (maybe dynamic) of the obj.
InitialTest, // slow-path test (may be constant)
ALength, // array length (or TOP if none)
ValidLengthTest,
+ InlineType, // InlineTypeNode if this is an inline type allocation
+ DefaultValue, // default value in case of non-flat inline type array
+ RawDefaultValue, // same as above but as raw machine word
ParmLimit
};
static const TypeFunc* alloc_type(const Type* t) {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[AllocSize] = TypeInt::POS;
fields[KlassNode] = TypeInstPtr::NOTNULL;
fields[InitialTest] = TypeInt::BOOL;
fields[ALength] = t; // length (can be a bad length)
fields[ValidLengthTest] = TypeInt::BOOL;
+ fields[InlineType] = Type::BOTTOM;
+ fields[DefaultValue] = TypeInstPtr::NOTNULL;
+ fields[RawDefaultValue] = TypeX_X;
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
// create result type (range)
fields = TypeTuple::fields(1);
// Result of Escape Analysis
bool _is_scalar_replaceable;
bool _is_non_escaping;
// True when MemBar for new is redundant with MemBar at initializer exit
bool _is_allocation_MemBar_redundant;
+ bool _larval; // Whether this allocates a larval (still under construction) inline type
virtual uint size_of() const; // Size is bigger
AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
- Node *size, Node *klass_node, Node *initial_test);
+ Node *size, Node *klass_node, Node *initial_test,
+ InlineTypeNode* inline_type_node = nullptr);
// Expansion modifies the JVMState, so we need to deep clone it
virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual bool guaranteed_safepoint() { return false; }
// Invoke this method when the MemBar at the exit of the initializer
// post-dominates the allocation node.
void compute_MemBar_redundancy(ciMethod* initializer);
bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
- Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
+ Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
};
//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
- Node* initial_test, Node* count_val, Node* valid_length_test)
+ Node* initial_test, Node* count_val, Node* valid_length_test,
+ Node* default_value, Node* raw_default_value)
: AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
initial_test)
{
init_class_id(Class_AllocateArray);
set_req(AllocateNode::ALength, count_val);
set_req(AllocateNode::ValidLengthTest, valid_length_test);
+ init_req(AllocateNode::DefaultValue, default_value);
+ init_req(AllocateNode::RawDefaultValue, raw_default_value);
}
+ virtual uint size_of() const { return sizeof(*this); }
virtual int Opcode() const;
// Dig the length operand out of an array allocation site.
Node* Ideal_length() {
return in(AllocateNode::ALength);
// create result type (range)
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
- return TypeFunc::make(domain,range);
+ return TypeFunc::make(domain, range);
}
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {