src/hotspot/share/c1/c1_GraphBuilder.hpp
#include "ci/ciStreams.hpp"
#include "compiler/compileLog.hpp"
class MemoryBuffer;
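+ // Records a field access whose emission has been delayed so that a chain
+ // of accesses to flat (inline type) fields can be folded into a single
+ // load: _obj is the accessed object, _holder/_offset identify the field,
+ // and inc_offset() accumulates the offsets of nested flat fields.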
+ class DelayedFieldAccess : public CompilationResourceObj {
+ private:
+ Value _obj;
+ ciInstanceKlass* _holder;
+ int _offset;
+ ValueStack* _state_before;
+
+ public:
+ DelayedFieldAccess(Value obj, ciInstanceKlass* holder, int offset, ValueStack* state_before)
+ : _obj(obj), _holder(holder), _offset(offset), _state_before(state_before) { }
+
+ Value obj() const { return _obj; }
+ ciInstanceKlass* holder() const { return _holder; }
+ int offset() const { return _offset; }
+ void inc_offset(int offset) { _offset += offset; }
+ ValueStack* state_before() const { return _state_before; }
+ };
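+
+ // A minimal usage sketch (obj, holder, off, state, and nested_field are
+ // hypothetical locals, not part of this header): on a getfield of a flat
+ // field the builder can record the access instead of emitting it, then
+ // fold a nested flat getfield into the same pending access:
+ //
+ //   set_pending_field_access(new DelayedFieldAccess(obj, holder, off, state));
+ //   // ... on a subsequent getfield of a flat field of that value:
+ //   pending_field_access()->inc_offset(nested_field->offset_in_bytes());
+ //   // once the chain ends, a single LoadField at the combined offset is
+ //   // emitted and the pending access is cleared.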
+
class GraphBuilder {
friend class JfrResolution;
private:
// Per-scope data. These are pushed and popped as we descend into
// inlined methods. Currently in order to generate good code in the
// inliner, we must attempt to inline methods directly into the
// basic block we are parsing; this adds complexity.
BlockBegin* _block; // the current block
ValueStack* _state; // the current execution state
Instruction* _last; // the last instruction added
bool _skip_block; // skip processing of the rest of this block
+ // support for optimization of accesses to flat fields and flat arrays
+ DelayedFieldAccess* _pending_field_access; // delayed access to a flat field
+ DelayedLoadIndexed* _pending_load_indexed; // delayed load from a flat array
+
// accessors
ScopeData* scope_data() const { return _scope_data; }
Compilation* compilation() const { return _compilation; }
BlockList* bci2block() const { return scope_data()->bci2block(); }
ValueMap* vmap() const { assert(UseLocalValueNumbering, "should not access otherwise"); return _vmap; }
ciBytecodeStream* stream() const { return scope_data()->stream(); }
Instruction* last() const { return _last; }
Bytecodes::Code code() const { return stream()->cur_bc(); }
int bci() const { return stream()->cur_bci(); }
int next_bci() const { return stream()->next_bci(); }
+ bool has_pending_field_access() { return _pending_field_access != nullptr; }
+ DelayedFieldAccess* pending_field_access() { return _pending_field_access; }
+ void set_pending_field_access(DelayedFieldAccess* delayed) { _pending_field_access = delayed; }
+ bool has_pending_load_indexed() { return _pending_load_indexed != nullptr; }
+ DelayedLoadIndexed* pending_load_indexed() { return _pending_load_indexed; }
+ void set_pending_load_indexed(DelayedLoadIndexed* delayed) { _pending_load_indexed = delayed; }
// unified bailout support
void bailout(const char* msg) const { compilation()->bailout(msg); }
bool bailed_out() const { return compilation()->bailed_out(); }
void monitorexit(Value x, int bci);
void new_multi_array(int dimensions);
void throw_op(int bci);
Value round_fp(Value fp_value);
+ // inline types
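+ // Copies, field by field, the content of inline type vk from src (at
+ // src_off) to dest (at dest_off). enclosing_field, if non-null, is
+ // assumed to identify the flat field that encloses the copied value.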
+ void copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field = nullptr);
+
// stack/code manipulation helpers
Instruction* append_with_bci(Instruction* instr, int bci);
Instruction* append(Instruction* instr);
Instruction* append_split(StateSplit* instr);
bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
bool profile_checkcasts() { return _compilation->profile_checkcasts(); }
bool profile_parameters() { return _compilation->profile_parameters(); }
bool profile_arguments() { return _compilation->profile_arguments(); }
bool profile_return() { return _compilation->profile_return(); }
+ bool profile_array_accesses() { return _compilation->profile_array_accesses(); }
Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
void check_args_for_profiling(Values* obj_args, int expected);