< prev index next >

src/hotspot/share/opto/parseHelper.cpp

Print this page
*** 32,10 ***
--- 32,21 ---
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "runtime/sharedRuntime.hpp"
  
+ #ifndef PRODUCT
+ // Global PEA counters, bumped with Atomic::inc from add_new_allocation() and
+ // materialize(); non-product builds only.
+ unsigned peaNumAllocsTracked = 0;
+ unsigned peaNumMaterializations = 0;
+ 
+ // Dump the PEA counters collected during this VM's compilations.
+ void printPeaStatistics() {
+   tty->print("PEA: num allocations tracked = %u, ", peaNumAllocsTracked);
+   tty->print_cr("num materializations = %u", peaNumMaterializations);
+ }
+ #endif
+ 
  //------------------------------make_dtrace_method_entry_exit ----------------
  // Dtrace -- record entry or exit of a method if compiled with dtrace support
  void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
    const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
    address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :

*** 278,10 ***
--- 289,15 ---
  
    // Keep track of boxed values for EliminateAutoBox optimizations.
    if (C->eliminate_boxing() && klass->is_box_klass()) {
      C->set_has_boxed_value(true);
    }
+ 
+   if (DoPartialEscapeAnalysis) {
+     // obj is a CheckCastPP Node, aka. cooked oop.
+     jvms()->alloc_state().add_new_allocation(this, obj);
+   }
  }
  
  #ifndef PRODUCT
  //------------------------------dump_map_adr_mem-------------------------------
  // Debug dump of the mapping from address types to MergeMemNode indices.

*** 300,5 ***
--- 316,828 ---
      }
    }
  }
  
  #endif
+ 
+ #include "ci/ciUtilities.inline.hpp"
+ #include "compiler/methodMatcher.hpp"
+ 
+ class PEAContext {
+ private:
+   // Method filter built from -XX:PEAMethodOnly, or nullptr to match all methods.
+   BasicMatcher* _matcher;
+ 
+   // BUGFIX: _matcher was left uninitialized when PEAMethodOnly is null, so
+   // match() read an indeterminate pointer. Initialize it to nullptr.
+   PEAContext() : _matcher(nullptr) {
+     if (PEAMethodOnly != nullptr) {
+       const char* error_msg = nullptr;
+       _matcher = BasicMatcher::parse_method_pattern((char*)PEAMethodOnly, error_msg, false);
+       if (error_msg != nullptr) {
+         tty->print_cr("Invalid PEAMethodOnly: %s", error_msg);
+       }
+     }
+   }
+ 
+   NONCOPYABLE(PEAContext);
+ public:
+   // Returns true if 'method' passes the PEAMethodOnly filter (or no filter is set).
+   bool match(ciMethod* method) const;
+   // Meyers' singleton; local-static initialization is thread-safe since C++11.
+   static PEAContext& instance() {
+     static PEAContext s;
+     return s;
+   }
+ };
+ 
+ //
+ // Partial Escape Analysis
+ // Stadler, Lukas, Thomas Würthinger, and Hanspeter Mössenböck. "Partial escape analysis and scalar replacement for Java."
+ //
+ // Our adaptation to C2.
+ // https://gist.github.com/navyxliu/62a510a5c6b0245164569745d758935b
+ //
+ 
+ // Create a fresh virtual-object state for an allocation of exact type 'oop_type'.
+ // Every field entry starts as nullptr, meaning "no explicit store seen yet".
+ VirtualState::VirtualState(const TypeOopPtr* oop_type): _oop_type(oop_type), _lockcnt(0) {
+   Compile* C = Compile::current();
+   int nof = nfields();
+   _entries = NEW_ARENA_ARRAY(C->parser_arena(), Node*, nof);
+   // only track explicit stores.
+   // see InitializeNode semantics in memnode.cpp
+   for (int i = 0; i < nof; ++i) {
+     _entries[i] = nullptr;
+   }
+ }
+ 
+ // do NOT call base's copy constructor. we would like to reset refcnt!
+ // Field entries are shallow-copied; the value Nodes themselves are shared.
+ VirtualState::VirtualState(const VirtualState& other) : _oop_type(other._oop_type), _lockcnt(other._lockcnt) {
+   int nof = nfields();
+   _entries = NEW_ARENA_ARRAY(Compile::current()->parser_arena(), Node*, nof);
+ 
+   // Using arraycopy stub is more efficient?
+   Node** dst = _entries;
+   Node** src = other._entries;
+   while (nof-- > 0) {
+     *dst++ = *src++;
+   }
+ }
+ 
+ // Number of non-static fields of the exact klass this virtual object tracks.
+ int VirtualState::nfields() const {
+   return _oop_type->is_instptr()->instance_klass()->nof_nonstatic_fields();
+ }
+ 
+ // Record 'val' as the current value of 'field' in this virtual object.
+ void VirtualState::set_field(ciField* field, Node* val) {
+   // We can't trust field->holder() here. It may reference to the super class.
+   // field layouter may flip order in jdk15+, refer to:
+   // https://shipilev.net/jvm/objects-inside-out/#_superhierarchy_gaps_in_java_15
+   //
+   // _oop_type is the exact type when we registered ObjID in allocation state.
+   //
+   ciInstanceKlass* holder = _oop_type->is_instptr()->instance_klass();
+ 
+   // Match by byte offset rather than by ciField identity (see note above).
+   for (int i = 0; i < holder->nof_nonstatic_fields(); ++i) {
+     if (field->offset_in_bytes() == holder->nonstatic_field_at(i)->offset_in_bytes()) {
+       _entries[i] = val;
+       return;
+     }
+   }
+ 
+   ShouldNotReachHere();
+ }
+ 
+ // Return the tracked value of 'field', located by byte offset in the exact
+ // klass (mirrors set_field). nullptr means "no explicit store seen yet".
+ Node* VirtualState::get_field(ciField* field) const {
+   ciInstanceKlass* holder = _oop_type->is_instptr()->instance_klass();
+   const int offset = field->offset_in_bytes();
+   const int nof = holder->nof_nonstatic_fields();
+ 
+   for (int i = 0; i < nof; ++i) {
+     if (holder->nonstatic_field_at(i)->offset_in_bytes() == offset) {
+       return _entries[i];
+     }
+   }
+ 
+   ShouldNotReachHere();
+   return nullptr;
+ }
+ 
+ // Pad 'phi' with nullptr inputs so that input slot 'pnum' exists.
+ static void ensure_phi(PhiNode* phi, uint pnum) {
+   uint missing = (pnum >= phi->req()) ? (pnum - phi->req() + 1) : 0;
+   while (missing-- > 0) {
+     phi->add_req(nullptr);
+   }
+ }
+ 
+ // A field that never saw an explicit store is logically zero/null.
+ // Synthesize the default constant for it ('val' is updated in place)
+ // and return the field's type for phi construction.
+ static const Type* initialize_null_field(GraphKit* kit, ciField* field, Node*& val) {
+   assert(val == nullptr, "must been a null field");
+   const Type* type;
+   BasicType bt = field->layout_type();
+ 
+   if (bt == T_OBJECT && field->type()->is_instance_klass()) {
+     val = kit->gvn().makecon(TypePtr::NULL_PTR);
+     type = TypeInstPtr::make(TypePtr::BotPTR, field->type()->as_instance_klass());
+   } else {
+     val = kit->zerocon(bt);
+     type = Type::get_const_basic_type(bt);
+   }
+ 
+   return type;
+ }
+ 
+ // Merge the field values of 'newin' (from predecessor 'pnum') into this state
+ // at merge point 'r'. Fields whose values differ get a Phi rooted at 'r'.
+ ObjectState& VirtualState::merge(ObjectState* newin, GraphKit* kit, RegionNode* r, int pnum) {
+   assert(newin->is_virtual(), "only support VirtualState");
+ 
+   if (this != newin) {
+     VirtualState* vs = static_cast<VirtualState*>(newin);
+     ciInstanceKlass* ik = _oop_type->is_instptr()->instance_klass();
+     assert(nfields() == ik->nof_nonstatic_fields(), "_nfields should be consistent with instanceKlass");
+ 
+     for (int i = 0; i < nfields(); ++i) {
+       Node* m = _entries[i];
+ 
+       if (m != vs->_entries[i]) {
+         ciField* field = ik->nonstatic_field_at(i);
+         Node* n = vs->_entries[i];
+         const Type* tn;
+         if (n == nullptr) {
+           // never stored on the incoming path: use the default (zero/null) value.
+           tn = initialize_null_field(kit, field, n);
+         } else {
+           tn = kit->gvn().type(n);
+         }
+ 
+         // Reuse an existing Phi rooted at 'r'; otherwise create one whose type
+         // is the meet of both sides.
+         if (m == nullptr || !m->is_Phi() || m->in(0) != r) {
+           const Type* type;
+ 
+           if (m == nullptr) {
+             type = initialize_null_field(kit, field, m);
+           } else {
+             type = kit->gvn().type(m);
+           }
+           type = type->meet(tn);
+ 
+           m = PhiNode::make(r, m, type);
+           kit->gvn().set_type(m, type);
+           _entries[i] = m;
+         }
+ 
+         ensure_phi(m->as_Phi(), pnum);
+         m->set_req(pnum, n);
+         if (pnum == 1) {
+           // first real predecessor: let GVN fold the Phi if it is trivial.
+           _entries[i] = kit->gvn().transform(m);
+         }
+       }
+     }
+   }
+ 
+   return *this;
+ }
+ 
+ #ifndef PRODUCT
+ // Debug dump of this virtual object's tracked field values ('_' = no store yet).
+ void VirtualState::print_on(outputStream* os) const {
+   os->print_cr("Virt = %p", this);
+ 
+   for (int i = 0; i < nfields(); ++i) {
+     Node* val = _entries[i];
+     os->print("#%d: ", i);
+     if (val != nullptr) {
+       val->dump();
+     } else {
+       os->print_cr("_");
+     }
+   }
+ }
+ 
+ // Debug dump of an escaped object: its materialized flag and merged value.
+ void EscapedState::print_on(outputStream* os) const {
+   os->print_cr("Escaped = %p %d", this, _materialized);
+   if (_merged_value == nullptr) {
+     os->print_cr(" null");
+   } else {
+     _merged_value->dump();
+   }
+ }
+ 
+ #endif
+ 
+ // Start tracking the fresh allocation behind 'obj' (a CheckCastPP over the
+ // AllocateNode's result projection) as a virtual object. Allocations may be
+ // opted out by debug flags, by problematic klasses (Throwable, Thread,
+ // Reference, StringBuffer/Builder, finalizable...), or by the PEAMethodOnly
+ // filter; those are simply left untracked.
+ void PEAState::add_new_allocation(GraphKit* kit, Node* obj) {
+   PartialEscapeAnalysis* pea = kit->PEA();
+   int nfields;
+   const TypeOopPtr* oop_type = obj->as_Type()->type()->is_oopptr();
+ 
+   if (oop_type->isa_aryptr()) {
+     const TypeAryPtr* ary_type = oop_type->is_aryptr();
+     const TypeInt* size = ary_type->size();
+     if (size->is_con() && size->get_con() <= EliminateAllocationArraySizeLimit) {
+       nfields = size->get_con();
+     } else {
+       // length of array is too long or unknown
+       return;
+     }
+   } else {
+     const TypeInstPtr* inst_type = oop_type->is_instptr();
+     nfields = inst_type->instance_klass()->nof_nonstatic_fields();
+   }
+ 
+   if (nfields >= 0) {
+     AllocateNode* alloc = obj->in(1)->in(0)->as_Allocate();
+     int idx = pea->add_object(alloc);
+ #ifndef PRODUCT
+     // node_idx_t is unsigned. Use static_cast<> here to avoid comparison between signed and unsigned.
+     if (PEA_debug_idx > 0 && alloc->_idx != static_cast<node_idx_t>(PEA_debug_idx)) {         // only allow PEA_debug_idx
+       return;
+     } else if (PEA_debug_idx < 0 && alloc->_idx == static_cast<node_idx_t>(-PEA_debug_idx)) { // block PEA_debug_idx
+       return;
+     }
+     Atomic::inc(&peaNumAllocsTracked);
+ #endif
+     // Opt out all subclasses of Throwable because C2 will not inline all methods of them including <init>.
+     // PEA needs to materialize it at <init>.
+     // NOTE(review): on the array path oop_type is an aryptr, so is_instptr()
+     // below would fail — confirm arrays are meant to be rejected before here.
+     ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
+     ciEnv* env = ciEnv::current();
+     if (ik->is_subclass_of(env->Throwable_klass())) {
+       return;
+     }
+     // Opt out of all subclasses that non-partial escape analysis opts out of. Opt out of StringBuffer/Builder and
+     // defer those objects to StringOpts.
+     if (ik->is_subclass_of(env->Thread_klass()) ||
+         ik->is_subclass_of(env->Reference_klass()) ||
+         ik->is_subclass_of(env->StringBuffer_klass()) ||
+         ik->is_subclass_of(env->StringBuilder_klass()) ||
+         !ik->can_be_instantiated() || ik->has_finalizer()) {
+       return;
+     }
+     // Restrict tracking to the debug window [PEA_debug_start, PEA_debug_stop).
+     if (idx < PEA_debug_start || idx >= PEA_debug_stop) {
+       return;
+     }
+ 
+     ciMethod* method = kit->jvms()->method();
+     if (PEAContext::instance().match(method)) {
+ #ifndef PRODUCT
+       if (PEAVerbose) {
+         if (method != nullptr) {
+           method->dump_name_as_ascii(tty);
+         }
+         tty->print_cr(" start tracking %d | obj#%d", idx, alloc->_idx);
+         alloc->dump();
+       }
+ #endif
+       bool result = _state.put(alloc, new VirtualState(oop_type));
+       assert(result, "the key existed in _state");
+       pea->add_alias(alloc, obj);
+     }
+   }
+ }
+ 
+ // Deep-copy assignment: replace our tracked states with clones of init's,
+ // so later mutations of either state do not alias the other.
+ PEAState& PEAState::operator=(const PEAState& init) {
+   if (this != &init) {
+     clear();
+ 
+     init._state.iterate([&](ObjID key, ObjectState* value) {
+       _state.put(key, value->clone());
+       return true;
+     });
+   }
+ 
+ #ifdef ASSERT
+     validate();
+ #endif
+   return *this;
+ }
+ 
+ // Emit a store of 'val' into 'field' of the materialized object 'objx'.
+ // If 'val' aliases a tracked object, that object is materialized first
+ // (recursively) and its merged value is stored instead.
+ void PEAState::put_field(GraphKit* kit, ciField* field, Node* objx, Node* val) {
+   Compile* C = kit->C;
+   PartialEscapeAnalysis* pea = C->PEA();
+   int offset = field->offset_in_bytes();
+   Node* adr = kit->basic_plus_adr(objx, objx, offset);
+   const TypePtr* adr_type = C->alias_type(field)->adr_type();
+   DecoratorSet decorators = IN_HEAP;
+ 
+   BasicType bt = field->layout_type();
+   // FIX: removed unused local 'const Type* type = Type::get_const_basic_type(bt);'
+   bool is_obj = is_reference_type(bt);
+ 
+   if (is_obj && pea->is_alias(val)) {
+     // recurse if val is a virtual object.
+     if (as_virtual(pea, val)) {
+       materialize(kit, val);
+     }
+     EscapedState* es = as_escaped(pea, val);
+     assert(es != nullptr, "the object of val is not Escaped");
+     val = es->merged_value();
+   }
+   // Store the value.
+   const Type* field_type;
+   if (!field->type()->is_loaded()) {
+     field_type = TypeInstPtr::BOTTOM;
+   } else {
+     if (is_obj) {
+       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
+     } else {
+       field_type = Type::BOTTOM;
+     }
+   }
+   decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
+ 
+ #ifndef PRODUCT
+   if (PEAVerbose) {
+     val->dump();
+   }
+ #endif
+   kit->access_store_at(objx, adr, adr_type, val, field_type, bt, decorators);
+ }
+ 
+ // Because relevant objects may form a directed cyclic graph, materialization is a DFS process.
+ // PEA clones the object and marks escaped in allocation state. PEA then iterates all fields
+ // and recursively materializes the references which are still aliasing with virtual objects in
+ // allocation state.
+ // Returns the node denoting the cloned (materialized) object.
+ Node* PEAState::materialize(GraphKit* kit, Node* var) {
+   Compile* C = kit->C;
+   PartialEscapeAnalysis* pea = C->PEA();
+   ObjID alloc = pea->is_alias(var);
+   VirtualState* virt = static_cast<VirtualState*>(get_object_state(alloc));
+ 
+   assert(alloc != nullptr && get_object_state(alloc)->is_virtual(), "sanity check");
+ #ifndef PRODUCT
+   if (PEAVerbose) {
+     tty->print_cr("PEA materializes a virtual %d obj%d ", pea->object_idx(alloc), alloc->_idx);
+   }
+   Atomic::inc(&peaNumMaterializations);
+ #endif
+ 
+   const TypeOopPtr* oop_type = var->as_Type()->type()->is_oopptr();
+   Node* objx = kit->materialize_object(alloc, oop_type);
+ 
+   // we save VirtualState beforehand.
+   escape(alloc, objx, true);
+   pea->add_alias(alloc, objx);
+ 
+   if (virt->lockcnt() > 0 && GenerateSynchronizationCode) {
+     // NOTE(review): unlike other uses, this PEAVerbose check is not under
+     // #ifndef PRODUCT — confirm PEAVerbose is available in product builds.
+     if (PEAVerbose) {
+       tty->print_cr("materializing an object with unbalanced monitor");
+     }
+ 
+     int mon_id = 0;
+     JVMState* jvms = kit->jvms();
+     int cnt = 0;
+     // It's possible that the locked monitor is not in the youngest JVMState,
+     // so we have to follow the stacktrace to discover them all.
+     //
+     // PEA Materialization steals those monitors from the original object. Here is the scheme:
+     // 1. unlock the original object.
+     // 2. lock the materialized object.
+     // 3. backfill the obj for Monitor 'obj|box' pair when Parse translates 'monitor-exit'.
+     // 4. split Phi-Unlock in the upcoming monitor_exit (Parse::do_monitor_exit).
+     //
+     while (jvms != nullptr) {
+       for (mon_id = 0;  mon_id < jvms->nof_monitors() && jvms->map()->monitor_obj(jvms, mon_id) != var; ++mon_id);
+ 
+       if (mon_id < jvms->nof_monitors()) {
+         cnt++;
+         Node* box = jvms->map()->monitor_box(jvms, mon_id);
+         kit->shared_unlock(box, var, true/*preserve_monitor*/); // PEA pops the monitor in Parse::monitor_exit().
+         kit->clone_shared_lock(box, objx);
+       }
+       jvms = jvms->caller();
+     }
+     assert(cnt == virt->lockcnt(), "steal all locks from var");
+   }
+ 
+   kit->replace_in_map(var, objx);
+ 
+ #ifndef PRODUCT
+   if (PEAVerbose) {
+     tty->print("new object: ");
+     objx->dump();
+   }
+ #endif
+ 
+   if (oop_type->isa_instptr()) {
+     // virt->_oop_type is an exact non-null pointer. oop_type may not be exact, or BOT
+     // We check that they both refer to the same java type.
+     assert(virt->_oop_type->is_instptr()->is_same_java_type_as(oop_type), "type of oopptr is inconsistent!");
+ #ifndef PRODUCT
+     if (PEAVerbose) {
+       ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
+       tty->print("ciInstanceKlass: ");
+       ik->print_name_on(tty);
+       tty->cr();
+     }
+ #endif
+ 
+     // Copy every explicitly-stored field value into the clone.
+     for (auto&& it = virt->field_iterator(); it.has_next(); ++it) {
+       ciField* field = it.field();
+       Node* val = it.value();
+ 
+ #ifndef PRODUCT
+       if (PEAVerbose) {
+         tty->print("field: ");
+         field->print_name_on(tty);
+         tty->cr();
+       }
+ #endif
+       // no initial value or is captured by InitializeNode
+       if (val == nullptr) continue;
+ 
+       put_field(kit, field, objx, val);
+     }
+ 
+     // back from DFS, we still need to check again for all virtual states.
+     // they may have a field 'var' which has committed to memory via prior putfield. We emit a store with updated objx.
+     // Hopefully, two consecutive stores coalesce.
+     _state.iterate([&](ObjID obj, ObjectState* os) {
+       if (os->is_virtual()) {
+         VirtualState* vs = static_cast<VirtualState*>(os);
+ 
+         for (auto&& i = vs->field_iterator(); i.has_next(); ++i) {
+           if (i.value() == var) {
+             vs->set_field(i.field(), objx);
+             put_field(kit, i.field(), get_java_oop(obj), objx);
+           }
+         }
+       }
+       return true;
+     });
+ 
+     // if var is associated with MemBarRelease, copy it for objx
+     for (DUIterator_Fast kmax, k = var->fast_outs(kmax); k < kmax; k++) {
+       Node* use = var->fast_out(k);
+ 
+       if (use->Opcode() == Op_MemBarRelease) {
+         kit->insert_mem_bar(Op_MemBarRelease, objx);
+         break;
+       }
+     }
+   } else {
+     assert(false, "array not support yet!");
+   }
+ 
+ #ifdef ASSERT
+   validate();
+ #endif
+   return objx;
+ }
+ 
+ #ifndef PRODUCT
+ // Dump every tracked object and its state to the caller-supplied stream 'os'.
+ void PEAState::print_on(outputStream* os) const {
+   if (size() > 0) {
+     os->print_cr("PEAState:");
+   }
+ 
+   _state.iterate([&](ObjID obj, ObjectState* state) {
+     bool is_virt = state->is_virtual();
+     os->print("Obj#%d(%s) ref = %d\n", obj->_idx, is_virt ? "Virt" : "Mat", state->ref_cnt());
+ 
+     if (is_virt) {
+       VirtualState* vs = static_cast<VirtualState*>(state);
+       vs->print_on(os);
+     } else {
+       EscapedState* es = static_cast<EscapedState*>(state);
+       es->print_on(os); // FIX: was 'tty', which ignored the 'os' parameter.
+     }
+     return true;
+   });
+ }
+ 
+ #endif
+ 
+ #ifdef ASSERT
+ // Consistency-check hook for debug builds; currently a no-op placeholder.
+ void PEAState::validate() const {
+ }
+ #endif
+ 
+ // Returns true if 'oop' appears among the safepoint's argument/debug inputs.
+ bool safepointContains(SafePointNode* sfpt, Node *oop) {
+   const uint limit = sfpt->req();
+   uint i = TypeFunc::Parms;
+   while (i < limit && sfpt->in(i) != oop) {
+     ++i;
+   }
+   return i < limit;
+ }
+ 
+ // Mark the virtual object 'id' (whose java oop is 'obj') as escaped without
+ // materializing it, and recursively do the same for every virtual object
+ // reachable through its reference fields.
+ void PEAState::mark_all_escaped(PartialEscapeAnalysis* pea, ObjID id, Node* obj) {
+   VirtualState* virt = as_virtual(pea, obj);
+   escape(id, obj, false);
+ 
+   for (auto&& it = virt->field_iterator(); it.has_next(); ++it) {
+     ciField* field = it.field();
+     Node* val = it.value();
+ 
+     BasicType bt = field->layout_type();
+     bool is_obj = is_reference_type(bt);
+ 
+     ObjID alias = pea->is_alias(val);
+     if (is_obj && alias != nullptr) {
+       // recurse if val is a virtual object.
+       if (get_object_state(alias)->is_virtual()) {
+         mark_all_escaped(pea, alias, val);
+       }
+       assert(as_escaped(pea, val) != nullptr, "the object of val is not Escaped");
+     }
+   }
+ }
+ 
+ // Mark every still-virtual object that is live at safepoint 'sfpt'
+ // (i.e. appears among the safepoint's inputs) as escaped.
+ void PEAState::mark_all_live_objects_escaped(PartialEscapeAnalysis *pea, SafePointNode* sfpt) {
+   Unique_Node_List objs;
+   int sz = objects(objs);
+ 
+   for (int i = 0; i < sz; ++i) {
+     ObjID id = static_cast<ObjID>(objs.at(i));
+     ObjectState* os = get_object_state(id);
+ 
+     if (os->is_virtual()) {
+       Node *oop = get_java_oop(id);
+       // We only need to mark objects that are live as escaped.
+       if (safepointContains(sfpt, oop)) {
+         mark_all_escaped(pea, id, oop);
+       }
+     }
+   }
+ }
+ 
+ // get the key set from _state. we stop maintaining aliases for the materialized objects.
+ int PEAState::objects(Unique_Node_List& nodes) const {
+   auto collect = [&](ObjID obj, ObjectState* state) {
+     nodes.push(obj);
+     return true;
+   };
+   _state.iterate(collect);
+   return nodes.size();
+ }
+ 
+ // We track '_merged_value' along with control-flow but only return it if _materialized = true;
+ // GraphKit::backfill_materialized() replaces the original CheckCastPP with it at do_exits() or at safepoints.
+ // If materialization doesn't take place, replacement shouldn't happen either.
+ //
+ // @return: nullptr if id has not been materialized, or the SSA java_oop that denotes the original object.
+ Node* PEAState::get_materialized_value(ObjID id) const {
+   assert(contains(id), "must exists in allocation");
+   ObjectState* os = get_object_state(id);
+   return os->is_virtual() ? nullptr
+                           : static_cast<EscapedState*>(os)->materialized_value();
+ }
+ 
+ // Return the SSA node currently standing for object 'id': the materialized
+ // value if one exists, otherwise the CheckCastPP user of the allocation's
+ // result projection. Returns nullptr for untracked ids.
+ Node* PEAState::get_java_oop(ObjID id) const {
+   if (!contains(id)) return nullptr;
+ 
+   Node* obj = get_materialized_value(id);
+   if (obj != nullptr) {
+     return obj;
+   }
+ 
+   // Fall back to the allocation's cooked oop (CheckCastPP of its result proj).
+   ProjNode* resproj = id->proj_out_or_null(TypeFunc::Parms);
+   if (resproj != nullptr) {
+     for (DUIterator_Fast imax, i = resproj->fast_outs(imax); i < imax; i++) {
+       Node* p = resproj->fast_out(i);
+       if (p->is_CheckCastPP()) {
+         assert(obj == nullptr, "multiple CheckCastPP?");
+         obj = p;
+       }
+     }
+   }
+   assert(obj == nullptr || AllocateNode::Ideal_allocation(obj) == id, "sanity check");
+   return obj;
+ }
+ 
+ AllocationStateMerger::AllocationStateMerger(PEAState& target) : _state(target) {}
+ 
+ // Merge allocation state 'newin' (arriving on predecessor 'pnum') into the
+ // running state at merge point 'region'. Objects tracked by both states are
+ // merged field-by-field when both are virtual, otherwise unified as escaped
+ // values (creating or extending a Phi for their java oops if needed).
+ void AllocationStateMerger::merge(PEAState& newin, GraphKit* kit, RegionNode* region, int pnum) {
+   PartialEscapeAnalysis* pea = kit->PEA();
+   Unique_Node_List set1, set2;
+ 
+   _state.objects(set1);
+   newin.objects(set2);
+ 
+   // Only objects tracked in BOTH states can stay tracked across the merge.
+   VectorSet intersection = intersect(set1.member_set(), set2.member_set());
+   set1.remove_useless_nodes(intersection);
+ 
+   for (uint i = 0; i < set1.size(); ++i) {
+     ObjID obj = static_cast<ObjID>(set1.at(i));
+     ObjectState* os1 = _state.get_object_state(obj);
+     ObjectState* os2 = newin.get_object_state(obj);
+     if (os1->is_virtual() && os2->is_virtual()) {
+       os1->merge(os2, kit, region, pnum);
+     } else {
+       assert(os1 != nullptr && os2 != nullptr, "sanity check");
+       Node* m;
+       Node* n;
+       bool materialized;
+       EscapedState* es;
+ 
+       if (os1->is_virtual()) {
+         // If obj is virtual in current state,  it must be escaped in newin.
+         // Mark it escaped in current state.
+         EscapedState* es2 = static_cast<EscapedState*>(os2);
+         materialized = es2->has_materialized();
+         m = _state.get_java_oop(obj);
+         n = es2->merged_value();
+         es = _state.escape(obj, m, materialized);
+       } else if (os2->is_virtual()) {
+         // If obj is virtual in newin,  it must be escaped in current state.
+         // Mark it escaped  in newin
+         es = static_cast<EscapedState*>(os1);
+         materialized = es->has_materialized();
+         m = es->merged_value();
+         n = newin.get_java_oop(obj);
+         os2 = newin.escape(obj, n, false);
+       } else {
+         // obj is escaped in both newin and current state.
+         es = static_cast<EscapedState*>(os1);
+         EscapedState* es2 = static_cast<EscapedState*>(os2);
+         m = es->merged_value();
+         n = es2->merged_value();
+         materialized = es->has_materialized() || es2->has_materialized();
+       }
+ 
+       // Unify the two java oops: reuse a Phi on 'region' or create a new one.
+       if (m->is_Phi() && m->in(0) == region) {
+         ensure_phi(m->as_Phi(), pnum);
+         // only update the pnum if we have never seen it before.
+         if (m->in(pnum) == nullptr) {
+           m->set_req(pnum, n);
+         }
+       } else if (m != n) {
+         const Type* type = obj->oop_type(kit->gvn());
+         Node* phi = PhiNode::make(region, m, type);
+         phi->set_req(pnum, n);
+         kit->gvn().set_type(phi, type);
+         es->update(materialized, phi);
+       }
+     }
+   }
+ 
+   // process individual phi
+   SafePointNode* map = kit->map();
+   for (uint i = 0; i < map->req(); ++i) {
+     Node* node = map->in(i);
+ 
+     if (node != nullptr && node->is_Phi() && node->as_Phi()->region() == region) {
+       process_phi(node->as_Phi(), kit, region, pnum);
+     }
+   }
+ 
+ #ifdef ASSERT
+   _state.validate();
+ #endif
+ }
+ 
+ // Passive Materialization
+ // ------------------------
+ // Materialize an object at the phi node because at least one of its predecessors has materialized the object.
+ // Since C2 PEA does not eliminate the original allocation, we skip passive materialization and keep using it.
+ // The only problem is partial redundancy. JDK-8287061 should address this issue.
+ //
+ // PEA splits an object based on its escapement. At the merging point, the original object is NonEscape, or it has already
+ // been materialized before. the phi is 'reducible Object-Phi' in JDK-8287061 and the original object is scalar replaceable!
+ //
+ // obj' = PHI(Region, OriginalObj, ClonedObj)
+ // and OriginalObj is NonEscape but NSR; ClonedObj is Global/ArgEscape
+ //
+ // JDK-8287061 transforms it to =>
+ // obj' = PHI(Region, null, ClonedObj)
+ // selector = PHI(Region, 0, 1)
+ //
+ // since OriginalObj is NonEscape, it is replaced by scalars.
+ //
+ static Node* ensure_object_materialized(Node* var, PEAState& state, SafePointNode* from_map, RegionNode* r, int pnum) {
+   // skip passive materialize for time being.
+   // if JDK-8287061 can guarantee to replace the original allocation, we don't need to worry about partial redundancy.
+   return var;
+ }
+ 
+ // Merge phi node incrementally.
+ // we check all merged inputs in _state.
+ // 1. all inputs refer to the same ObjID, then phi is created as alias of ObjID
+ // 2. otherwise, any input is alias with a 'virtual' object needs to convert to 'Escaped'. replace input with merged_value.
+ // 3. otherwise, if any input is aliased with an Escaped object. replace input with merged value.
+ void AllocationStateMerger::process_phi(PhiNode* phi, GraphKit* kit, RegionNode* region, int pnum) {
+   ObjID unique = nullptr;
+   bool materialized = false;
+   bool same_obj = true;
+   PartialEscapeAnalysis* pea = kit->PEA();
+ 
+   if (pea == nullptr) return;
+ 
+   // First pass: do all live inputs alias one and the same tracked object?
+   for (uint i = 1; i < phi->req(); ++i) {
+     if (region->in(i) == nullptr || region->in(i)->is_top())
+       continue;
+ 
+     Node* node = phi->in(i);
+     ObjID obj = pea->is_alias(node);
+     if (obj != nullptr) {
+       if (unique == nullptr) {
+         unique = obj;
+       } else if (unique != obj) {
+         same_obj = false;
+       }
+       EscapedState* es = _state.as_escaped(pea, node);
+       if (es != nullptr) {
+         materialized |= es->has_materialized();
+       }
+     } else {
+       same_obj = false;
+     }
+   }
+ 
+   if (same_obj) {
+     //xliu: should I also check pnum == 1?
+     // phi nodes for exception handler may have leave normal paths vacant.
+     pea->add_alias(unique, phi);
+   } else {
+     bool printed = false;
+ 
+     // Second pass: force any still-virtual input's object to Escaped and
+     // rewire the phi input to the object's merged value.
+     for (uint i = 1; i < phi->req(); ++i) {
+       if (region->in(i) == nullptr || region->in(i)->is_top())
+         continue;
+ 
+       Node* node = phi->in(i);
+       ObjID obj = pea->is_alias(node);
+       if (obj != nullptr && _state.contains(obj)) {
+         ObjectState* os = _state.get_object_state(obj);
+         if (os->is_virtual()) {
+           Node* n = ensure_object_materialized(node, _state, kit->map(), region, pnum);
+           os = _state.escape(obj, n, materialized);
+         }
+         EscapedState* es = static_cast<EscapedState*>(os);
+         Node* value = es->merged_value();
+         if (value->is_Phi() && value->in(0) == region) {
+           value = value->in(i);
+         }
+ 
+         if (node != value) {
+           assert(value != phi, "sanity");
+ #ifndef PRODUCT
+           if (PEAVerbose) {
+             if (!printed) {
+               phi->dump();
+               printed = true;
+             }
+             tty->print_cr("[PEA] replace %dth input with node %d", i, value->_idx);
+           }
+ #endif
+           phi->replace_edge(node, value);
+         }
+       }
+     }
+     // The phi no longer stands for a single object; drop any stale alias.
+     ObjID obj = pea->is_alias(phi);
+     if (obj != nullptr) {
+       pea->remove_alias(obj, phi);
+     }
+   }
+ }
+ 
+ // Called when a Phi is being created to merge 'm' (from the current state)
+ // and 'n' (from the incoming state 'newin'): keep both allocation states
+ // consistent with the new phi.
+ void AllocationStateMerger::merge_at_phi_creation(const PartialEscapeAnalysis* pea, PEAState& newin, PhiNode* phi, Node* m, Node* n) {
+   ObjID obj1 = pea->is_alias(m);
+   ObjID obj2 = pea->is_alias(n);
+ 
+   if (_state.contains(obj1)) { // m points to an object that 'as' (this allocation state) is tracking.
+     ObjectState* os1 = _state.get_object_state(obj1);
+     ObjectState* os2 = newin.contains(obj2) ? newin.get_object_state(obj2) : nullptr;
+ 
+     // obj1 != obj2 if n points to something else. It could be the other object, null or a ConP.
+     // we do nothing here because PEA doesn't create phi in this case.
+     if (obj1 == obj2 && os2 != nullptr) { // n points to the same object and pred_as is tracking.
+       if (!os1->is_virtual() || !os2->is_virtual()) {
+         if (os2->is_virtual()) {
+           // passive materialize
+           os2 = newin.escape(obj2, n, false);
+         }
+ 
+         if (os1->is_virtual()) {
+           bool materialized = static_cast<EscapedState*>(os2)->has_materialized();
+           _state.escape(obj1, phi, materialized);
+         } else {
+           static_cast<EscapedState*>(os1)->update(phi);
+         }
+       }
+     }
+   }
+ }
+ 
+ // Nothing to release: _state is a reference owned by the caller.
+ AllocationStateMerger::~AllocationStateMerger() {
+ }
+ 
+ // Returns true when 'method' passes the PEAMethodOnly filter; with no
+ // pattern configured (or an unknown method) every method matches.
+ bool PEAContext::match(ciMethod* method) const {
+   if (_matcher != nullptr && method != nullptr) {
+     VM_ENTRY_MARK;
+     methodHandle mh(THREAD, method->get_Method());
+     return _matcher->match(mh);
+   }
+   return true;
+ }
+ 
+ // Transition object 'id' into the Escaped state with java oop 'p'.
+ // The materialized bit is sticky: once an object has materialized it never
+ // reverts. The refcount of any prior state is carried over.
+ EscapedState* PEAState::escape(ObjID id, Node* p, bool materialized) {
+   assert(p != nullptr, "the new alias must be non-null");
+   // FIX: removed unused local 'Node* old = nullptr;'
+   EscapedState* es;
+ 
+   if (contains(id)) {
+     ObjectState* os = get_object_state(id);
+     // if os is already an EscapedState, keep its materialized bit sticky.
+     if (!os->is_virtual()) {
+       materialized |= static_cast<EscapedState*>(os)->has_materialized();
+     }
+     es = new EscapedState(materialized, p);
+     es->ref_cnt(os->ref_cnt()); // copy the refcnt from the original ObjectState.
+   } else {
+     es = new EscapedState(materialized, p);
+   }
+   _state.put(id, es);
+   if (materialized) {
+     static_cast<AllocateNode*>(id)->inc_materialized();
+   }
+   assert(contains(id), "sanity check");
+   return es;
+ }
< prev index next >