src/hotspot/share/opto/parse1.cpp

*** 93,10 ***
--- 93,14 ---
    }
  
    if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
      BytecodeParseHistogram::print();
    }
+ 
+   if (DoPartialEscapeAnalysis) {
+     printPeaStatistics();
+   }
  }
  #endif
  
  //------------------------------ON STACK REPLACEMENT---------------------------
  

*** 386,25 ***
    }
  }
  
  //------------------------------Parse------------------------------------------
  // Main parser constructor.
! Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
    : _exits(caller)
  {
    // Init some variables
    _caller = caller;
    _method = parse_method;
    _expected_uses = expected_uses;
    _depth = 1 + (caller->has_method() ? caller->depth() : 0);
    _wrote_final = false;
    _wrote_volatile = false;
    _wrote_stable = false;
    _wrote_fields = false;
    _alloc_with_final = nullptr;
    _block = nullptr;
!   _first_return = true;
    _replaced_nodes_for_exceptions = false;
    _new_idx = C->unique();
    DEBUG_ONLY(_entry_bci = UnknownBci);
    DEBUG_ONLY(_block_count = -1);
    DEBUG_ONLY(_blocks = (Block*)-1);
--- 390,26 ---
    }
  }
  
  //------------------------------Parse------------------------------------------
  // Main parser constructor.
! Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, PEAState* caller_state)
    : _exits(caller)
  {
    // Init some variables
    _caller = caller;
+   _caller_state = caller_state;
    _method = parse_method;
    _expected_uses = expected_uses;
    _depth = 1 + (caller->has_method() ? caller->depth() : 0);
    _wrote_final = false;
    _wrote_volatile = false;
    _wrote_stable = false;
    _wrote_fields = false;
    _alloc_with_final = nullptr;
    _block = nullptr;
!   _first_return = 0;
    _replaced_nodes_for_exceptions = false;
    _new_idx = C->unique();
    DEBUG_ONLY(_entry_bci = UnknownBci);
    DEBUG_ONLY(_block_count = -1);
    DEBUG_ONLY(_blocks = (Block*)-1);

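Note: two constructor changes interact here. The new caller_state parameter lets an inlined callee begin from the caller's allocation state, and _first_return changes from a bool to a counter because return_current() (last hunk below) reuses it as the exit phi's input index. A minimal toy model of the caller-state threading, with illustrative names rather than HotSpot's:

    // Toy sketch: the callee parser starts from a copy of the caller's
    // PEA state, so objects still virtual at the call site remain
    // tracked (and potentially virtual) while parsing the callee.
    struct PEAState { /* object id -> Virtual/Escaped, field values... */ };

    struct Parser {
      PEAState _state;
      explicit Parser(const PEAState* caller_state)
        : _state(caller_state != nullptr ? *caller_state : PEAState()) {}
    };
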
*** 413,10 ***
--- 418,16 ---
      // Make sure I have an inline tree, so I can print messages about it.
      InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
    }
    _max_switch_depth = 0;
    _est_switch_depth = 0;
+ 
+   if (TraceOptoParse) {
+     tty->print_raw("Parsing method ");
+     parse_method->print_name(tty);
+     tty->print_cr(" {");
+   }
  #endif
  
    if (parse_method->has_reserved_stack_access()) {
      C->set_has_reserved_stack_access(true);
    }

*** 528,10 ***
--- 539,15 ---
      assert(!this->is_osr_parse(), "no recursive OSR");
    }
  #endif
  
  #ifndef PRODUCT
+   // Dump the CFG in RPO order before parsing.
+   if (Verbose && !CITraceTypeFlow) {
+     _flow->rpo_print_on(tty);
+   }
+ 
    if (_flow->has_irreducible_entry()) {
      C->set_parsed_irreducible_loop(true);
    }
  
    methods_parsed++;

*** 603,12 ***
    // We begin parsing as if we have just encountered a jump to the
    // method entry.
    Block* entry_block = start_block();
    assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
    set_map_clone(entry_map);
-   merge_common(entry_block, entry_block->next_path_num());
  
  #ifndef PRODUCT
    BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
    set_parse_histogram( parse_histogram_obj );
  #endif
  
--- 619,12 ---
    // We begin parsing as if we have just encountered a jump to the
    // method entry.
    Block* entry_block = start_block();
    assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
    set_map_clone(entry_map);
  
+   merge_common(entry_block, entry_block->next_path_num());
  #ifndef PRODUCT
    BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
    set_parse_histogram( parse_histogram_obj );
  #endif
  

*** 631,10 ***
--- 647,38 ---
  
    if (log)  log->done("parse nodes='%d' live='%d' memory='" SIZE_FORMAT "'",
                        C->unique(), C->live_nodes(), C->node_arena()->used());
  }
  
+ #ifndef PRODUCT
+ Parse::~Parse() {
+   if (TraceOptoParse) {
+     tty->print("} // ");
+     method()->print_short_name(tty);
+     tty->cr();
+   }
+ 
+   if (DoPartialEscapeAnalysis && PEAVerbose) {
+     PEAState& as = _exits.jvms()->alloc_state();
+     auto objs = PEA()->all_objects();
+     for (int i = 0; i < objs.length(); ++i) {
+       ObjID obj = objs.at(i);
+ 
+       if (as.contains(obj)) {
+         ObjectState* os = as.get_object_state(obj);
+         tty->print("%4d | Obj%d\t", i, obj->_idx);
+ 
+         if (os->is_virtual()) {
+           tty->print_cr("V");
+         } else {
+           tty->print_cr("M");
+         }
+       }
+     }
+   }
+ }
+ #endif
+ 
  //---------------------------do_all_blocks-------------------------------------
  void Parse::do_all_blocks() {
    bool has_irreducible = flow()->has_irreducible_entry();
  
    // Walk over all blocks in Reverse Post-Order.

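Note: with TraceOptoParse the constructor now prints an opening marker and the new destructor prints the matching closer, so nested inline parses render as a brace tree. Under PEAVerbose the destructor also dumps the final state of every tracked object: 'V' for still virtual, 'M' for materialized. Illustrative output shape (not taken from a real run):

    Parsing method java.lang.String::isEmpty {
      ...
    } // java.lang.String::isEmpty
       0 | Obj25    V
       1 | Obj31    M
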
*** 660,10 ***
--- 704,15 ---
  
        NOT_PRODUCT(blocks_parsed++);
  
        progress = true;
        if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
+         // Mark all live objects in the map 'Escaped' before installing phi nodes.
+         if (DoPartialEscapeAnalysis && block->is_loop_head()) {
+           PEAState& as = jvms()->alloc_state();
+           as.mark_all_live_objects_escaped(PEA(), map());
+         }
          // Not all preds have been parsed.  We must build phis everywhere.
          // (Note that dead locals do not get phis built, ever.)
          ensure_phis_everywhere();
  
          if (block->is_SEL_head()) {

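Note: the backedge of a loop has not been parsed when the loop head is first merged, so PEA cannot know whether an object stays virtual around the loop; marking every live tracked object 'Escaped' before phis are installed is the conservative answer. A self-contained toy of the rule:

    #include <map>

    // Toy PEA lattice: Virtual < Escaped. At a loop head the backedge
    // state is unknown, so every tracked object is forced to Escaped
    // (which is what may trigger materialization in the real code).
    enum class EscState { Virtual, Escaped };
    using AllocState = std::map<int, EscState>;  // object id -> state

    void mark_all_live_objects_escaped(AllocState& as) {
      for (auto& entry : as) {
        entry.second = EscState::Escaped;
      }
    }
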
*** 1002,19 ***
    //
    if (method()->is_initializer() &&
         (wrote_final() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
!     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
! 
!     // If Memory barrier is created for final fields write
!     // and allocation node does not escape the initialize method,
!     // then barrier introduced by allocation node can be removed.
!     if (DoEscapeAnalysis && alloc_with_final()) {
!       AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
!       alloc->compute_MemBar_redundancy(method());
      }
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name();
        tty->print_cr(" writes finals and needs a memory barrier");
      }
    }
--- 1051,29 ---
    //
    if (method()->is_initializer() &&
         (wrote_final() ||
           (AlwaysSafeConstructors && wrote_fields()) ||
           (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
!     if (!DoPartialEscapeAnalysis) {
!       _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
!       // If Memory barrier is created for final fields write
!       // and allocation node does not escape the initialize method,
!       // then barrier introduced by allocation node can be removed.
!       if (DoEscapeAnalysis && alloc_with_final()) {
!         AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
!         alloc->compute_MemBar_redundancy(method());
!       }
+     } else {
+       // Under PEA, alloc_with_final() stores an ObjID.
+       AllocateNode* alloc = (ObjID)alloc_with_final();
+ 
+       if (DoEscapeAnalysis && alloc != nullptr) {
+         Node* obj = _exits.jvms()->alloc_state().get_java_oop(alloc);
+         _exits.insert_mem_bar(Op_MemBarRelease, obj);
+         alloc->compute_MemBar_redundancy(method());
+       }
      }
+ 
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name();
        tty->print_cr(" writes finals and needs a memory barrier");
      }
    }

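Note: without PEA, alloc_with_final() holds the allocation's oop node directly, so it can be passed straight to insert_mem_bar(). Under PEA it holds an ObjID (an AllocateNode*), and the oop that reaches the exit may be a materialized clone recorded in the allocation state, hence the get_java_oop() lookup. A condensed restatement of the new control flow (not compilable outside HotSpot; redundancy bookkeeping elided):

    Node* obj = nullptr;
    if (!DoPartialEscapeAnalysis) {
      obj = alloc_with_final();                        // the oop node itself
    } else {
      AllocateNode* alloc = (ObjID)alloc_with_final(); // ObjID key, not an oop
      obj = _exits.jvms()->alloc_state().get_java_oop(alloc);
    }
    _exits.insert_mem_bar(Op_MemBarRelease, obj);
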
*** 1038,11 ***
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
    if (tf()->range()->cnt() > TypeFunc::Parms) {
      const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
!     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because
        // otherwise this could lead to an infinite compile loop.
--- 1097,22 ---
    // Clean up input MergeMems created by transforming the slices
    _gvn.transform(_exits.merged_memory());
  
    if (tf()->range()->cnt() > TypeFunc::Parms) {
      const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
!     Node* const ret_phi_old = _exits.argument(0);
+     Node*       ret_phi  = _gvn.transform(ret_phi_old);
+     if (DoPartialEscapeAnalysis && ret_phi_old != ret_phi) {
+       PEAState& as = _exits.jvms()->alloc_state();
+       EscapedState* es = as.as_escaped(PEA(), ret_phi_old);
+ 
+       if (es != nullptr && es->merged_value() == ret_phi_old) {
+         es->update(ret_phi);
+         ObjID obj = PEA()->is_alias(ret_phi_old);
+         PEA()->add_alias(obj, ret_phi);
+       }
+     }
      if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
        // If the type we set for the ret_phi in build_exits() is too optimistic and
        // the ret_phi is top now, there's an extremely small chance that it may be due to class
        // loading.  It could also be due to an error, so mark this method as not compilable because
        // otherwise this could lead to an infinite compile loop.

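Note: _gvn.transform() may return a node other than the one passed in; when that happens, PEA's bookkeeping for an escaped return value still points at the dead phi. The added code repoints both the EscapedState's merged value and the alias table. A toy of the alias repointing, with illustrative types:

    #include <map>

    // Toy alias table: ideal node id -> tracked object id. When GVN
    // replaces a node, the replacement must inherit the alias entry or
    // the object becomes untracked at the method exit.
    std::map<int, int> alias_table;

    void on_gvn_replaced(int old_node, int new_node) {
      auto it = alias_table.find(old_node);
      if (it != alias_table.end()) {
        alias_table[new_node] = it->second;  // cf. PEA()->add_alias(obj, ret_phi)
      }
    }
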
*** 1119,11 ***
--- 1189,19 ---
      SafePointNode* ex_map;
      while ((ex_map = caller.pop_exception_state()) != nullptr) {
        _exits.add_exception_state(ex_map);
      }
    }
+ 
    _exits.map()->apply_replaced_nodes(_new_idx);
+   // Don't trust the replaced-nodes list: return_current() may mess it up.
+   // Use the AllocationState to update it instead.
+   if (DoPartialEscapeAnalysis) {
+     PEAState& as = _exits.jvms()->alloc_state();
+     SafePointNode* map = _exits.map();
+     backfill_materialized(map, TypeFunc::Parms, map->req(), as);
+   }
  }
  
  //-----------------------------create_entry_map-------------------------------
  // Initialize our parser map to contain the types at method entry.
  // For OSR, the map contains a single RawPtr parameter.

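Note: replaced-nodes tracking is the parser's usual mechanism for propagating improved values to callers, but return merging can leave it stale, so the exit map is instead patched directly from the allocation state. A sketch of the presumed shape of backfill_materialized(), using only the PEAState queries visible elsewhere in this patch (illustrative only):

    // For each map slot, if the node aliases an object the final state
    // knows was materialized, substitute the materialized oop for the
    // stale input.
    for (uint i = TypeFunc::Parms; i < map->req(); ++i) {
      Node* n = map->in(i);
      ObjID obj = PEA()->is_alias(n);
      if (obj != nullptr && as.contains(obj) &&
          !as.get_object_state(obj)->is_virtual()) {
        map->set_req(i, as.get_java_oop(obj));
      }
    }
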
*** 1154,10 ***
--- 1232,14 ---
  
    assert(method() != nullptr, "parser must have a method");
  
    // Create an initial safepoint to hold JVM state during parsing
    JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
+   if (_caller != nullptr && DoPartialEscapeAnalysis) {
+     jvms->alloc_state() = _caller->alloc_state();
+   }
+ 
    set_map(new SafePointNode(len, jvms));
    jvms->set_map(map());
    record_for_igvn(map());
    assert(jvms->endoff() == len, "correct jvms sizing");
  

*** 1301,11 ***
    _num_successors = 0;
    _all_successors = 0;
    _successors = nullptr;
    assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
    assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
!   assert(_live_locals.size() == 0, "sanity");
  
    // entry point has additional predecessor
    if (flow()->is_start())  _pred_count++;
    assert(flow()->is_start() == (this == outer->start_block()), "");
  }
--- 1383,11 ---
    _num_successors = 0;
    _all_successors = 0;
    _successors = nullptr;
    assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
    assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
!   assert(!_live_locals.is_valid(), "sanity");
  
    // entry point has additional predecessor
    if (flow()->is_start())  _pred_count++;
    assert(flow()->is_start() == (this == outer->start_block()), "");
  }

*** 1317,12 ***
    GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
    int ns = tfs->length();
    int ne = tfe->length();
    _num_successors = ns;
    _all_successors = ns+ne;
!   _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
-   int p = 0;
    for (int i = 0; i < ns+ne; i++) {
      ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
      Block* block2 = outer->rpo_at(tf2->rpo());
      _successors[i] = block2;
  
--- 1399,11 ---
    GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
    int ns = tfs->length();
    int ne = tfe->length();
    _num_successors = ns;
    _all_successors = ns+ne;
!   _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
    for (int i = 0; i < ns+ne; i++) {
      ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
      Block* block2 = outer->rpo_at(tf2->rpo());
      _successors[i] = block2;
  

*** 1344,10 ***
--- 1425,21 ---
        if (block1 == block2)  continue;  // duplicates are OK
        assert(block1->start() != block2->start(), "successors have unique bcis");
      }
      #endif
    }
+ 
+   if (DoPartialEscapeAnalysis) {
+     GrowableArray<ciTypeFlow::Block*>* tfp = flow()->predecessors();
+     int np = tfp->length();
+     _predecessors = np > 0 ? NEW_RESOURCE_ARRAY(Block*, np) : nullptr;
+     for (int i = 0; i < np; ++i) {
+       ciTypeFlow::Block* tf2 = tfp->at(i);
+       Block* block2 = outer->rpo_at(tf2->rpo());
+       _predecessors[i] = block2;
+     }
+   }
  }
  
  //---------------------------successor_for_bci---------------------------------
  Parse::Block* Parse::Block::successor_for_bci(int bci) {
    for (int i = 0; i < all_successors(); i++) {

*** 1372,18 ***
  }
  
  
  //-----------------------------local_type_at-----------------------------------
  const Type* Parse::Block::local_type_at(int i) const {
!   // Make dead locals fall to bottom.
!   if (_live_locals.size() == 0) {
!     MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
!     // This bitmap can be zero length if we saw a breakpoint.
-     // In such cases, pretend they are all live.
-     ((Block*)this)->_live_locals = live_locals;
-   }
-   if (_live_locals.size() > 0 && !_live_locals.at(i))
      return Type::BOTTOM;
  
    return get_type(flow()->local_type_at(i));
  }
  
--- 1464,14 ---
  }
  
  
  //-----------------------------local_type_at-----------------------------------
  const Type* Parse::Block::local_type_at(int i) const {
!   // This bitmap can be zero length if we saw a breakpoint.
!   // In such cases, pretend they are all live.
!   auto live_locals = liveness();
!   if (live_locals.size() > 0 && !live_locals.at(i))
      return Type::BOTTOM;
  
    return get_type(flow()->local_type_at(i));
  }
  

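Note: the old code computed and cached the liveness bitmap inline, keyed on size() == 0, which conflates "not yet computed" with "zero-length because a breakpoint was seen". The new code distinguishes them with is_valid() and hides the caching behind a liveness() accessor. A hypothetical reconstruction of that accessor, based on the code it replaced:

    MethodLivenessResult Parse::Block::liveness() const {
      if (!_live_locals.is_valid()) {
        // Compute on first use and cache. A zero-length result means a
        // breakpoint was seen; callers treat that as "all locals live".
        MethodLivenessResult live_locals =
            flow()->outer()->method()->liveness_at_bci(start());
        const_cast<Block*>(this)->_live_locals = live_locals;
      }
      return _live_locals;
    }
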
*** 1495,14 ***
    set_sp( block->start_sp());
  }
  
  
  //-----------------------------record_state------------------------------------
! void Parse::Block::record_state(Parse* p) {
    assert(!is_merged(), "can only record state once, on 1st inflow");
    assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
    set_start_map(p->stop());
  }
  
  
  //------------------------------do_one_block-----------------------------------
  void Parse::do_one_block() {
--- 1583,17 ---
    set_sp( block->start_sp());
  }
  
  
  //-----------------------------record_state------------------------------------
! void Parse::Block::record_state(Parse* p, int pnum) {
    assert(!is_merged(), "can only record state once, on 1st inflow");
    assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
    set_start_map(p->stop());
+ 
+   _from_block = p->block();
+   _init_pnum = pnum;
  }
  
  
  //------------------------------do_one_block-----------------------------------
  void Parse::do_one_block() {

*** 1523,10 ***
--- 1614,16 ---
        tty->print("  irreducible");
      }
      tty->cr();
    }
  
+ #ifndef PRODUCT
+   if (PEAVerbose) {
+     PEAState& as = jvms()->alloc_state();
+     as.print_on(tty);
+   }
+ #endif
    assert(block()->is_merged(), "must be merged before being parsed");
    block()->mark_parsed();
  
    // Set iterator to start of block.
    iter().reset_to_bci(block()->start());

*** 1680,19 ***
  
  //--------------------------merge_common---------------------------------------
  void Parse::merge_common(Parse::Block* target, int pnum) {
    if (TraceOptoParse) {
      tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
    }
  
    // Zap extra stack slots to top
    assert(sp() == target->start_sp(), "");
    clean_stack(sp());
  
    if (!target->is_merged()) {   // No prior mapping at this bci
-     if (TraceOptoParse) { tty->print(" with empty state");  }
- 
      // If this path is dead, do not bother capturing it as a merge.
      // It is "as if" we had 1 fewer predecessors from the beginning.
      if (stopped()) {
        if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
        return;
--- 1777,23 ---
  
  //--------------------------merge_common---------------------------------------
  void Parse::merge_common(Parse::Block* target, int pnum) {
    if (TraceOptoParse) {
      tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
+     if (!target->is_merged()) {
+       tty->print(" with empty state");
+     } else {
+       tty->print(" with previous state");
+     }
+     tty->print_cr(" on path %d", pnum);
    }
  
    // Zap extra stack slots to top
    assert(sp() == target->start_sp(), "");
    clean_stack(sp());
  
    if (!target->is_merged()) {   // No prior mapping at this bci
      // If this path is dead, do not bother capturing it as a merge.
      // It is "as if" we had 1 fewer predecessors from the beginning.
      if (stopped()) {
        if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
        return;

*** 1730,15 ***
        target->copy_irreducible_status_to(r, jvms());
        set_parse_bci(current_bci); // Restore bci
      }
  
      // Convert the existing Parser mapping into a mapping at this bci.
!     store_state_to(target);
      assert(target->is_merged(), "do not come here twice");
! 
    } else {                      // Prior mapping at this bci
!     if (TraceOptoParse) {  tty->print(" with previous state"); }
  #ifdef ASSERT
      if (target->is_SEL_head()) {
        target->mark_merged_backedge(block());
      }
  #endif
--- 1831,17 ---
        target->copy_irreducible_status_to(r, jvms());
        set_parse_bci(current_bci); // Restore bci
      }
  
      // Convert the existing Parser mapping into a mapping at this bci.
!     store_state_to(target, pnum);
      assert(target->is_merged(), "do not come here twice");
! #ifdef ASSERT
+     target->state().validate();
+ #endif
    } else {                      // Prior mapping at this bci
! 
  #ifdef ASSERT
      if (target->is_SEL_head()) {
        target->mark_merged_backedge(block());
      }
  #endif

*** 1775,11 ***
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
!     for (uint j = 1; j < newin->req(); j++) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
        if (m->is_Phi() && m->as_Phi()->region() == r)
          phi = m->as_Phi();
--- 1878,15 ---
      }
  
      // Update all the non-control inputs to map:
      assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
      bool check_elide_phi = target->is_SEL_backedge(save_block);
!     PEAState& pred_as = newin->jvms()->alloc_state();
+     PEAState& as = block()->state();
+     AllocationStateMerger as_merger(as);
+ 
+     for (uint j = 1; j < newin->req(); ++j) {
        Node* m = map()->in(j);   // Current state of target.
        Node* n = newin->in(j);   // Incoming change to target state.
        PhiNode* phi;
        if (m->is_Phi() && m->as_Phi()->region() == r)
          phi = m->as_Phi();

*** 1806,10 ***
--- 1913,18 ---
                // This assert also tests that nodes are BoxLock.
                assert(BoxLockNode::same_slot(n, m), "sanity");
                C->gvn_replace_by(n, m);
              } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
                phi = ensure_phi(j, nophi);
+ 
+               // We merge allocation states according to section 5.2.4 of the dissertation.
+               // Because C2's Parse merges basic blocks itself, we have to intercept phi
+               // creation here, or the PEA MergeProcessor would create duplicate phi nodes.
+               if (DoPartialEscapeAnalysis && phi != nullptr) {
+                 PartialEscapeAnalysis* pea = PEA();
+                 as_merger.merge_at_phi_creation(pea, pred_as, phi, m, n);
+               } // DoPartialEscapeAnalysis
              }
            }
            break;
          }
        }

*** 1819,10 ***
--- 1934,11 ---
        // It is a bug if we create a phi which sees a garbage value on a live path.
  
        if (phi != nullptr) {
          assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
          assert(phi->region() == r, "");
+ 
          phi->set_req(pnum, n);  // Then add 'n' to the merge
          if (pnum == PhiNode::Input) {
            // Last merge for this Phi.
            // So far, Phis have had a reasonable type from ciTypeFlow.
            // Now _gvn will join that with the meet of current inputs.

*** 1836,28 ***
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
      if (pnum == PhiNode::Input &&
          !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);
  
      // newin has been subsumed into the lazy merge, and is now dead.
      set_block(save_block);
  
      stop();                     // done with this guy, for now
    }
  
-   if (TraceOptoParse) {
-     tty->print_cr(" on path %d", pnum);
-   }
- 
    // Done with this parser state.
    assert(stopped(), "");
  }
  
  
--- 1952,32 ---
            record_for_igvn(phi);
          }
        }
      } // End of for all values to be merged
  
+ 
+     if (DoPartialEscapeAnalysis) {
+       as_merger.merge(pred_as, this, r, pnum);
+     }
+ 
      if (pnum == PhiNode::Input &&
          !r->in(0)) {         // The occasional useless Region
        assert(control() == r, "");
        set_control(r->nonnull_req());
      }
  
      map()->merge_replaced_nodes_with(newin);
  
+ #ifdef ASSERT
+     block()->state().validate();
+ #endif
      // newin has been subsumed into the lazy merge, and is now dead.
      set_block(save_block);
  
      stop();                     // done with this guy, for now
    }
  
    // Done with this parser state.
    assert(stopped(), "");
  }
  
  

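Note: this is the heart of the change. Slot-by-slot value merging and allocation-state merging must happen together: the AllocationStateMerger is told about each phi the moment the parser creates it (merge_at_phi_creation), so the later wholesale merge() does not synthesize a second phi for the same slot. The object-state join itself follows the usual partial-escape rule; a self-contained toy:

    // Toy join for one tracked object (after Stadler et al.):
    // virtual + virtual  -> still virtual (fields merged via phis),
    // anything + escaped -> escaped (virtual side gets materialized).
    enum class St { Virtual, Escaped };

    St merge_object_state(St a, St b) {
      if (a == St::Virtual && b == St::Virtual) {
        return St::Virtual;
      }
      return St::Escaped;
    }
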
*** 1971,29 ***
    if (!map->control()->is_Region())
      return pred_count()+1;  // there may be a region some day
    RegionNode* r = map->control()->as_Region();
  
    // Add new path to the region.
!   uint pnum = r->req();
    r->add_req(nullptr);
  
!   for (uint i = 1; i < map->req(); i++) {
!     Node* n = map->in(i);
!     if (i == TypeFunc::Memory) {
        // Ensure a phi on all currently known memories.
        for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
          Node* phi = mms.memory();
!         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
            assert(phi->req() == pnum, "must be same size as region");
            phi->add_req(nullptr);
          }
        }
!     } else {
!       if (n->is_Phi() && n->as_Phi()->region() == r) {
!         assert(n->req() == pnum, "must be same size as region");
-         n->add_req(nullptr);
-       }
      }
    }
  
    return pnum;
  }
--- 2091,28 ---
    if (!map->control()->is_Region())
      return pred_count()+1;  // there may be a region some day
    RegionNode* r = map->control()->as_Region();
  
    // Add new path to the region.
!   const uint pnum = r->req();
    r->add_req(nullptr);
  
!   for (DUIterator_Fast imax, i = r->fast_outs(imax); i < imax; i++) {
!     Node* n = r->fast_out(i);
! 
+     if (n->is_MergeMem()) {
        // Ensure a phi on all currently known memories.
        for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
          Node* phi = mms.memory();
!         if (phi->is_Phi() && phi->as_Phi()->region() == r && phi->req() <= pnum) {
            assert(phi->req() == pnum, "must be same size as region");
            phi->add_req(nullptr);
          }
        }
!     } else if (n->is_Phi() && n->as_Phi()->region() == r && n->req() <= pnum) {
!       assert(n->req() == pnum, "must be same size as region");
!       n->add_req(nullptr);
      }
    }
  
    return pnum;
  }

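Note: add_new_path() used to grow only the phis reachable from the map's inputs; PEA can create field phis on the same region that the map does not reference, so the loop now walks the region's def-use edges instead. Because a phi can be reached more than once that way, the req() <= pnum test makes each phi grow exactly once per new path. A toy model of that guard:

    #include <vector>

    // 'pnum' is the region's input count before the new path was added.
    // A phi already grown in this pass has size pnum + 1, so the guard
    // skips it on a second visit.
    struct Phi { std::vector<int> in; };

    void grow_once(Phi& phi, size_t pnum) {
      if (phi.in.size() <= pnum) {
        phi.in.push_back(0);  // add_req(nullptr)
      }
    }
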
*** 2048,10 ***
--- 2167,11 ---
  
    PhiNode* phi = PhiNode::make(region, o, t);
    gvn().set_type(phi, t);
    if (C->do_escape_analysis()) record_for_igvn(phi);
    map->set_req(idx, phi);
+ 
    return phi;
  }
  
  //--------------------------ensure_memory_phi----------------------------------
  // Turn the idx'th slice of the current memory into a Phi

*** 2249,15 ***
      // here.
      Node* phi = _exits.argument(0);
      phi->add_req(value);
    }
  
!   if (_first_return) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
!     _first_return = false;
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
    }
  
    stop_and_kill_map();          // This CFG path dies here
  }
  
--- 2369,30 ---
      // here.
      Node* phi = _exits.argument(0);
      phi->add_req(value);
    }
  
!   if (_first_return++ == 0) {
      _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
!     // Copy-assign the current allocation state to the exit state.
+     _exits.jvms()->alloc_state() = jvms()->alloc_state();
    } else {
      _exits.map()->merge_replaced_nodes_with(map());
+ 
+     if (DoPartialEscapeAnalysis) {
+       PEAState& as = _exits.jvms()->alloc_state();
+       PEAState& newin = jvms()->alloc_state();
+       AllocationStateMerger mp(as);
+       // If 'value' is a tracked object and PEA needs to create a phi node to merge it,
+       // we need to use _exits.argument(0).
+       ObjID obj = PEA()->is_alias(value);
+       if (as.contains(obj) && newin.contains(obj)) {
+         Node* phi = _exits.argument(0);
+         mp.merge_at_phi_creation(PEA(), newin, phi->as_Phi(), phi->in(_first_return-1), value);
+       }
+       mp.merge(newin, &_exits, _exits.control()->as_Region(), _first_return);
+     }
    }
  
    stop_and_kill_map();          // This CFG path dies here
  }
  
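Note: return_current() now uses _first_return both as a "paths seen" counter and, after the increment, as the exit phi's size: the value just merged sits at input _first_return - 1, and the allocation states are merged on path _first_return. A toy model of the multi-return pattern:

    #include <vector>

    // Path 0 seeds the exit state wholesale; every later return appends
    // its value to the exit phi and merges allocation states at that slot.
    struct Exit {
      int returns_seen = 0;        // plays the role of _first_return
      std::vector<int> ret_phi;

      void on_return(int value) {
        ret_phi.push_back(value);  // slot index == returns_seen
        if (returns_seen++ == 0) {
          // transfer replaced nodes / copy allocation state
        } else {
          // merge replaced nodes / merge allocation states on this path
        }
      }
    };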