< prev index next >

src/hotspot/share/opto/callnode.cpp

Print this page
@@ -22,29 +22,32 @@
   *
   */
  
  #include "precompiled.hpp"
  #include "compiler/compileLog.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
  #include "ci/bcEscapeAnalyzer.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "interpreter/interpreter.hpp"
  #include "opto/callGenerator.hpp"
  #include "opto/callnode.hpp"
  #include "opto/castnode.hpp"
  #include "opto/convertnode.hpp"
  #include "opto/escape.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/locknode.hpp"
  #include "opto/machnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/parse.hpp"
  #include "opto/regalloc.hpp"
  #include "opto/regmask.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "runtime/sharedRuntime.hpp"
+ #include "runtime/stubRoutines.hpp"
  #include "utilities/powerOfTwo.hpp"
  #include "code/vmreg.hpp"
  
  // Portions of code courtesy of Clifford Click
  

@@ -76,11 +79,11 @@
    return RegMask::Empty;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for incoming parameters, and their RegMask info
- Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
+ Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

@@ -100,21 +103,10 @@
      }
    }
    return NULL;
  }
  
- //------------------------------StartOSRNode----------------------------------
- // The method start node for an on stack replacement adapter
- 
- //------------------------------osr_domain-----------------------------
- const TypeTuple *StartOSRNode::osr_domain() {
-   const Type **fields = TypeTuple::fields(2);
-   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer
- 
-   return TypeTuple::make(TypeFunc::Parms+1, fields);
- }
- 
  //=============================================================================
  const char * const ParmNode::names[TypeFunc::Parms+1] = {
    "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
  };
  

@@ -493,15 +485,30 @@
          st->print("[%d]", spobj->n_fields());
          int ndim = cik->as_array_klass()->dimension() - 1;
          while (ndim-- > 0) {
            st->print("[]");
          }
+       } else if (cik->is_flat_array_klass()) {
+         ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
+         cie->print_name_on(st);
+         st->print("[%d]", spobj->n_fields());
+         int ndim = cik->as_array_klass()->dimension() - 1;
+         while (ndim-- > 0) {
+           st->print("[]");
+         }
        }
        st->print("={");
        uint nf = spobj->n_fields();
        if (nf > 0) {
          uint first_ind = spobj->first_index(mcall->jvms());
+         if (iklass != NULL && iklass->is_inlinetype()) {
+           Node* init_node = mcall->in(first_ind++);
+           if (!init_node->is_top()) {
+             st->print(" [is_init");
+             format_helper(regalloc, st, init_node, ":", -1, NULL);
+           }
+         }
          Node* fld_node = mcall->in(first_ind);
          ciField* cifield;
          if (iklass != NULL) {
            st->print(" [");
            cifield = iklass->nonstatic_field_at(0);

@@ -706,63 +713,82 @@
    if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
    if (jvms() != NULL)  jvms()->dump_spec(st);
  }
  #endif
  
- const Type *CallNode::bottom_type() const { return tf()->range(); }
+ // The type of a call is its return tuple in the calling-convention view
+ // (range_cc), which may hold one field per returned inline type field
+ // (see CallNode::match, which creates one projection per returned value).
+ const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
  const Type* CallNode::Value(PhaseGVN* phase) const {
-   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
-   return tf()->range();
+   // Control may be absent (NULL) while the graph is being reshaped;
+   // treat a missing control input like dead control and return TOP.
+   if (!in(0) || phase->type(in(0)) == Type::TOP) {
+     return Type::TOP;
+   }
+   return tf()->range_cc();
  }
  
  //------------------------------calling_convention-----------------------------
+ // Fill in parm_regs with the register/stack assignment for the argcnt
+ // arguments described by sig_bt.
  void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
+   if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
+     // The call to that stub is a special case: its inputs are
+     // multiple values returned from a call and so it should follow
+     // the return convention.
+     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
+     return;
+   }
    // Use the standard compiler calling convention
    SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
  }
  
  
  //------------------------------match------------------------------------------
  // Construct projections for control, I/O, memory-fields, ..., and
  // return result(s) along with their RegMask info
- Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
-   switch (proj->_con) {
-   case TypeFunc::Control:
-   case TypeFunc::I_O:
-   case TypeFunc::Memory:
-     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
- 
-   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
-     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
-     // 2nd half of doubles and longs
-     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
- 
-   case TypeFunc::Parms: {       // Normal returns
-     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
-     OptoRegPair regs = Opcode() == Op_CallLeafVector
-       ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
-       : is_CallRuntime()
-         ? match->c_return_value(ideal_reg)  // Calls into C runtime
-         : match->  return_value(ideal_reg); // Calls into compiled Java code
-     RegMask rm = RegMask(regs.first());
- 
-     if (Opcode() == Op_CallLeafVector) {
-       // If the return is in vector, compute appropriate regmask taking into account the whole range
-       if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
-         if(OptoReg::is_valid(regs.second())) {
-           for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
-             rm.Insert(r);
+ Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
+   uint con = proj->_con;
+   const TypeTuple* range_cc = tf()->range_cc();
+   if (con >= TypeFunc::Parms) {
+     if (tf()->returns_inline_type_as_fields()) {
+       // The call returns multiple values (inline type fields): we
+       // create one projection per returned value.
+       assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
+       uint ideal_reg = range_cc->field_at(con)->ideal_reg();
+       return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
+     } else {
+       if (con == TypeFunc::Parms) {
+         uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
+         OptoRegPair regs = Opcode() == Op_CallLeafVector
+           ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
+           : match->c_return_value(ideal_reg);
+         RegMask rm = RegMask(regs.first());
+ 
+         if (Opcode() == Op_CallLeafVector) {
+           // If the return is in vector, compute appropriate regmask taking into account the whole range
+           if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
+             if(OptoReg::is_valid(regs.second())) {
+               for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
+                 rm.Insert(r);
+               }
+             }
            }
          }
+ 
+         if (OptoReg::is_valid(regs.second())) {
+           rm.Insert(regs.second());
+         }
+         return new MachProjNode(this,con,rm,ideal_reg);
+       } else {
+         assert(con == TypeFunc::Parms+1, "only one return value");
+         assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
+         return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
        }
      }
- 
-     if( OptoReg::is_valid(regs.second()) )
-       rm.Insert( regs.second() );
-     return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  
+   switch (con) {
+   case TypeFunc::Control:
+   case TypeFunc::I_O:
+   case TypeFunc::Memory:
+     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+ 
    case TypeFunc::ReturnAdr:
    case TypeFunc::FramePtr:
    default:
      ShouldNotReachHere();
    }

@@ -779,11 +805,11 @@
  // instance at the specified offset.
  //
  bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
    assert((t_oop != NULL), "sanity");
    if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
-     const TypeTuple* args = _tf->domain();
+     const TypeTuple* args = _tf->domain_sig();
      Node* dest = NULL;
      // Stubs that can be called once an ArrayCopyNode is expanded have
      // different signatures. Look for the second pointer argument,
      // that is the destination of the copy.
      for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {

@@ -828,11 +854,11 @@
          if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                   (inst_t->klass() == boxing_klass))) {
            return true;
          }
        }
-       const TypeTuple* d = tf()->domain();
+       const TypeTuple* d = tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
          if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                   (inst_t->klass() == boxing_klass))) {
            return true;

@@ -843,21 +869,31 @@
    }
    return true;
  }
  
  // Does this call have a direct reference to n other than debug information?
  bool CallNode::has_non_debug_use(Node* n) {
+   // Scan only the argument inputs [Parms, cnt); debug info edges come
+   // after the arguments and are checked by has_debug_use() instead.
    const TypeTuple* d = tf()->domain_cc();
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
-     Node *arg = in(i);
-     if (arg == n) {
+     if (in(i) == n) {
        return true;
      }
    }
    return false;
  }
  
+ // Does this call reference n in its debug info (JVMState) inputs?
+ // Complements has_non_debug_use(), which only scans the argument inputs.
+ bool CallNode::has_debug_use(Node* n) {
+   if (jvms() != NULL) {
+     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
+       if (in(i) == n) {
+         return true;
+       }
+     }
+   }
+   return false;
+ }
+ 
  // Returns the unique CheckCastPP of a call
  // or 'this' if there are several CheckCastPP or unexpected uses
  // or returns NULL if there is no one.
  Node *CallNode::result_cast() {
    Node *cast = NULL;

@@ -885,20 +921,25 @@
    }
    return cast;
  }
  
  
- void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
-   projs->fallthrough_proj      = NULL;
-   projs->fallthrough_catchproj = NULL;
-   projs->fallthrough_ioproj    = NULL;
-   projs->catchall_ioproj       = NULL;
-   projs->catchall_catchproj    = NULL;
-   projs->fallthrough_memproj   = NULL;
-   projs->catchall_memproj      = NULL;
-   projs->resproj               = NULL;
-   projs->exobj                 = NULL;
+ CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
+   uint max_res = TypeFunc::Parms-1;
+   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+     ProjNode *pn = fast_out(i)->as_Proj();
+     max_res = MAX2(max_res, pn->_con);
+   }
+ 
+   assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
+ 
+   uint projs_size = sizeof(CallProjections);
+   if (max_res > TypeFunc::Parms) {
+     projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
+   }
+   char* projs_storage = resource_allocate_bytes(projs_size);
+   CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
  
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      ProjNode *pn = fast_out(i)->as_Proj();
      if (pn->outcnt() == 0) continue;
      switch (pn->_con) {

@@ -940,30 +981,33 @@
          projs->catchall_memproj = pn;
        else
          projs->fallthrough_memproj = pn;
        break;
      case TypeFunc::Parms:
-       projs->resproj = pn;
+       projs->resproj[0] = pn;
        break;
      default:
-       assert(false, "unexpected projection from allocation node.");
+       assert(pn->_con <= max_res, "unexpected projection from allocation node.");
+       projs->resproj[pn->_con-TypeFunc::Parms] = pn;
+       break;
      }
    }
  
    // The resproj may not exist because the result could be ignored
    // and the exception object may not exist if an exception handler
    // swallows the exception but all the other must exist and be found.
-   assert(projs->fallthrough_proj      != NULL, "must be found");
    do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
+   assert(!do_asserts || projs->fallthrough_proj      != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
    if (separate_io_proj) {
      assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
      assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
    }
+   return projs;
  }
  
  Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  #ifdef ASSERT
    // Validate attached generator

@@ -991,12 +1035,12 @@
           _override_symbolic_info == call._override_symbolic_info;
  }
  
  void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
    // Copy debug information and adjust JVMState information
-   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
-   uint new_dbg_start = tf()->domain()->cnt();
+   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
+   uint new_dbg_start = tf()->domain_sig()->cnt();
    int jvms_adj  = new_dbg_start - old_dbg_start;
    assert (new_dbg_start == req(), "argument count mismatch");
    Compile* C = phase->C;
  
    // SafePointScalarObject node could be referenced several times in debug info.

@@ -1033,10 +1077,14 @@
  #ifdef ASSERT
  bool CallJavaNode::validate_symbolic_info() const {
    if (method() == NULL) {
      return true; // call into runtime or uncommon trap
    }
+   Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
+   if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
+     return true;
+   }
    ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
    ciMethod* callee = method();
    if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
      assert(override_symbolic_info(), "should be set");
    }

@@ -1066,10 +1114,20 @@
    CallStaticJavaNode &call = (CallStaticJavaNode&)n;
    return CallJavaNode::cmp(call);
  }
  
  Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   if (can_reshape && uncommon_trap_request() != 0) {
+     if (remove_useless_allocation(phase, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
+       if (!in(0)->is_Region()) {
+         PhaseIterGVN* igvn = phase->is_IterGVN();
+         igvn->replace_input_of(this, 0, phase->C->top());
+       }
+       return this;
+     }
+   }
+ 
    CallGenerator* cg = generator();
    if (can_reshape && cg != NULL) {
      assert(IncrementalInlineMH, "required");
      assert(cg->call_node() == this, "mismatch");
      assert(cg->is_mh_late_inline(), "not virtual");

@@ -1119,10 +1177,134 @@
    }
  #endif
    return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
  }
  
+ bool CallStaticJavaNode::remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg) {
+   // The split-if optimization can cause the flat array branch of an
+   // array load to end in an uncommon trap. In that case, the
+   // allocation of the loaded value and its initialization are useless.
+   // Eliminate them: use the JVM state of the allocation to create a
+   // new uncommon trap call at the load. Returns true if the graph was
+   // changed (the caller then kills this uncommon trap's control path).
+   if (ctl == NULL || ctl->is_top() || mem == NULL || mem->is_top() || !mem->is_MergeMem()) {
+     return false;
+   }
+   PhaseIterGVN* igvn = phase->is_IterGVN();
+   if (ctl->is_Region()) {
+     // Recurse over each incoming control path. Memory Phis merged at
+     // this Region are rewritten to their per-path input so each branch
+     // sees its own memory state.
+     bool res = false;
+     for (uint i = 1; i < ctl->req(); i++) {
+       MergeMemNode* mm = mem->clone()->as_MergeMem();
+       for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
+         Node* m = mms.memory();
+         if (m->is_Phi() && m->in(0) == ctl) {
+           mms.set_memory(m->in(i));
+         }
+       }
+       if (remove_useless_allocation(phase, ctl->in(i), mm, unc_arg)) {
+         res = true;
+         if (!ctl->in(i)->is_Region()) {
+           igvn->replace_input_of(ctl, i, phase->C->top());
+         }
+       }
+       igvn->remove_dead_node(mm);
+     }
+     return res;
+   }
+   // verify the control flow is ok: walk up through projections, catch
+   // nodes and membars until we hit the load_unknown_inline call whose
+   // allocation we want to remove; anything else means we must give up.
+   Node* call = ctl;
+   MemBarNode* membar = NULL;
+   for (;;) {
+     if (call == NULL || call->is_top()) {
+       return false;
+     }
+     if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
+       call = call->in(0);
+     } else if (call->Opcode() == Op_CallStaticJava &&
+                call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
+       assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
+       membar = call->in(0)->in(0)->as_MemBar();
+       break;
+     } else {
+       return false;
+     }
+   }
+ 
+   // Don't re-introduce a trap at a site that already deoptimized too often.
+   JVMState* jvms = call->jvms();
+   if (phase->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
+     return false;
+   }
+ 
+   Node* alloc_mem = call->in(TypeFunc::Memory);
+   if (alloc_mem == NULL || alloc_mem->is_top()) {
+     return false;
+   }
+   if (!alloc_mem->is_MergeMem()) {
+     alloc_mem = MergeMemNode::make(alloc_mem);
+     igvn->register_new_node_with_optimizer(alloc_mem);
+   }
+ 
+   // and that there's no unexpected side effect: every memory slice at
+   // the trap must trace back to the call's input memory with nothing
+   // but projections/membars (and the call itself) in between.
+   for (MergeMemStream mms2(mem->as_MergeMem(), alloc_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
+     Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
+     Node* m2 = mms2.memory2();
+ 
+     // NOTE(review): the 100-step cap bounds the walk, but if it is
+     // exhausted without reaching m2 the loop falls through as if the
+     // slice had been verified — confirm non-convergence cannot let an
+     // unverified memory state slip past (a `return false` would be the
+     // conservative exit).
+     for (uint i = 0; i < 100; i++) {
+       if (m1 == m2) {
+         break;
+       } else if (m1->is_Proj()) {
+         m1 = m1->in(0);
+       } else if (m1->is_MemBar()) {
+         m1 = m1->in(TypeFunc::Memory);
+       } else if (m1->Opcode() == Op_CallStaticJava &&
+                  m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
+         if (m1 != call) {
+           return false;
+         }
+         break;
+       } else if (m1->is_MergeMem()) {
+         MergeMemNode* mm = m1->as_MergeMem();
+         int idx = mms2.alias_idx();
+         if (idx == Compile::AliasIdxBot) {
+           m1 = mm->base_memory();
+         } else {
+           m1 = mm->memory_at(idx);
+         }
+       } else {
+         return false;
+       }
+     }
+   }
+   if (alloc_mem->outcnt() == 0) {
+     igvn->remove_dead_node(alloc_mem);
+   }
+ 
+   // Remove membar preceding the call
+   membar->remove(igvn);
+ 
+   // Build a replacement uncommon trap call using the load call's state
+   // (control, memory, frame) and the caller-supplied trap request arg.
+   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
+   CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", NULL);
+   unc->init_req(TypeFunc::Control, call->in(0));
+   unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
+   unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
+   unc->init_req(TypeFunc::FramePtr,  call->in(TypeFunc::FramePtr));
+   unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
+   unc->init_req(TypeFunc::Parms+0, unc_arg);
+   unc->set_cnt(PROB_UNLIKELY_MAG(4));
+   unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
+ 
+   // Disconnect the now useless load call; the trap replaces it.
+   igvn->replace_input_of(call, 0, phase->C->top());
+ 
+   igvn->register_new_node_with_optimizer(unc);
+ 
+   // An uncommon trap never returns: terminate its control with a Halt.
+   Node* ctrl = phase->transform(new ProjNode(unc, TypeFunc::Control));
+   Node* halt = phase->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
+   phase->C->root()->add_req(halt);
+ 
+   return true;
+ }
+ 
+ 
  #ifndef PRODUCT
  void CallStaticJavaNode::dump_spec(outputStream *st) const {
    st->print("# Static ");
    if (_name != NULL) {
      st->print("%s", _name);

@@ -1228,21 +1410,21 @@
  bool CallNativeNode::cmp( const Node &n ) const {
    CallNativeNode &call = (CallNativeNode&)n;
    return CallNode::cmp(call) && !strcmp(_name,call._name)
      && _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
  }
- Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher) {
+ Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher, const RegMask* mask) {
    switch (proj->_con) {
      case TypeFunc::Control:
      case TypeFunc::I_O:
      case TypeFunc::Memory:
        return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
      case TypeFunc::ReturnAdr:
      case TypeFunc::FramePtr:
        ShouldNotReachHere();
      case TypeFunc::Parms: {
-       const Type* field_at_con = tf()->range()->field_at(proj->_con);
+       const Type* field_at_con = tf()->range_sig()->field_at(proj->_con);
        const BasicType bt = field_at_con->basic_type();
        OptoReg::Name optoreg = OptoReg::as_OptoReg(_ret_regs.at(proj->_con - TypeFunc::Parms));
        OptoRegPair regs;
        if (bt == T_DOUBLE || bt == T_LONG) {
          regs.set2(optoreg);

@@ -1253,11 +1435,11 @@
        if(OptoReg::is_valid(regs.second()))
          rm.Insert(regs.second());
        return new MachProjNode(this, proj->_con, rm, field_at_con->ideal_reg());
      }
      case TypeFunc::Parms + 1: {
-       assert(tf()->range()->field_at(proj->_con) == Type::HALF, "Expected HALF");
+       assert(tf()->range_sig()->field_at(proj->_con) == Type::HALF, "Expected HALF");
        assert(_ret_regs.at(proj->_con - TypeFunc::Parms) == VMRegImpl::Bad(), "Unexpected register for Type::HALF");
        // 2nd half of doubles and longs
        return new MachProjNode(this, proj->_con, RegMask::Empty, (uint) OptoReg::Bad);
      }
      default:

@@ -1288,18 +1470,25 @@
  }
  #endif
  
  //------------------------------calling_convention-----------------------------
  void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
+   if (_entry_point == NULL) {
+     // A NULL entry point marks a call whose target is passed in a
+     // register (see CallLeafNoFPNode::match_edge). Such a call is a
+     // special case: its inputs are multiple values returned from a
+     // call and so it should follow the return convention.
+     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
+     return;
+   }
    SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
  }
  
  void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  #ifdef ASSERT
-   assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
+   assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "return vector size must match");
-   const TypeTuple* d = tf()->domain();
+   const TypeTuple* d = tf()->domain_sig();
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
      Node* arg = in(i);
      assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
             "vector argument size must match");
    }

@@ -1307,14 +1496,14 @@
  
    SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
  }
  
  void CallNativeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
-   assert((tf()->domain()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
+   assert((tf()->domain_sig()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
  #ifdef ASSERT
    for (uint i = 0; i < argcnt; i++) {
-     assert(tf()->domain()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
+     assert(tf()->domain_sig()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
    }
  #endif
    for (uint i = 0; i < argcnt; i++) {
      switch (sig_bt[i]) {
        case T_BOOLEAN:

@@ -1353,10 +1542,16 @@
    st->print("%s", _name);
    CallNode::dump_spec(st);
  }
  #endif
  
+ // Tell the matcher which input edges to treat as operands: with a NULL
+ // entry point the call target lives at input TypeFunc::Parms.
+ uint CallLeafNoFPNode::match_edge(uint idx) const {
+   // Null entry point is a special case for which the target is in a
+   // register. Need to match that edge.
+   return entry_point() == NULL && idx == TypeFunc::Parms;
+ }
+ 
  //=============================================================================
  
  void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    int loc = jvms->locoff() + idx;

@@ -1403,11 +1598,24 @@
  
  //------------------------------Ideal------------------------------------------
  // Skip over any collapsed Regions
  Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    assert(_jvms == NULL || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
-   return remove_dead_region(phase, can_reshape) ? this : NULL;
+   if (remove_dead_region(phase, can_reshape)) {
+     return this;
+   }
+   // Scalarize inline types in safepoint debug info.
+   // Delay this until all inlining is over to avoid getting inconsistent debug info.
+   if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != NULL) {
+     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
+       Node* n = in(i)->uncast();
+       if (n->is_InlineTypeBase()) {
+         // NOTE(review): make_scalar_in_safepoints appears to rewrite
+         // the debug edges in place via the IterGVN, which is why no
+         // replacement node is returned here — confirm.
+         n->as_InlineTypeBase()->make_scalar_in_safepoints(phase->is_IterGVN());
+       }
+     }
+   }
+   return NULL;
  }
  
  //------------------------------Identity---------------------------------------
  // Remove obviously duplicate safepoints
  Node* SafePointNode::Identity(PhaseGVN* phase) {

@@ -1576,11 +1784,11 @@
  #ifdef ASSERT
    , _alloc(alloc)
  #endif
  {
  #ifdef ASSERT
-   if (!alloc->is_Allocate()
+   if (alloc != NULL && !alloc->is_Allocate()
        && !(alloc->Opcode() == Op_VectorBox)
        && (!alloc->is_CallStaticJava() || !alloc->as_CallStaticJava()->is_boxing_method())) {
      alloc->dump();
      assert(false, "unexpected call node");
    }

@@ -1635,18 +1843,21 @@
  //=============================================================================
  uint AllocateNode::size_of() const { return sizeof(*this); }
  
  AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                             Node *ctrl, Node *mem, Node *abio,
-                            Node *size, Node *klass_node, Node *initial_test)
+                            Node *size, Node *klass_node,
+                            Node* initial_test,
+                            InlineTypeBaseNode* inline_type_node)
    : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
  {
    init_class_id(Class_Allocate);
    init_flags(Flag_is_macro);
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _is_allocation_MemBar_redundant = false;
+   _larval = false;
    Node *topnode = C->top();
  
    init_req( TypeFunc::Control  , ctrl );
    init_req( TypeFunc::I_O      , abio );
    init_req( TypeFunc::Memory   , mem );

@@ -1654,39 +1865,53 @@
    init_req( TypeFunc::FramePtr , topnode );
    init_req( AllocSize          , size);
    init_req( KlassNode          , klass_node);
    init_req( InitialTest        , initial_test);
    init_req( ALength            , topnode);
+   init_req( InlineTypeNode     , inline_type_node);
+   // DefaultValue defaults to NULL
+   // RawDefaultValue defaults to NULL
    C->add_macro_node(this);
  }
  
+ // Using bytecode escape analysis of the initializer, record whether the
+ // allocation's MemBar is redundant: if the newly allocated object (the
+ // initializer's argument 0) stays arg-stack/arg-local, it cannot escape
+ // through the constructor.
  void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
  {
    assert(initializer != NULL &&
           initializer->is_object_constructor_or_class_initializer(),
           "unexpected initializer method");
+   // No escape analysis available => conservatively keep the MemBar.
    BCEscapeAnalyzer* analyzer = initializer->get_bcea();
    if (analyzer == NULL) {
      return;
    }
  
    // Allocation node is first parameter in its initializer
    if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
      _is_allocation_MemBar_redundant = true;
    }
  }
- Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
+ 
+ // Build the ideal subgraph producing the initial mark word for this
+ // allocation.
+ Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
    Node* mark_node = NULL;
-   // For now only enable fast locking for non-array types
-   mark_node = phase->MakeConX(markWord::prototype().value());
-   return mark_node;
+   if (EnableValhalla) {
+     // Under Valhalla the prototype header is loaded from the Klass
+     // rather than being a single compile-time constant — presumably
+     // because it differs per klass (e.g. inline/flat-array klasses);
+     // confirm against Klass::prototype_header.
+     Node* klass_node = in(AllocateNode::KlassNode);
+     Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
+     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+   } else {
+     mark_node = phase->MakeConX(markWord::prototype().value());
+   }
+   mark_node = phase->transform(mark_node);
+   // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
+   return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
  }
  
+ 
  //=============================================================================
  Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-   if (remove_dead_region(phase, can_reshape))  return this;
+   Node* res = SafePointNode::Ideal(phase, can_reshape);
+   if (res != NULL) {
+     return res;
+   }
    // Don't bother trying to transform a dead node
    if (in(0) && in(0)->is_top())  return NULL;
  
    const Type* type = phase->type(Ideal_length());
    if (type->isa_int() && type->is_int()->_hi < 0) {

@@ -2111,11 +2336,13 @@
    // Now see if we can optimize away this lock.  We don't actually
    // remove the locking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the lock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
-   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
+   const Type* obj_type = phase->type(obj_node());
+   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
+       !obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr()) {
      //
      // If we are locking an non-escaped object, the lock/unlock is unnecessary
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {

@@ -2307,11 +2534,13 @@
    // remove the unlocking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the unlock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
    // Escape state is defined after Parse phase.
-   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
+   const Type* obj_type = phase->type(obj_node());
+   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
+       !obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr()) {
      //
      // If we are unlocking an non-escaped object, the lock/unlock is unnecessary.
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {

@@ -2387,11 +2616,12 @@
      if (elem == Type::BOTTOM) {
        // An array but we don't know what elements are
        return true;
      }
  
-     dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
+     dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
+     t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
      uint dest_alias = phase->C->get_alias_index(dest_t);
      uint t_oop_alias = phase->C->get_alias_index(t_oop);
  
      return dest_alias == t_oop_alias;
    }
< prev index next >