< prev index next >

src/hotspot/share/opto/callGenerator.cpp

Print this page
*** 33,10 ***
--- 33,11 ---
  #include "opto/addnode.hpp"
  #include "opto/callGenerator.hpp"
  #include "opto/callnode.hpp"
  #include "opto/castnode.hpp"
  #include "opto/cfgnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "opto/subnode.hpp"
  #include "runtime/sharedRuntime.hpp"

*** 116,21 ***
  // Internal class which handles all out-of-line calls w/o receiver type checks.
  class DirectCallGenerator : public CallGenerator {
   private:
    CallStaticJavaNode* _call_node;
    // Force separate memory and I/O projections for the exceptional
!   // paths to facilitate late inlinig.
    bool                _separate_io_proj;
  
  protected:
    void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
  
   public:
    DirectCallGenerator(ciMethod* method, bool separate_io_proj)
      : CallGenerator(method),
        _separate_io_proj(separate_io_proj)
    {
    }
    virtual JVMState* generate(JVMState* jvms);
  
    virtual CallNode* call_node() const { return _call_node; }
    virtual CallGenerator* with_call_node(CallNode* call) {
--- 117,30 ---
  // Internal class which handles all out-of-line calls w/o receiver type checks.
  class DirectCallGenerator : public CallGenerator {
   private:
    CallStaticJavaNode* _call_node;
    // Force separate memory and I/O projections for the exceptional
!   // paths to facilitate late inlining.
    bool                _separate_io_proj;
  
  protected:
    void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
  
   public:
    DirectCallGenerator(ciMethod* method, bool separate_io_proj)
      : CallGenerator(method),
+       _call_node(NULL),
        _separate_io_proj(separate_io_proj)
    {
+     if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
+       // If that call has not been optimized by the time optimizations are over,
+       // we'll need to add a call to create an inline type instance from the klass
+       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
+       // Separating memory and I/O projections for exceptions is required to
+       // perform that graph transformation.
+       _separate_io_proj = true;
+     }
    }
    virtual JVMState* generate(JVMState* jvms);
  
    virtual CallNode* call_node() const { return _call_node; }
    virtual CallGenerator* with_call_node(CallNode* call) {

*** 141,10 ***
--- 151,11 ---
  };
  
  JVMState* DirectCallGenerator::generate(JVMState* jvms) {
    GraphKit kit(jvms);
    kit.C->print_inlining_update(this);
+   PhaseGVN& gvn = kit.gvn();
    bool is_static = method()->is_static();
    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                               : SharedRuntime::get_resolve_opt_virtual_call_stub();
  
    if (kit.C->log() != NULL) {

*** 173,11 ***
      if (method()->is_method_handle_intrinsic() ||
          method()->is_compiled_lambda_form()) {
        call->set_method_handle_invoke(true);
      }
    }
!   kit.set_arguments_for_java_call(call);
    kit.set_edges_for_java_call(call, false, _separate_io_proj);
    Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
    kit.push_node(method()->return_type()->basic_type(), ret);
    return kit.transfer_exceptions_into_jvms();
  }
--- 184,14 ---
      if (method()->is_method_handle_intrinsic() ||
          method()->is_compiled_lambda_form()) {
        call->set_method_handle_invoke(true);
      }
    }
!   kit.set_arguments_for_java_call(call, is_late_inline());
+   if (kit.stopped()) {
+     return kit.transfer_exceptions_into_jvms();
+   }
    kit.set_edges_for_java_call(call, false, _separate_io_proj);
    Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
    kit.push_node(method()->return_type()->basic_type(), ret);
    return kit.transfer_exceptions_into_jvms();
  }

*** 214,11 ***
  };
  
  JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
    GraphKit kit(jvms);
    Node* receiver = kit.argument(0);
- 
    kit.C->print_inlining_update(this);
  
    if (kit.C->log() != NULL) {
      kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
    }
--- 228,10 ---

*** 226,11 ***
    // If the receiver is a constant null, do not torture the system
    // by attempting to call through it.  The compile will proceed
    // correctly, but may bail out in final_graph_reshaping, because
    // the call instruction will have a seemingly deficient out-count.
    // (The bailout says something misleading about an "infinite loop".)
!   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
      assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
      ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
      int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
      kit.inc_sp(arg_size);  // restore arguments
      kit.uncommon_trap(Deoptimization::Reason_null_check,
--- 239,11 ---
    // If the receiver is a constant null, do not torture the system
    // by attempting to call through it.  The compile will proceed
    // correctly, but may bail out in final_graph_reshaping, because
    // the call instruction will have a seemingly deficient out-count.
    // (The bailout says something misleading about an "infinite loop".)
!   if (!receiver->is_InlineType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
      assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
      ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
      int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
      kit.inc_sp(arg_size);  // restore arguments
      kit.uncommon_trap(Deoptimization::Reason_null_check,

*** 274,10 ***
--- 287,13 ---
      call->set_override_symbolic_info(true);
    }
    _call_node = call;  // Save the call node in case we need it later
  
    kit.set_arguments_for_java_call(call);
+   if (kit.stopped()) {
+     return kit.transfer_exceptions_into_jvms();
+   }
    kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
    Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
    kit.push_node(method()->return_type()->basic_type(), ret);
  
    // Represent the effect of an implicit receiver null_check

*** 367,10 ***
--- 383,14 ---
  
    virtual jlong unique_id() const {
      return _unique_id;
    }
  
+   virtual CallGenerator* inline_cg() {
+     return _inline_cg;
+   }
+ 
    virtual CallGenerator* with_call_node(CallNode* call) {
      LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
      cg->set_call_node(call->as_CallStaticJava());
      return cg;
    }

*** 421,10 ***
--- 441,17 ---
    bool input_not_const = true;
    CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
    assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
  
    if (cg != NULL) {
+     // AlwaysIncrementalInline causes for_method_handle_inline() to
+     // return a LateInlineCallGenerator. Extract the
+     // InlineCallGenerator from it.
+     if (AlwaysIncrementalInline && cg->is_late_inline()) {
+       cg = cg->inline_cg();
+     }
+ 
      assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
      _inline_cg = cg;
      C->dec_number_of_mh_late_inlines();
      return true;
    } else {

*** 631,13 ***
    if (call == NULL || call->outcnt() == 0 ||
        call->in(0) == NULL || call->in(0)->is_top()) {
      return;
    }
  
!   const TypeTuple *r = call->tf()->domain();
!   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
!     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
    }
  
--- 658,13 ---
    if (call == NULL || call->outcnt() == 0 ||
        call->in(0) == NULL || call->in(0)->is_top()) {
      return;
    }
  
!   const TypeTuple* r = call->tf()->domain_cc();
!   for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
!     if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
        assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
        return;
      }
    }
  

*** 651,20 ***
        return; // dead path
      }
    }
  
    // check for unreachable loop
!   CallProjections callprojs;
!   call->extract_projections(&callprojs, true);
!   if ((callprojs.fallthrough_catchproj == call->in(0)) ||
!       (callprojs.catchall_catchproj    == call->in(0)) ||
!       (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
!       (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
!       (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
!       (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
-       (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
-       (callprojs.exobj   != NULL && call->find_edge(callprojs.exobj) != -1)) {
      return;
    }
  
    Compile* C = Compile::current();
    // Remove inlined methods from Compiler's lists.
--- 678,18 ---
        return; // dead path
      }
    }
  
    // check for unreachable loop
!   CallProjections* callprojs = call->extract_projections(true);
!   if ((callprojs->fallthrough_catchproj == call->in(0)) ||
!       (callprojs->catchall_catchproj    == call->in(0)) ||
!       (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
!       (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
!       (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
!       (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
!       (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
      return;
    }
  
    Compile* C = Compile::current();
    // Remove inlined methods from Compiler's lists.

*** 673,21 ***
    }
  
    bool result_not_used = false;
  
    if (is_pure_call()) {
!     if (is_boxing_late_inline() && callprojs.resproj != nullptr) {
          // replace box node to scalar node only in case it is directly referenced by debug info
          assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
!         if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
!           scalarize_debug_usages(call, callprojs.resproj);
          }
      }
  
      // The call is marked as pure (no important side effects), but result isn't used.
      // It's safe to remove the call.
!     result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
    }
  
    if (result_not_used) {
      GraphKit kit(call->jvms());
      kit.replace_call(call, C->top(), true);
--- 698,31 ---
    }
  
    bool result_not_used = false;
  
    if (is_pure_call()) {
!     if (is_boxing_late_inline() && callprojs->resproj[0] != nullptr) {
          // replace box node to scalar node only in case it is directly referenced by debug info
          assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
!         if (!has_non_debug_usages(callprojs->resproj[0]) && is_box_cache_valid(call)) {
!           scalarize_debug_usages(call, callprojs->resproj[0]);
          }
      }
  
      // The call is marked as pure (no important side effects), but result isn't used.
      // It's safe to remove the call.
!     result_not_used = true;
+     for (uint i = 0; i < callprojs->nb_resproj; i++) {
+       if (callprojs->resproj[i] != NULL) {
+         if (callprojs->resproj[i]->outcnt() != 0) {
+           result_not_used = false;
+         }
+         if (call->find_edge(callprojs->resproj[i]) != -1) {
+           return;
+         }
+       }
+     }
    }
  
    if (result_not_used) {
      GraphKit kit(call->jvms());
      kit.replace_call(call, C->top(), true);

*** 699,30 ***
      SafePointNode* map = new SafePointNode(size, jvms);
      for (uint i1 = 0; i1 < size; i1++) {
        map->init_req(i1, call->in(i1));
      }
  
      // Make sure the state is a MergeMem for parsing.
      if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
        Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
!       C->initial_gvn()->set_type_bottom(mem);
        map->set_req(TypeFunc::Memory, mem);
      }
  
-     uint nargs = method()->arg_size();
      // blow away old call arguments
!     Node* top = C->top();
!     for (uint i1 = 0; i1 < nargs; i1++) {
-       map->set_req(TypeFunc::Parms + i1, top);
      }
      jvms->set_map(map);
  
      // Make enough space in the expression stack to transfer
      // the incoming arguments and return value.
      map->ensure_stack(jvms, jvms->method()->max_stack());
      for (uint i1 = 0; i1 < nargs; i1++) {
!       map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
      }
  
      C->print_inlining_assert_ready();
  
      C->print_inlining_move_to(this);
--- 734,44 ---
      SafePointNode* map = new SafePointNode(size, jvms);
      for (uint i1 = 0; i1 < size; i1++) {
        map->init_req(i1, call->in(i1));
      }
  
+     PhaseGVN& gvn = *C->initial_gvn();
      // Make sure the state is a MergeMem for parsing.
      if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
        Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
!       gvn.set_type_bottom(mem);
        map->set_req(TypeFunc::Memory, mem);
      }
  
      // blow away old call arguments
!     for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
!       map->set_req(i1, C->top());
      }
      jvms->set_map(map);
  
      // Make enough space in the expression stack to transfer
      // the incoming arguments and return value.
      map->ensure_stack(jvms, jvms->method()->max_stack());
+     const TypeTuple* domain_sig = call->_tf->domain_sig();
+     uint nargs = method()->arg_size();
+     assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
+ 
+     uint j = TypeFunc::Parms;
      for (uint i1 = 0; i1 < nargs; i1++) {
!       const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
+       if (method()->has_scalarized_args() && t->is_inlinetypeptr() && !t->maybe_null() && t->inline_klass()->can_be_passed_as_fields()) {
+         // Inline type arguments are not passed by reference: we get an argument per
+         // field of the inline type. Build InlineTypeNodes from the inline type arguments.
+         GraphKit arg_kit(jvms, &gvn);
+         InlineTypeNode* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, true);
+         map->set_control(arg_kit.control());
+         map->set_argument(jvms, i1, vt);
+       } else {
+         map->set_argument(jvms, i1, call->in(j++));
+       }
      }
  
      C->print_inlining_assert_ready();
  
      C->print_inlining_move_to(this);

*** 734,10 ***
--- 783,28 ---
        map->disconnect_inputs(C);
        C->print_inlining_update_delayed(this);
        return;
      }
  
+     // Check if we are late inlining a method handle call that returns an inline type as fields.
+     Node* buffer_oop = NULL;
+     ciType* mh_rt = inline_cg()->method()->return_type();
+     if (is_mh_late_inline() && mh_rt->is_inlinetype() && mh_rt->as_inline_klass()->can_be_returned_as_fields()) {
+       // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
+       // Do this before the method handle call in case the buffer allocation triggers deoptimization and
+       // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
+       GraphKit arg_kit(jvms, &gvn);
+       {
+         PreserveReexecuteState preexecs(&arg_kit);
+         arg_kit.jvms()->set_should_reexecute(true);
+         arg_kit.inc_sp(nargs);
+         Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(mh_rt->as_inline_klass()));
+         buffer_oop = arg_kit.new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true);
+       }
+       jvms = arg_kit.transfer_exceptions_into_jvms();
+     }
+ 
      // Setup default node notes to be picked up by the inlining
      Node_Notes* old_nn = C->node_notes_at(call->_idx);
      if (old_nn != NULL) {
        Node_Notes* entry_nn = old_nn->clone(C);
        entry_nn->set_jvms(jvms);

*** 763,10 ***
--- 830,37 ---
        C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
        C->env()->notice_inlined_method(inline_cg()->method());
      }
      C->set_inlining_progress(true);
      C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
+ 
+     // Handle inline type returns
+     InlineTypeNode* vt = result->isa_InlineType();
+     if (vt != NULL) {
+       if (call->tf()->returns_inline_type_as_fields()) {
+         vt->replace_call_results(&kit, call, C);
+       } else {
+         // Result might still be allocated (for example, if it has been stored to a non-flattened field)
+         if (!vt->is_allocated(&kit.gvn())) {
+           assert(buffer_oop != NULL, "should have allocated a buffer");
+           vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
+           // Do not let stores that initialize this buffer be reordered with a subsequent
+           // store that would make this buffer accessible by other threads.
+           AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop, &kit.gvn());
+           assert(alloc != NULL, "must have an allocation node");
+           kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
+           kit.gvn().hash_delete(vt);
+           vt->set_oop(buffer_oop);
+           vt = kit.gvn().transform(vt)->as_InlineType();
+         }
+         DEBUG_ONLY(buffer_oop = NULL);
+         // Convert to InlineTypePtrNode to keep track of field values
+         result = vt->as_ptr(&kit.gvn());
+       }
+     }
+     assert(buffer_oop == NULL, "unused buffer allocation");
+ 
      kit.replace_call(call, result, true);
    }
  }
  
  class LateInlineStringCallGenerator : public LateInlineCallGenerator {

*** 987,10 ***
--- 1081,32 ---
      // Inlined method threw an exception, so it's just the slow path after all.
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  
+   // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
+   uint tos = kit.jvms()->stkoff() + kit.sp();
+   uint limit = slow_map->req();
+   for (uint i = TypeFunc::Parms; i < limit; i++) {
+     Node* m = kit.map()->in(i);
+     Node* n = slow_map->in(i);
+     const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
+     if (m->is_InlineType() && !t->isa_inlinetype()) {
+       // Allocate inline type in fast path
+       m = m->as_InlineType()->buffer(&kit);
+       kit.map()->set_req(i, m);
+     }
+     if (n->is_InlineType() && !t->isa_inlinetype()) {
+       // Allocate inline type in slow path
+       PreserveJVMState pjvms(&kit);
+       kit.set_map(slow_map);
+       n = n->as_InlineType()->buffer(&kit);
+       kit.map()->set_req(i, n);
+       slow_map = kit.stop();
+     }
+   }
+ 
    // There are 2 branches and the replaced nodes are only valid on
    // one: restore the replaced nodes to what they were before the
    // branch.
    kit.map()->set_replaced_nodes(replaced_nodes);
  

*** 1010,12 ***
      Node* phi = mms.memory();
      if (phi->is_Phi() && phi->in(0) == region) {
        mms.set_memory(gvn.transform(phi));
      }
    }
-   uint tos = kit.jvms()->stkoff() + kit.sp();
-   uint limit = slow_map->req();
    for (uint i = TypeFunc::Parms; i < limit; i++) {
      // Skip unused stack slots; fast forward to monoff();
      if (i == tos) {
        i = kit.jvms()->monoff();
        if( i >= limit ) break;
--- 1126,10 ---

*** 1047,19 ***
    }
    int bci = jvms->bci();
    ciCallProfile profile = caller->call_profile_at_bci(bci);
    int call_site_count = caller->scale_count(profile.count());
  
!   if (IncrementalInlineMH && call_site_count > 0 &&
!       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      // Out-of-line call.
      return CallGenerator::for_direct_call(callee);
    }
  }
  
  class NativeCallGenerator : public CallGenerator {
  private:
    address _call_addr;
    ciNativeEntryPoint* _nep;
  public:
--- 1161,38 ---
    }
    int bci = jvms->bci();
    ciCallProfile profile = caller->call_profile_at_bci(bci);
    int call_site_count = caller->scale_count(profile.count());
  
!   if (IncrementalInlineMH && (AlwaysIncrementalInline ||
!                             (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      // Out-of-line call.
      return CallGenerator::for_direct_call(callee);
    }
  }
  
+ static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit, bool null_free) {
+   PhaseGVN& gvn = kit.gvn();
+   Node* arg = kit.argument(arg_nb);
+   const Type* arg_type = arg->bottom_type();
+   const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
+   if (t->as_klass()->is_inlinetype() && null_free) {
+     sig_type = sig_type->filter_speculative(TypePtr::NOTNULL);
+   }
+   if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
+     const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
+     arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
+     kit.set_argument(arg_nb, arg);
+   }
+   if (sig_type->is_inlinetypeptr() && !arg->is_InlineType()) {
+     arg = InlineTypeNode::make_from_oop(&kit, arg, t->as_inline_klass(), !kit.gvn().type(arg)->maybe_null());
+     kit.set_argument(arg_nb, arg);
+   }
+ }
+ 
  class NativeCallGenerator : public CallGenerator {
  private:
    address _call_addr;
    ciNativeEntryPoint* _nep;
  public:

*** 1110,12 ***
          }
  
          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
!                                               allow_inline,
!                                               PROB_ALWAYS);
          return cg;
        } else {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver not constant");
        }
--- 1243,14 ---
          }
  
          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
!                                               true /* allow_inline */,
!                                               PROB_ALWAYS,
+                                               NULL,
+                                               true);
          return cg;
        } else {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver not constant");
        }

*** 1125,12 ***
    case vmIntrinsics::_linkToVirtual:
    case vmIntrinsics::_linkToStatic:
    case vmIntrinsics::_linkToSpecial:
    case vmIntrinsics::_linkToInterface:
      {
        // Get MemberName argument:
!       Node* member_name = kit.argument(callee->arg_size() - 1);
        if (member_name->Opcode() == Op_ConP) {
          input_not_const = false;
          const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
          ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
  
--- 1260,13 ---
    case vmIntrinsics::_linkToVirtual:
    case vmIntrinsics::_linkToStatic:
    case vmIntrinsics::_linkToSpecial:
    case vmIntrinsics::_linkToInterface:
      {
+       int nargs = callee->arg_size();
        // Get MemberName argument:
!       Node* member_name = kit.argument(nargs - 1);
        if (member_name->Opcode() == Op_ConP) {
          input_not_const = false;
          const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
          ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
  

*** 1146,31 ***
          // actual types.
          ciSignature* signature = target->signature();
          const int receiver_skip = target->is_static() ? 0 : 1;
          // Cast receiver to its type.
          if (!target->is_static()) {
!           Node* arg = kit.argument(0);
-           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
-           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
-           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
-             const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
-             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
-             kit.set_argument(0, cast_obj);
-           }
          }
          // Cast reference arguments to its type.
          for (int i = 0, j = 0; i < signature->count(); i++) {
            ciType* t = signature->type_at(i);
            if (t->is_klass()) {
!             Node* arg = kit.argument(receiver_skip + j);
!             const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
-             const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
-             if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
-               const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
-               Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
-               kit.set_argument(receiver_skip + j, cast_obj);
-             }
            }
            j += t->size();  // long and double take two slots
          }
  
          // Try to get the most accurate receiver type
--- 1282,18 ---
          // actual types.
          ciSignature* signature = target->signature();
          const int receiver_skip = target->is_static() ? 0 : 1;
          // Cast receiver to its type.
          if (!target->is_static()) {
!           cast_argument(nargs, 0, signature->accessing_klass(), kit, false);
          }
          // Cast reference arguments to its type.
          for (int i = 0, j = 0; i < signature->count(); i++) {
            ciType* t = signature->type_at(i);
            if (t->is_klass()) {
!             bool null_free = signature->is_null_free_at(i);
!             cast_argument(nargs, receiver_skip + j, t, kit, null_free);
            }
            j += t->size();  // long and double take two slots
          }
  
          // Try to get the most accurate receiver type

*** 1197,11 ***
            speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
          }
          CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                                allow_inline,
                                                PROB_ALWAYS,
!                                               speculative_receiver_type);
          return cg;
        } else {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "member_name not constant");
        }
--- 1320,12 ---
            speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
          }
          CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                                allow_inline,
                                                PROB_ALWAYS,
!                                               speculative_receiver_type,
+                                               true);
          return cg;
        } else {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "member_name not constant");
        }

*** 1287,11 ***
    }
  
    if (!method()->is_static()) {
      // We need an explicit receiver null_check before checking its type in predicate.
      // We share a map with the caller, so his JVMS gets adjusted.
!     Node* receiver = kit.null_check_receiver_before_call(method());
      if (kit.stopped()) {
        return kit.transfer_exceptions_into_jvms();
      }
    }
  
--- 1411,11 ---
    }
  
    if (!method()->is_static()) {
      // We need an explicit receiver null_check before checking its type in predicate.
      // We share a map with the caller, so his JVMS gets adjusted.
!     kit.null_check_receiver_before_call(method());
      if (kit.stopped()) {
        return kit.transfer_exceptions_into_jvms();
      }
    }
  
< prev index next >