< prev index next >

src/hotspot/share/opto/parse1.cpp

Print this page
@@ -24,10 +24,11 @@
  
  #include "compiler/compileLog.hpp"
  #include "interpreter/linkResolver.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/method.hpp"
+ #include "oops/trainingData.hpp"
  #include "opto/addnode.hpp"
  #include "opto/c2compiler.hpp"
  #include "opto/castnode.hpp"
  #include "opto/idealGraphPrinter.hpp"
  #include "opto/locknode.hpp"

@@ -36,10 +37,11 @@
  #include "opto/parse.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
  #include "opto/type.hpp"
  #include "runtime/handles.inline.hpp"
+ #include "runtime/runtimeUpcalls.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "utilities/bitMap.inline.hpp"
  #include "utilities/copy.hpp"
  

@@ -1130,13 +1132,22 @@
  
    // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
    _caller->map()->delete_replaced_nodes();
  
    // If this is an inlined method, we may have to do a receiver null check.
-   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
+   if (_caller->has_method() && is_normal_parse()) {
      GraphKit kit(_caller);
-     kit.null_check_receiver_before_call(method());
+     if (!method()->is_static()) {
+       kit.null_check_receiver_before_call(method());
+     } else if (C->do_clinit_barriers() && C->needs_clinit_barrier(method()->holder(), _caller->method())) {
+       ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
+       const int nargs = declared_method->arg_size();
+       kit.inc_sp(nargs);
+       Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
+       kit.guard_klass_is_initialized(holder);
+       kit.dec_sp(nargs);
+     }
      _caller = kit.transfer_exceptions_into_jvms();
      if (kit.stopped()) {
        _exits.add_exception_states_from(_caller);
        _exits.set_jvms(_caller);
        return nullptr;

@@ -1192,10 +1203,123 @@
  
    SafePointNode* entry_map = stop();
    return entry_map;
  }
  
+ #if INCLUDE_CDS
+ // Map the invocation limit recorded during training onto the 32-bit counter
+ // compared against at runtime.
+ static int scale_limit(int64_t limit) {
+  // To scale invocation limit a hyperbolic saturation curve formula
+  // y = limit / (1 + limit / C) is used: approximately linear for small
+  // limits and saturating at C = 100000.0 * AOTCodeInvokeScale ("upper
+  // limit 100K" for a scale of 1) for large ones. AOTCodeInvokeBase is
+  // added as a fixed offset, so the result always fits into an int.
+  return (int)(AOTCodeInvokeBase + limit / (1.0 + limit / (100000.0 * AOTCodeInvokeScale)));
+ }
+ 
+ // Emit invocation-counting code at the entry of a top-level compile
+ // (depth() == 1) when UseAOTCodeCounters is on:
+ //  - Training run (AOTRecordTraining): increment a 64-bit per-method counter
+ //    of C2-compiled code invocations in the MethodCounters.
+ //  - AOT assembly phase (C->env()->is_precompile()): increment a 32-bit
+ //    AOT-code invocation counter and, once it reaches the limit scaled from
+ //    the recorded training data, call into the runtime to request a JIT
+ //    compilation that replaces the AOT code. A "recompile requested" flag
+ //    keeps the upcall from being taken repeatedly (racy by design, see the
+ //    comment at the flag check below).
+ void Parse::count_aot_code_calls() {
+   bool is_aot_compilation = C->env()->is_precompile();
+   if (UseAOTCodeCounters && (depth() == 1) && (AOTRecordTraining || is_aot_compilation)) {
+     // Count nmethod invocations during training run and compare to
+     // invocations of AOT code during production run to trigger JIT compilation
+     // and replace AOT code with normal JITed code.
+     ciMetadata* mcp = method()->ensure_method_counters();
+     precond(mcp != nullptr);
+     const TypePtr* mc_type = TypeMetadataPtr::make(TypePtr::Constant, mcp, 0);
+     Node* mc = makecon(mc_type);
+     if (!is_aot_compilation) { // training
+       // Count C2 compiled code invocations (use 64 bits)
+       Node* cnt_adr = basic_plus_adr(C->top(), mc, in_bytes(MethodCounters::jit_code_invocation_counter_offset()));
+       Node* ctrl = control();
+       Node* cnt  = make_load(ctrl, cnt_adr, TypeLong::LONG, T_LONG, MemNode::unordered);
+       Node* incr = _gvn.transform(new AddLNode(cnt, longcon(1)));
+       store_to_memory(ctrl, cnt_adr, incr, T_LONG, MemNode::unordered);
+ 
+     } else { // assembly phase
+       // Clear out dead values from the debug info in following runtime call
+       kill_dead_locals();
+ 
+       // The limit comes from the training data recorded for this method.
+       precond(MethodTrainingData::have_data());
+       methodHandle mh(Thread::current(), method()->get_Method());
+       MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
+       precond(mtd != nullptr);
+       int64_t limit = mtd->aot_code_invocation_limit();
+       int scaled_limit = scale_limit(limit);
+       Node* lim = intcon(scaled_limit);
+ 
+       // Count AOT compiled code invocations (use 32 bits because scaled limit fits into 32 bits)
+       Node* cnt_adr = basic_plus_adr(C->top(), mc, in_bytes(MethodCounters::aot_code_invocation_counter_offset()));
+       Node* ctrl = control();
+       Node* cnt  = make_load(ctrl, cnt_adr, TypeInt::INT, T_INT, MemNode::unordered);
+       Node* incr = _gvn.transform(new AddINode(cnt, intcon(1)));
+       store_to_memory(ctrl, cnt_adr, incr, T_INT, MemNode::unordered);
+       // Preserve memory for Phi node below
+       Node* st_mem = MergeMemNode::make(map()->memory());
+       _gvn.set_type_bottom(st_mem);
+ 
+       // PROB_ALWAYS: the counter is expected to still be below the limit
+       // on the vast majority of invocations.
+       Node* chk = _gvn.transform( new CmpINode(incr, lim) );
+       Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::lt) );
+       IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, (float)limit);
+ 
+       // Merge of the three paths: req 1 = counter still below the limit,
+       // req 2 = recompilation already requested on an earlier invocation,
+       // req 3 = fall through after the compile_method runtime call.
+       RegionNode* result_rgn = new RegionNode(4);
+       record_for_igvn(result_rgn);
+ 
+       Node*  skip_call = _gvn.transform(new IfTrueNode(iff));
+       result_rgn->init_req(1, skip_call);
+ 
+       Node* in1_io  = i_o();
+       Node* in1_mem = st_mem;
+       // These two phis are pre-filled with copies of the fast IO and Memory
+       Node* io_phi   = PhiNode::make(result_rgn, in1_io,  Type::ABIO);
+       Node* mem_phi  = PhiNode::make(result_rgn, in1_mem, Type::MEMORY, TypePtr::BOTTOM);
+ 
+       Node* needs_call = _gvn.transform(new IfFalseNode(iff));
+       set_control(needs_call);
+ 
+       // Check if we already requested compilation.
+       ByteSize flag_offset = MethodCounters::aot_code_recompile_requested_offset();
+       Node* flag_adr = basic_plus_adr(C->top(), mc, in_bytes(flag_offset));
+ 
+       // Load old value to check and store new (+1) unconditionally.
+       // It is fine if few threads see initial 0 value and request compilation:
+       // CompileBroker checks if such compilation is already in compilation queue.
+       Node* old_val = make_load(control(), flag_adr, TypeInt::INT, T_INT, MemNode::unordered);
+       Node* new_val = _gvn.transform(new AddINode(old_val, intcon(1)));
+       store_to_memory(control(), flag_adr, new_val, T_INT, MemNode::unordered);
+ 
+       Node* chk2 = _gvn.transform( new CmpINode(old_val, intcon(0)) );
+       Node* tst2 = _gvn.transform( new BoolNode(chk2, BoolTest::ne) );
+       IfNode* iff2 = create_and_map_if(control(), tst2, PROB_FAIR, COUNT_UNKNOWN);
+ 
+       Node*  skip_call2 = _gvn.transform(new IfTrueNode(iff2));
+       result_rgn->init_req(2, skip_call2);
+ 
+       Node* needs_call2 = _gvn.transform(new IfFalseNode(iff2));
+       set_control(needs_call2);
+ 
+       // Request compilation of this method from the runtime, passing the
+       // Method* as the only argument.
+       const TypePtr* m_type = TypeMetadataPtr::make(method());
+       Node* m = makecon(m_type);
+       Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
+                           OptoRuntime::compile_method_Type(),
+                           OptoRuntime::compile_method_Java(),
+                           "compile_method", TypePtr::BOTTOM, m);
+ 
+       // State before call
+       Node* in_io  = call->in(TypeFunc::I_O);
+       Node* in_mem = call->in(TypeFunc::Memory);
+       io_phi ->init_req(2, in_io);
+       mem_phi->init_req(2, in_mem);
+ 
+       // State after call
+       result_rgn->init_req(3, control());
+       io_phi ->init_req(3, i_o());
+       mem_phi->init_req(3, reset_memory());
+ 
+       // Re-install the merged control, I/O and memory state into the map.
+       set_all_memory( _gvn.transform(mem_phi) );
+       set_i_o(        _gvn.transform(io_phi) );
+       set_control(    _gvn.transform(result_rgn) );
+     }
+   }
+ }
+ #endif
+ 
  //-----------------------------do_method_entry--------------------------------
  // Emit any code needed in the pseudo-block before BCI zero.
  // The main thing to do is lock the receiver of a synchronized method.
  void Parse::do_method_entry() {
    set_parse_bci(InvocationEntryBci); // Pseudo-BCP

@@ -1205,10 +1329,12 @@
  
    if (C->env()->dtrace_method_probes()) {
      make_dtrace_method_entry(method());
    }
  
+   install_on_method_entry_runtime_upcalls(method());
+ 
  #ifdef ASSERT
    // Narrow receiver type when it is too broad for the method being parsed.
    if (!method()->is_static()) {
      ciInstanceKlass* callee_holder = method()->holder();
      const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);

@@ -1262,10 +1388,12 @@
    }
  
    // Feed profiling data for parameters to the type system so it can
    // propagate it as speculative types
    record_profiled_parameters_for_speculation();
+ 
+   CDS_ONLY( count_aot_code_calls(); )
  }
  
  //------------------------------init_blocks------------------------------------
  // Initialize our parser map to contain the types/monitors at method entry.
  void Parse::init_blocks() {

@@ -1590,11 +1718,11 @@
  
      do_one_bytecode();
      if (failing()) return;
  
      assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
-            "incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth);
+            "incorrect depth prediction: bc=%s bci=%d, sp=%d, pre_bc_sp=%d, depth=%d", Bytecodes::name(bc()), bci(), sp(), pre_bc_sp, depth);
  
      do_exceptions();
  
      NOT_PRODUCT( parse_histogram()->record_change(); );
  

@@ -2172,10 +2300,13 @@
    set_control( _gvn.transform(result_rgn) );
  }
  
  // Add check to deoptimize once holder klass is fully initialized.
  void Parse::clinit_deopt() {
+   if (method()->holder()->is_initialized()) {
+     return; // in case do_clinit_barriers() is true
+   }
    assert(C->has_method(), "only for normal compilations");
    assert(depth() == 1, "only for main compiled method");
    assert(is_normal_parse(), "no barrier needed on osr entry");
    assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
  
< prev index next >