
src/hotspot/share/opto/runtime.cpp


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/vmClasses.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/nmethod.hpp"
  30 #include "code/pcDesc.hpp"
  31 #include "code/scopeDesc.hpp"
  32 #include "code/vtableStubs.hpp"
  33 #include "compiler/compilationMemoryStatistic.hpp"
  34 #include "compiler/compileBroker.hpp"

  35 #include "compiler/oopMap.hpp"
  36 #include "gc/g1/g1HeapRegion.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gcLocker.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/linkResolver.hpp"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/oopFactory.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "oops/objArrayKlass.hpp"
  48 #include "oops/klass.inline.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/typeArrayOop.inline.hpp"
  51 #include "opto/ad.hpp"
  52 #include "opto/addnode.hpp"
  53 #include "opto/callnode.hpp"
  54 #include "opto/cfgnode.hpp"
  55 #include "opto/graphKit.hpp"
  56 #include "opto/machnode.hpp"
  57 #include "opto/matcher.hpp"
  58 #include "opto/memnode.hpp"
  59 #include "opto/mulnode.hpp"
  60 #include "opto/output.hpp"
  61 #include "opto/runtime.hpp"
  62 #include "opto/subnode.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "runtime/atomic.hpp"
  65 #include "runtime/frame.inline.hpp"
  66 #include "runtime/handles.inline.hpp"
  67 #include "runtime/interfaceSupport.inline.hpp"

  68 #include "runtime/javaCalls.hpp"

  69 #include "runtime/sharedRuntime.hpp"
  70 #include "runtime/signature.hpp"
  71 #include "runtime/stackWatermarkSet.hpp"
  72 #include "runtime/synchronizer.hpp"
  73 #include "runtime/threadCritical.hpp"
  74 #include "runtime/threadWXSetters.inline.hpp"
  75 #include "runtime/vframe.hpp"
  76 #include "runtime/vframeArray.hpp"
  77 #include "runtime/vframe_hp.hpp"

  78 #include "utilities/copy.hpp"
  79 #include "utilities/preserveException.hpp"
  80 
  81 
  82 // For debugging purposes:
  83 //  To force FullGCALot inside a runtime function, add the following two lines
  84 //
  85 //  Universe::release_fullgc_alot_dummy();
  86 //  Universe::heap()->collect();
  87 //
  88 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
  89 
  90 
  91 #define C2_BLOB_FIELD_DEFINE(name, type) \
  92   type OptoRuntime:: BLOB_FIELD_NAME(name)  = nullptr;
  93 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
  94 #define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  95   address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
  96 #define C2_JVMTI_STUB_FIELD_DEFINE(name) \
  97   address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
  98 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
  99 #undef C2_BLOB_FIELD_DEFINE
 100 #undef C2_STUB_FIELD_DEFINE
 101 #undef C2_JVMTI_STUB_FIELD_DEFINE
 102 

 103 #define C2_BLOB_NAME_DEFINE(name, type)  "C2 Runtime " # name "_blob",
 104 #define C2_STUB_NAME_DEFINE(name, f, t, r)  "C2 Runtime " # name,
 105 #define C2_JVMTI_STUB_NAME_DEFINE(name)  "C2 Runtime " # name,
 106 const char* OptoRuntime::_stub_names[] = {
 107   C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
 108 };
 109 #undef C2_BLOB_NAME_DEFINE
 110 #undef C2_STUB_NAME_DEFINE
 111 #undef C2_JVMTI_STUB_NAME_DEFINE
 112 




 113 // This should be called in an assertion at the start of OptoRuntime routines
 114 // which are entered from compiled code (all of them)
 115 #ifdef ASSERT
 116 static bool check_compiled_frame(JavaThread* thread) {
 117   assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
 118   RegisterMap map(thread,
 119                   RegisterMap::UpdateMap::skip,
 120                   RegisterMap::ProcessFrames::include,
 121                   RegisterMap::WalkContinuation::skip);
 122   frame caller = thread->last_frame().sender(&map);
  123   assert(caller.is_compiled_frame(), "not being called from compiled-like code");
 124   return true;
 125 }
 126 #endif // ASSERT
 127 
 128 /*
 129 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
 130   var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
 131   if (var == nullptr) { return false; }
 132 */

 159                   C2_STUB_NAME(name),                                 \
 160                   fancy_jump,                                           \
 161                   pass_tls,                                             \
 162                   pass_retpc);                                          \
 163   if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; }          \
 164 
 165 #define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
 166 
 167 #define GEN_C2_JVMTI_STUB(name)                                       \
 168   STUB_FIELD_NAME(name) =                                               \
 169     generate_stub(env,                                                  \
 170                   notify_jvmti_vthread_Type,                            \
 171                   C2_JVMTI_STUB_C_FUNC(name),                         \
 172                   C2_STUB_NAME(name),                                 \
 173                   0,                                                    \
 174                   true,                                                 \
 175                   false);                                               \
 176   if (STUB_FIELD_NAME(name) == nullptr) { return false; }               \
 177 
 178 bool OptoRuntime::generate(ciEnv* env) {

 179 
 180   C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
 181 
 182   return true;
 183 }
 184 
 185 #undef GEN_C2_BLOB
 186 
 187 #undef C2_STUB_FIELD_NAME
 188 #undef C2_STUB_TYPEFUNC
 189 #undef C2_STUB_C_FUNC
 190 #undef C2_STUB_NAME
 191 #undef GEN_C2_STUB
 192 
 193 #undef C2_JVMTI_STUB_C_FUNC
 194 #undef GEN_C2_JVMTI_STUB
 195 // #undef gen
 196 
 197 const TypeFunc* OptoRuntime::_new_instance_Type                   = nullptr;
 198 const TypeFunc* OptoRuntime::_new_array_Type                      = nullptr;

 260 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type             = nullptr;
 261 const TypeFunc* OptoRuntime::_osr_end_Type                        = nullptr;
 262 const TypeFunc* OptoRuntime::_register_finalizer_Type             = nullptr;
 263 #if INCLUDE_JFR
 264 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type          = nullptr;
 265 #endif // INCLUDE_JFR
 266 #if INCLUDE_JVMTI
 267 const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type           = nullptr;
 268 #endif // INCLUDE_JVMTI
 269 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type       = nullptr;
 270 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type            = nullptr;
 271 
  272 // Helper method to generate RuntimeStubs
 273 address OptoRuntime::generate_stub(ciEnv* env,
 274                                    TypeFunc_generator gen, address C_function,
 275                                    const char *name, int is_fancy_jump,
 276                                    bool pass_tls,
 277                                    bool return_pc) {
 278 
  279   // Use the default directive; we currently have no method to match.
 280   DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
 281   CompilationMemoryStatisticMark cmsm(directive);
 282   ResourceMark rm;
 283   Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
 284   DirectivesStack::release(directive);
 285   return  C.stub_entry_point();
 286 }
 287 
 288 const char* OptoRuntime::stub_name(address entry) {
 289 #ifndef PRODUCT
 290   CodeBlob* cb = CodeCache::find_blob(entry);
 291   RuntimeStub* rs =(RuntimeStub *)cb;
 292   assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
 293   return rs->name();
 294 #else
 295   // Fast implementation for product mode (maybe it should be inlined too)
 296   return "runtime stub";
 297 #endif
 298 }
 299 
 300 // local methods passed as arguments to stub generator that forward

 304                                    oopDesc* dest, jint dest_pos,
 305                                    jint length, JavaThread* thread) {
 306   SharedRuntime::slow_arraycopy_C(src,  src_pos, dest, dest_pos, length, thread);
 307 }
 308 
 309 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
 310   SharedRuntime::complete_monitor_locking_C(obj, lock, current);
 311 }
 312 
 313 
 314 //=============================================================================
 315 // Opto compiler runtime routines
 316 //=============================================================================
 317 
 318 
 319 //=============================allocation======================================
 320 // We failed the fast-path allocation.  Now we need to do a scavenge or GC
 321 // and try allocation again.
 322 
 323 // object allocation
 324 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
 325   JRT_BLOCK;
 326 #ifndef PRODUCT
 327   SharedRuntime::_new_instance_ctr++;         // new instance requires GC
 328 #endif
 329   assert(check_compiled_frame(current), "incorrect caller");
 330 
 331   // These checks are cheap to make and support reflective allocation.
 332   int lh = klass->layout_helper();
 333   if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
 334     Handle holder(current, klass->klass_holder()); // keep the klass alive
 335     klass->check_valid_for_instantiation(false, THREAD);
 336     if (!HAS_PENDING_EXCEPTION) {
 337       InstanceKlass::cast(klass)->initialize(THREAD);
 338     }
 339   }
 340 
 341   if (!HAS_PENDING_EXCEPTION) {
 342     // Scavenge and allocate an instance.
 343     Handle holder(current, klass->klass_holder()); // keep the klass alive
 344     oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
 345     current->set_vm_result(result);
 346 
 347     // Pass oops back through thread local storage.  Our apparent type to Java
 348     // is that we return an oop, but we can block on exit from this routine and
 349     // a GC can trash the oop in C's return register.  The generated stub will
 350     // fetch the oop from TLS after any possible GC.
 351   }
 352 
 353   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 354   JRT_BLOCK_END;
 355 
 356   // inform GC that we won't do card marks for initializing writes.
 357   SharedRuntime::on_slowpath_allocation_exit(current);
 358 JRT_END
 359 
 360 
 361 // array allocation
 362 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
 363   JRT_BLOCK;
 364 #ifndef PRODUCT
 365   SharedRuntime::_new_array_ctr++;            // new array requires GC
 366 #endif
 367   assert(check_compiled_frame(current), "incorrect caller");
 368 
 369   // Scavenge and allocate an instance.
 370   oop result;
 371 
 372   if (array_type->is_typeArray_klass()) {
 373     // The oopFactory likes to work with the element type.
 374     // (We could bypass the oopFactory, since it doesn't add much value.)
 375     BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 376     result = oopFactory::new_typeArray(elem_type, len, THREAD);
 377   } else {
 378     // Although the oopFactory likes to work with the elem_type,
 379     // the compiler prefers the array_type, since it must already have
 380     // that latter value in hand for the fast path.
 381     Handle holder(current, array_type->klass_holder()); // keep the array klass alive
 382     Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
 383     result = oopFactory::new_objArray(elem_type, len, THREAD);
 384   }
 385 
 386   // Pass oops back through thread local storage.  Our apparent type to Java
 387   // is that we return an oop, but we can block on exit from this routine and
 388   // a GC can trash the oop in C's return register.  The generated stub will
 389   // fetch the oop from TLS after any possible GC.
 390   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 391   current->set_vm_result(result);
 392   JRT_BLOCK_END;
 393 
 394   // inform GC that we won't do card marks for initializing writes.
 395   SharedRuntime::on_slowpath_allocation_exit(current);
 396 JRT_END
 397 
 398 // array allocation without zeroing
 399 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
 400   JRT_BLOCK;
 401 #ifndef PRODUCT
 402   SharedRuntime::_new_array_ctr++;            // new array requires GC
 403 #endif
 404   assert(check_compiled_frame(current), "incorrect caller");
 405 
 406   // Scavenge and allocate an instance.
 407   oop result;
 408 
 409   assert(array_type->is_typeArray_klass(), "should be called only for type array");
 410   // The oopFactory likes to work with the element type.
 411   BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 412   result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
 413 
 414   // Pass oops back through thread local storage.  Our apparent type to Java
 415   // is that we return an oop, but we can block on exit from this routine and
 416   // a GC can trash the oop in C's return register.  The generated stub will
 417   // fetch the oop from TLS after any possible GC.
 418   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 419   current->set_vm_result(result);

 431     BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 432     size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
 433     assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
 434     HeapWord* obj = cast_from_oop<HeapWord*>(result);
 435     if (!is_aligned(hs_bytes, BytesPerLong)) {
 436       *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
 437       hs_bytes += BytesPerInt;
 438     }
 439 
 440     // Optimized zeroing.
 441     assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
 442     const size_t aligned_hs = hs_bytes / BytesPerLong;
 443     Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
 444   }
 445 
 446 JRT_END
 447 
 448 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
 449 
 450 // multianewarray for 2 dimensions
 451 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
 452 #ifndef PRODUCT
  453   SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
 454 #endif
 455   assert(check_compiled_frame(current), "incorrect caller");
 456   assert(elem_type->is_klass(), "not a class");
 457   jint dims[2];
 458   dims[0] = len1;
 459   dims[1] = len2;
 460   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 461   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
 462   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 463   current->set_vm_result(obj);
 464 JRT_END
 465 
 466 // multianewarray for 3 dimensions
 467 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
 468 #ifndef PRODUCT
  469   SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
 470 #endif
 471   assert(check_compiled_frame(current), "incorrect caller");
 472   assert(elem_type->is_klass(), "not a class");
 473   jint dims[3];
 474   dims[0] = len1;
 475   dims[1] = len2;
 476   dims[2] = len3;
 477   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 478   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
 479   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 480   current->set_vm_result(obj);
 481 JRT_END
 482 
 483 // multianewarray for 4 dimensions
 484 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
 485 #ifndef PRODUCT
  486   SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
 487 #endif
 488   assert(check_compiled_frame(current), "incorrect caller");
 489   assert(elem_type->is_klass(), "not a class");
 490   jint dims[4];
 491   dims[0] = len1;
 492   dims[1] = len2;
 493   dims[2] = len3;
 494   dims[3] = len4;
 495   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 496   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
 497   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 498   current->set_vm_result(obj);
 499 JRT_END
 500 
 501 // multianewarray for 5 dimensions
 502 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
 503 #ifndef PRODUCT
  504   SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
 505 #endif
 506   assert(check_compiled_frame(current), "incorrect caller");
 507   assert(elem_type->is_klass(), "not a class");
 508   jint dims[5];
 509   dims[0] = len1;
 510   dims[1] = len2;
 511   dims[2] = len3;
 512   dims[3] = len4;
 513   dims[4] = len5;
 514   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 515   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
 516   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 517   current->set_vm_result(obj);
 518 JRT_END
 519 
 520 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
 521   assert(check_compiled_frame(current), "incorrect caller");
 522   assert(elem_type->is_klass(), "not a class");
 523   assert(oop(dims)->is_typeArray(), "not an array");
 524 
 525   ResourceMark rm;
 526   jint len = dims->length();
 527   assert(len > 0, "Dimensions array should contain data");
 528   jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
 529   ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
 530                                        c_dims, len);
 531 
 532   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 533   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
 534   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 535   current->set_vm_result(obj);
 536 JRT_END
 537 
 538 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
 539 
 540   // Very few notify/notifyAll operations find any threads on the waitset, so
 541   // the dominant fast-path is to simply return.
 542   // Relatedly, it's critical that notify/notifyAll be fast in order to
 543   // reduce lock hold times.
 544   if (!SafepointSynchronize::is_synchronizing()) {
 545     if (ObjectSynchronizer::quick_notify(obj, current, false)) {
 546       return;
 547     }
 548   }
 549 
 550   // This is the case the fast-path above isn't provisioned to handle.
 551   // The fast-path is designed to handle frequently arising cases in an efficient manner.
 552   // (The fast-path is just a degenerate variant of the slow-path).
 553   // Perform the dreaded state transition and pass control into the slow-path.
 554   JRT_BLOCK;
 555   Handle h_obj(current, obj);
 556   ObjectSynchronizer::notify(h_obj, CHECK);
 557   JRT_BLOCK_END;
 558 JRT_END
 559 
 560 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
 561 
 562   if (!SafepointSynchronize::is_synchronizing() ) {
 563     if (ObjectSynchronizer::quick_notify(obj, current, true)) {
 564       return;
 565     }
 566   }
 567 
 568   // This is the case the fast-path above isn't provisioned to handle.
 569   // The fast-path is designed to handle frequently arising cases in an efficient manner.
 570   // (The fast-path is just a degenerate variant of the slow-path).
 571   // Perform the dreaded state transition and pass control into the slow-path.
 572   JRT_BLOCK;
 573   Handle h_obj(current, obj);
 574   ObjectSynchronizer::notifyall(h_obj, CHECK);
 575   JRT_BLOCK_END;
 576 JRT_END
 577 
 578 static const TypeFunc* make_new_instance_Type() {
 579   // create input type (domain)
 580   const Type **fields = TypeTuple::fields(1);

1648   assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1649   switch (register_save_policy[reg]) {
 1650     case 'C': return false; // SOC: save-on-call (caller-saved)
 1651     case 'E': return true ; // SOE: save-on-entry (callee-saved)
 1652     case 'N': return false; // NS:  no-save
 1653     case 'A': return false; // AS:  always-save
1654   }
1655   ShouldNotReachHere();
1656   return false;
1657 }
1658 
1659 //-----------------------------------------------------------------------
1660 // Exceptions
1661 //
1662 
1663 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1664 
 1665 // This method is an entry point that is always called from a C++ method, never
 1666 // directly from compiled code; compiled code calls the C++ wrapper that follows.
 1667 // We cannot allow an async exception to be installed during exception processing.
1668 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1669   // The frame we rethrow the exception to might not have been processed by the GC yet.
1670   // The stack watermark barrier takes care of detecting that and ensuring the frame
1671   // has updated oops.
1672   StackWatermarkSet::after_unwind(current);
1673 
 1674   // Do not confuse exception_oop with pending_exception. The exception_oop
 1675   // is only used to pass arguments into the method, not for general
 1676   // exception handling.  DO NOT CHANGE IT to use pending_exception, since
 1677   // the runtime stubs check this on exit.
1678   assert(current->exception_oop() != nullptr, "exception oop is found");
1679   address handler_address = nullptr;
1680 
1681   Handle exception(current, current->exception_oop());
1682   address pc = current->exception_pc();
1683 
1684   // Clear out the exception oop and pc since looking up an
1685   // exception handler can cause class loading, which might throw an
1686   // exception and those fields are expected to be clear during
1687   // normal bytecode execution.
1688   current->clear_exception_oop_and_pc();

1921   frame caller_frame = stub_frame.sender(&reg_map);
1922   return caller_frame.is_deoptimized_frame();
1923 }
1924 
1925 static const TypeFunc* make_register_finalizer_Type() {
1926   // create input type (domain)
1927   const Type **fields = TypeTuple::fields(1);
1928   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop;          Receiver
1929   // // The JavaThread* is passed to each routine as the last argument
1930   // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
1931   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1932 
1933   // create result type (range)
1934   fields = TypeTuple::fields(0);
1935 
1936   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1937 
1938   return TypeFunc::make(domain,range);
1939 }
 1940 
1941 #if INCLUDE_JFR
1942 static const TypeFunc* make_class_id_load_barrier_Type() {
1943   // create input type (domain)
1944   const Type **fields = TypeTuple::fields(1);
1945   fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1946   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1947 
1948   // create result type (range)
1949   fields = TypeTuple::fields(0);
1950 
1951   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1952 
1953   return TypeFunc::make(domain,range);
1954 }
1955 #endif // INCLUDE_JFR
 1956 
1957 //-----------------------------------------------------------------------------
1958 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
1959   // create input type (domain)
1960   const Type **fields = TypeTuple::fields(2);
1961   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1962   fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM;  // Method*;    Method we are entering
1963   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1964 
1965   // create result type (range)
1966   fields = TypeTuple::fields(0);
1967 
1968   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1969 
1970   return TypeFunc::make(domain,range);
1971 }
1972 
1973 static const TypeFunc* make_dtrace_object_alloc_Type() {
1974   // create input type (domain)
1975   const Type **fields = TypeTuple::fields(2);
1976   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1977   fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // oop;    newly allocated object
1978 
1979   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1980 
1981   // create result type (range)
1982   fields = TypeTuple::fields(0);
1983 
1984   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1985 
1986   return TypeFunc::make(domain,range);
1987 }
1988 
1989 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
1990   assert(oopDesc::is_oop(obj), "must be a valid oop");
1991   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1992   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1993 JRT_END
 1994 
1995 //-----------------------------------------------------------------------------
1996 
1997 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1998 
1999 //
2000 // dump the collected NamedCounters.
2001 //
2002 void OptoRuntime::print_named_counters() {
2003   int total_lock_count = 0;
2004   int eliminated_lock_count = 0;
2005 
2006   NamedCounter* c = _named_counters;
2007   while (c) {
2008     if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2009       int count = c->count();
2010       if (count > 0) {
2011         bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2012         if (Verbose) {
2013           tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2014         }

2152 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2153   trace_exception_counter++;
2154   stringStream tempst;
2155 
2156   tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2157   exception_oop->print_value_on(&tempst);
2158   tempst.print(" in ");
2159   CodeBlob* blob = CodeCache::find_blob(exception_pc);
2160   if (blob->is_nmethod()) {
2161     blob->as_nmethod()->method()->print_value_on(&tempst);
2162   } else if (blob->is_runtime_stub()) {
2163     tempst.print("<runtime-stub>");
2164   } else {
2165     tempst.print("<unknown>");
2166   }
2167   tempst.print(" at " INTPTR_FORMAT,  p2i(exception_pc));
2168   tempst.print("]");
2169 
2170   st->print_raw_cr(tempst.freeze());
 2171 }


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/vmClasses.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/nmethod.hpp"
  30 #include "code/pcDesc.hpp"
  31 #include "code/scopeDesc.hpp"
  32 #include "code/vtableStubs.hpp"
  33 #include "compiler/compilationMemoryStatistic.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/g1/g1HeapRegion.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "gc/shared/gcLocker.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/linkResolver.hpp"
  44 #include "logging/log.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/oopFactory.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "oops/objArrayKlass.hpp"
  49 #include "oops/klass.inline.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/typeArrayOop.inline.hpp"
  52 #include "opto/ad.hpp"
  53 #include "opto/addnode.hpp"
  54 #include "opto/callnode.hpp"
  55 #include "opto/cfgnode.hpp"
  56 #include "opto/graphKit.hpp"
  57 #include "opto/machnode.hpp"
  58 #include "opto/matcher.hpp"
  59 #include "opto/memnode.hpp"
  60 #include "opto/mulnode.hpp"
  61 #include "opto/output.hpp"
  62 #include "opto/runtime.hpp"
  63 #include "opto/subnode.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/frame.inline.hpp"
  67 #include "runtime/handles.inline.hpp"
  68 #include "runtime/interfaceSupport.inline.hpp"
  69 #include "runtime/java.hpp"
  70 #include "runtime/javaCalls.hpp"
  71 #include "runtime/perfData.inline.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stackWatermarkSet.hpp"
  75 #include "runtime/synchronizer.hpp"
  76 #include "runtime/threadCritical.hpp"
  77 #include "runtime/threadWXSetters.inline.hpp"
  78 #include "runtime/vframe.hpp"
  79 #include "runtime/vframeArray.hpp"
  80 #include "runtime/vframe_hp.hpp"
  81 #include "services/management.hpp"
  82 #include "utilities/copy.hpp"
  83 #include "utilities/preserveException.hpp"
  84 
  85 
  86 // For debugging purposes:
  87 //  To force FullGCALot inside a runtime function, add the following two lines
  88 //
  89 //  Universe::release_fullgc_alot_dummy();
  90 //  Universe::heap()->collect();
  91 //
  92 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
  93 
  94 
  95 #define C2_BLOB_FIELD_DEFINE(name, type) \
  96   type OptoRuntime:: BLOB_FIELD_NAME(name)  = nullptr;
  97 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
  98 #define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  99   address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
 100 #define C2_JVMTI_STUB_FIELD_DEFINE(name) \
 101   address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
 102 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
 103 #undef C2_BLOB_FIELD_DEFINE
 104 #undef C2_STUB_FIELD_DEFINE
 105 #undef C2_JVMTI_STUB_FIELD_DEFINE
 106 
 107 
 108 #define C2_BLOB_NAME_DEFINE(name, type)  "C2 Runtime " # name "_blob",
 109 #define C2_STUB_NAME_DEFINE(name, f, t, r)  "C2 Runtime " # name,
 110 #define C2_JVMTI_STUB_NAME_DEFINE(name)  "C2 Runtime " # name,
 111 const char* OptoRuntime::_stub_names[] = {
 112   C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
 113 };
 114 #undef C2_BLOB_NAME_DEFINE
 115 #undef C2_STUB_NAME_DEFINE
 116 #undef C2_JVMTI_STUB_NAME_DEFINE
 117 
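// The field definitions and the _stub_names[] table above come from the same C2_STUBS_DO
// X-macro list, so the two stay in sync by construction. A minimal, self-contained sketch
// of the same technique (the names below are illustrative only, not the real C2 stub list):
//
//   #define DEMO_STUBS_DO(macro)   \
//     macro(uncommon_trap)         \
//     macro(exception_handler)
//
//   #define DEMO_FIELD_DEFINE(name) static address _##name##_entry = nullptr;
//   DEMO_STUBS_DO(DEMO_FIELD_DEFINE)     // defines _uncommon_trap_entry, _exception_handler_entry
//   #undef DEMO_FIELD_DEFINE
//
//   #define DEMO_NAME_DEFINE(name) "C2 Runtime " #name,
//   static const char* demo_stub_names[] = {
//     DEMO_STUBS_DO(DEMO_NAME_DEFINE)    // { "C2 Runtime uncommon_trap", "C2 Runtime exception_handler" }
//   };
//   #undef DEMO_NAME_DEFINE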
 118 address OptoRuntime::_vtable_must_compile_Java                    = nullptr;
 119 
 120 PerfCounter* _perf_OptoRuntime_class_init_barrier_redundant_count = nullptr;
 121 
 122 // This should be called in an assertion at the start of OptoRuntime routines
 123 // which are entered from compiled code (all of them)
 124 #ifdef ASSERT
 125 static bool check_compiled_frame(JavaThread* thread) {
 126   assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
 127   RegisterMap map(thread,
 128                   RegisterMap::UpdateMap::skip,
 129                   RegisterMap::ProcessFrames::include,
 130                   RegisterMap::WalkContinuation::skip);
 131   frame caller = thread->last_frame().sender(&map);
  132   assert(caller.is_compiled_frame(), "not being called from compiled-like code");
 133   return true;
 134 }
 135 #endif // ASSERT
 136 
 137 /*
 138 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
 139   var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
 140   if (var == nullptr) { return false; }
 141 */

 168                   C2_STUB_NAME(name),                                 \
 169                   fancy_jump,                                           \
 170                   pass_tls,                                             \
 171                   pass_retpc);                                          \
 172   if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; }          \
 173 
 174 #define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
 175 
 176 #define GEN_C2_JVMTI_STUB(name)                                       \
 177   STUB_FIELD_NAME(name) =                                               \
 178     generate_stub(env,                                                  \
 179                   notify_jvmti_vthread_Type,                            \
 180                   C2_JVMTI_STUB_C_FUNC(name),                         \
 181                   C2_STUB_NAME(name),                                 \
 182                   0,                                                    \
 183                   true,                                                 \
 184                   false);                                               \
 185   if (STUB_FIELD_NAME(name) == nullptr) { return false; }               \
 186 
 187 bool OptoRuntime::generate(ciEnv* env) {
 188   init_counters();
 189 
 190   C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
 191 
 192   return true;
 193 }
 194 
 195 #undef GEN_C2_BLOB
 196 
 197 #undef C2_STUB_FIELD_NAME
 198 #undef C2_STUB_TYPEFUNC
 199 #undef C2_STUB_C_FUNC
 200 #undef C2_STUB_NAME
 201 #undef GEN_C2_STUB
 202 
 203 #undef C2_JVMTI_STUB_C_FUNC
 204 #undef GEN_C2_JVMTI_STUB
 205 // #undef gen
 206 
 207 const TypeFunc* OptoRuntime::_new_instance_Type                   = nullptr;
 208 const TypeFunc* OptoRuntime::_new_array_Type                      = nullptr;

 270 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type             = nullptr;
 271 const TypeFunc* OptoRuntime::_osr_end_Type                        = nullptr;
 272 const TypeFunc* OptoRuntime::_register_finalizer_Type             = nullptr;
 273 #if INCLUDE_JFR
 274 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type          = nullptr;
 275 #endif // INCLUDE_JFR
 276 #if INCLUDE_JVMTI
 277 const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type           = nullptr;
 278 #endif // INCLUDE_JVMTI
 279 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type       = nullptr;
 280 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type            = nullptr;
 281 
  282 // Helper method to generate RuntimeStubs
 283 address OptoRuntime::generate_stub(ciEnv* env,
 284                                    TypeFunc_generator gen, address C_function,
 285                                    const char *name, int is_fancy_jump,
 286                                    bool pass_tls,
 287                                    bool return_pc) {
 288 
  289   // Use the default directive; we currently have no method to match.
 290   DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompilerThread::current()->compiler());
 291   CompilationMemoryStatisticMark cmsm(directive);
 292   ResourceMark rm;
 293   Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
 294   DirectivesStack::release(directive);
 295   return  C.stub_entry_point();
 296 }
 297 
 298 const char* OptoRuntime::stub_name(address entry) {
 299 #ifndef PRODUCT
 300   CodeBlob* cb = CodeCache::find_blob(entry);
 301   RuntimeStub* rs =(RuntimeStub *)cb;
 302   assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
 303   return rs->name();
 304 #else
 305   // Fast implementation for product mode (maybe it should be inlined too)
 306   return "runtime stub";
 307 #endif
 308 }
 309 
 310 // local methods passed as arguments to stub generator that forward

 314                                    oopDesc* dest, jint dest_pos,
 315                                    jint length, JavaThread* thread) {
 316   SharedRuntime::slow_arraycopy_C(src,  src_pos, dest, dest_pos, length, thread);
 317 }
 318 
 319 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
 320   SharedRuntime::complete_monitor_locking_C(obj, lock, current);
 321 }
 322 
 323 
 324 //=============================================================================
 325 // Opto compiler runtime routines
 326 //=============================================================================
 327 
 328 
 329 //=============================allocation======================================
 330 // We failed the fast-path allocation.  Now we need to do a scavenge or GC
 331 // and try allocation again.
 332 
 333 // object allocation
 334 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_instance_C, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
 335   JRT_BLOCK;
 336 #ifndef PRODUCT
 337   SharedRuntime::_new_instance_ctr++;         // new instance requires GC
 338 #endif
 339   assert(check_compiled_frame(current), "incorrect caller");
 340 
 341   // These checks are cheap to make and support reflective allocation.
 342   int lh = klass->layout_helper();
 343   if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
 344     Handle holder(current, klass->klass_holder()); // keep the klass alive
 345     klass->check_valid_for_instantiation(false, THREAD);
 346     if (!HAS_PENDING_EXCEPTION) {
 347       InstanceKlass::cast(klass)->initialize(THREAD);
 348     }
 349   }
 350 
 351   if (!HAS_PENDING_EXCEPTION) {
 352     // Scavenge and allocate an instance.
 353     Handle holder(current, klass->klass_holder()); // keep the klass alive
 354     oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
 355     current->set_vm_result(result);
 356 
 357     // Pass oops back through thread local storage.  Our apparent type to Java
 358     // is that we return an oop, but we can block on exit from this routine and
 359     // a GC can trash the oop in C's return register.  The generated stub will
 360     // fetch the oop from TLS after any possible GC.
 361   }
 362 
 363   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 364   JRT_BLOCK_END;
 365 
 366   // inform GC that we won't do card marks for initializing writes.
 367   SharedRuntime::on_slowpath_allocation_exit(current);
 368 JRT_END
 369 
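// The "thread local storage" hand-off described above is JavaThread's vm_result field:
// set_vm_result(result) stashes the oop in the current thread, where it is visited by any
// GC that runs while we block on the way out, and the compiled slow-path call sequence
// reloads it from that field once this routine returns.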
 370 
 371 // array allocation
 372 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_C, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
 373   JRT_BLOCK;
 374 #ifndef PRODUCT
 375   SharedRuntime::_new_array_ctr++;            // new array requires GC
 376 #endif
 377   assert(check_compiled_frame(current), "incorrect caller");
 378 
 379   // Scavenge and allocate an instance.
 380   oop result;
 381 
 382   if (array_type->is_typeArray_klass()) {
 383     // The oopFactory likes to work with the element type.
 384     // (We could bypass the oopFactory, since it doesn't add much value.)
 385     BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 386     result = oopFactory::new_typeArray(elem_type, len, THREAD);
 387   } else {
 388     // Although the oopFactory likes to work with the elem_type,
 389     // the compiler prefers the array_type, since it must already have
 390     // that latter value in hand for the fast path.
 391     Handle holder(current, array_type->klass_holder()); // keep the array klass alive
 392     Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
 393     result = oopFactory::new_objArray(elem_type, len, THREAD);
 394   }
 395 
 396   // Pass oops back through thread local storage.  Our apparent type to Java
 397   // is that we return an oop, but we can block on exit from this routine and
 398   // a GC can trash the oop in C's return register.  The generated stub will
 399   // fetch the oop from TLS after any possible GC.
 400   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 401   current->set_vm_result(result);
 402   JRT_BLOCK_END;
 403 
 404   // inform GC that we won't do card marks for initializing writes.
 405   SharedRuntime::on_slowpath_allocation_exit(current);
 406 JRT_END
 407 
 408 // array allocation without zeroing
 409 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_nozero_C, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
 410   JRT_BLOCK;
 411 #ifndef PRODUCT
 412   SharedRuntime::_new_array_ctr++;            // new array requires GC
 413 #endif
 414   assert(check_compiled_frame(current), "incorrect caller");
 415 
 416   // Scavenge and allocate an instance.
 417   oop result;
 418 
 419   assert(array_type->is_typeArray_klass(), "should be called only for type array");
 420   // The oopFactory likes to work with the element type.
 421   BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 422   result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
 423 
 424   // Pass oops back through thread local storage.  Our apparent type to Java
 425   // is that we return an oop, but we can block on exit from this routine and
 426   // a GC can trash the oop in C's return register.  The generated stub will
 427   // fetch the oop from TLS after any possible GC.
 428   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 429   current->set_vm_result(result);

 441     BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
 442     size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
 443     assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
 444     HeapWord* obj = cast_from_oop<HeapWord*>(result);
 445     if (!is_aligned(hs_bytes, BytesPerLong)) {
 446       *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
 447       hs_bytes += BytesPerInt;
 448     }
 449 
 450     // Optimized zeroing.
 451     assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
 452     const size_t aligned_hs = hs_bytes / BytesPerLong;
 453     Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
 454   }
 455 
 456 JRT_END
 457 
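// A worked example of the header-gap zeroing above, assuming a 64-bit layout in which a
// jint array has a 20-byte header (the exact value depends on the header layout flags):
//   hs_bytes = 20            -> 4-byte aligned, but not 8-byte aligned
//   store a zero jint at byte offset 20, then hs_bytes = 24
//   aligned_hs = 24 / 8 = 3  -> fill_to_aligned_words() clears words [3, size)
// i.e. the explicit 4-byte store covers the padding between the array length and the first
// element, and the aligned word fill clears the rest of the array body.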
 458 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
 459 
 460 // multianewarray for 2 dimensions
 461 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray2_C, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
 462 #ifndef PRODUCT
  463   SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
 464 #endif
 465   assert(check_compiled_frame(current), "incorrect caller");
 466   assert(elem_type->is_klass(), "not a class");
 467   jint dims[2];
 468   dims[0] = len1;
 469   dims[1] = len2;
 470   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 471   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
 472   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 473   current->set_vm_result(obj);
 474 JRT_END
 475 
 476 // multianewarray for 3 dimensions
 477 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray3_C, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
 478 #ifndef PRODUCT
  479   SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
 480 #endif
 481   assert(check_compiled_frame(current), "incorrect caller");
 482   assert(elem_type->is_klass(), "not a class");
 483   jint dims[3];
 484   dims[0] = len1;
 485   dims[1] = len2;
 486   dims[2] = len3;
 487   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 488   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
 489   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 490   current->set_vm_result(obj);
 491 JRT_END
 492 
 493 // multianewarray for 4 dimensions
 494 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray4_C, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
 495 #ifndef PRODUCT
  496   SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
 497 #endif
 498   assert(check_compiled_frame(current), "incorrect caller");
 499   assert(elem_type->is_klass(), "not a class");
 500   jint dims[4];
 501   dims[0] = len1;
 502   dims[1] = len2;
 503   dims[2] = len3;
 504   dims[3] = len4;
 505   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 506   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
 507   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 508   current->set_vm_result(obj);
 509 JRT_END
 510 
 511 // multianewarray for 5 dimensions
 512 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
 513 #ifndef PRODUCT
  514   SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
 515 #endif
 516   assert(check_compiled_frame(current), "incorrect caller");
 517   assert(elem_type->is_klass(), "not a class");
 518   jint dims[5];
 519   dims[0] = len1;
 520   dims[1] = len2;
 521   dims[2] = len3;
 522   dims[3] = len4;
 523   dims[4] = len5;
 524   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 525   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
 526   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 527   current->set_vm_result(obj);
 528 JRT_END
 529 
 530 JRT_ENTRY_PROF(void, OptoRuntime, multianewarrayN_C, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
 531   assert(check_compiled_frame(current), "incorrect caller");
 532   assert(elem_type->is_klass(), "not a class");
 533   assert(oop(dims)->is_typeArray(), "not an array");
 534 
 535   ResourceMark rm;
 536   jint len = dims->length();
 537   assert(len > 0, "Dimensions array should contain data");
 538   jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
 539   ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
 540                                        c_dims, len);
 541 
 542   Handle holder(current, elem_type->klass_holder()); // keep the klass alive
 543   oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
 544   deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
 545   current->set_vm_result(obj);
 546 JRT_END
 547 
 548 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notify_C, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
 549 
 550   // Very few notify/notifyAll operations find any threads on the waitset, so
 551   // the dominant fast-path is to simply return.
 552   // Relatedly, it's critical that notify/notifyAll be fast in order to
 553   // reduce lock hold times.
 554   if (!SafepointSynchronize::is_synchronizing()) {
 555     if (ObjectSynchronizer::quick_notify(obj, current, false)) {
 556       return;
 557     }
 558   }
 559 
 560   // This is the case the fast-path above isn't provisioned to handle.
 561   // The fast-path is designed to handle frequently arising cases in an efficient manner.
 562   // (The fast-path is just a degenerate variant of the slow-path).
 563   // Perform the dreaded state transition and pass control into the slow-path.
 564   JRT_BLOCK;
 565   Handle h_obj(current, obj);
 566   ObjectSynchronizer::notify(h_obj, CHECK);
 567   JRT_BLOCK_END;
 568 JRT_END
 569 
 570 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notifyAll_C, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
 571 
 572   if (!SafepointSynchronize::is_synchronizing() ) {
 573     if (ObjectSynchronizer::quick_notify(obj, current, true)) {
 574       return;
 575     }
 576   }
 577 
 578   // This is the case the fast-path above isn't provisioned to handle.
 579   // The fast-path is designed to handle frequently arising cases in an efficient manner.
 580   // (The fast-path is just a degenerate variant of the slow-path).
 581   // Perform the dreaded state transition and pass control into the slow-path.
 582   JRT_BLOCK;
 583   Handle h_obj(current, obj);
 584   ObjectSynchronizer::notifyall(h_obj, CHECK);
 585   JRT_BLOCK_END;
 586 JRT_END
 587 
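// Both notify entries above share a shape worth noting: JRT_BLOCK_ENTRY runs the quick
// check before any thread-state transition, and only the uncommon case pays for the
// JRT_BLOCK transition (which can safepoint and throw). A minimal sketch of that shape,
// with hypothetical helpers standing in for the real work:
//
//   JRT_BLOCK_ENTRY(void, demo_notify_C(oopDesc* obj, JavaThread* current))
//     if (!SafepointSynchronize::is_synchronizing() &&
//         demo_quick_path(obj, current)) {          // hypothetical: no transition, no safepoint
//       return;                                     // common case: nobody is waiting
//     }
//     JRT_BLOCK;                                    // transition to _thread_in_vm
//     demo_slow_path(Handle(current, obj), CHECK);  // hypothetical: may block, GC, or throw
//     JRT_BLOCK_END;
//   JRT_END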
 588 static const TypeFunc* make_new_instance_Type() {
 589   // create input type (domain)
 590   const Type **fields = TypeTuple::fields(1);

1658   assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1659   switch (register_save_policy[reg]) {
 1660     case 'C': return false; // SOC: save-on-call (caller-saved)
 1661     case 'E': return true ; // SOE: save-on-entry (callee-saved)
 1662     case 'N': return false; // NS:  no-save
 1663     case 'A': return false; // AS:  always-save
1664   }
1665   ShouldNotReachHere();
1666   return false;
1667 }
1668 
1669 //-----------------------------------------------------------------------
1670 // Exceptions
1671 //
1672 
1673 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1674 
 1675 // This method is an entry point that is always called from a C++ method, never
 1676 // directly from compiled code; compiled code calls the C++ wrapper that follows.
 1677 // We cannot allow an async exception to be installed during exception processing.
1678 JRT_ENTRY_NO_ASYNC_PROF(address, OptoRuntime, handle_exception_C_helper, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1679   // The frame we rethrow the exception to might not have been processed by the GC yet.
1680   // The stack watermark barrier takes care of detecting that and ensuring the frame
1681   // has updated oops.
1682   StackWatermarkSet::after_unwind(current);
1683 
 1684   // Do not confuse exception_oop with pending_exception. The exception_oop
 1685   // is only used to pass arguments into the method, not for general
 1686   // exception handling.  DO NOT CHANGE IT to use pending_exception, since
 1687   // the runtime stubs check this on exit.
1688   assert(current->exception_oop() != nullptr, "exception oop is found");
1689   address handler_address = nullptr;
1690 
1691   Handle exception(current, current->exception_oop());
1692   address pc = current->exception_pc();
1693 
1694   // Clear out the exception oop and pc since looking up an
1695   // exception handler can cause class loading, which might throw an
1696   // exception and those fields are expected to be clear during
1697   // normal bytecode execution.
1698   current->clear_exception_oop_and_pc();

1931   frame caller_frame = stub_frame.sender(&reg_map);
1932   return caller_frame.is_deoptimized_frame();
1933 }
1934 
1935 static const TypeFunc* make_register_finalizer_Type() {
1936   // create input type (domain)
1937   const Type **fields = TypeTuple::fields(1);
1938   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop;          Receiver
1939   // // The JavaThread* is passed to each routine as the last argument
1940   // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
1941   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1942 
1943   // create result type (range)
1944   fields = TypeTuple::fields(0);
1945 
1946   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1947 
1948   return TypeFunc::make(domain,range);
1949 }
1950 
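// In these *_Type() helpers the domain tuple describes the arguments the compiled caller
// passes to the C entry point (the trailing JavaThread* argument is passed implicitly and
// never appears in it), and an empty range means the call returns nothing to the caller.
// For register_finalizer_C above that is a single NOTNULL oop in and nothing out.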
1951 const TypeFunc *OptoRuntime::class_init_barrier_Type() {
1952   // create input type (domain)
1953   const Type** fields = TypeTuple::fields(1);
1954   fields[TypeFunc::Parms+0] = TypeKlassPtr::NOTNULL;
1955   // // The JavaThread* is passed to each routine as the last argument
1956   // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
1957   const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1958 
1959   // create result type (range)
1960   fields = TypeTuple::fields(0);
1961   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
1962   return TypeFunc::make(domain,range);
1963 }
1964 
1965 #if INCLUDE_JFR
1966 static const TypeFunc* make_class_id_load_barrier_Type() {
1967   // create input type (domain)
1968   const Type **fields = TypeTuple::fields(1);
1969   fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1970   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1971 
1972   // create result type (range)
1973   fields = TypeTuple::fields(0);
1974 
1975   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1976 
1977   return TypeFunc::make(domain,range);
1978 }
1979 #endif // INCLUDE_JFR
1980 
1981 //-----------------------------------------------------------------------------
1982 // runtime upcall support
1983 const TypeFunc *OptoRuntime::runtime_up_call_Type() {
1984   // create input type (domain)
1985   const Type **fields = TypeTuple::fields(1);
1986   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1987   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1988 
1989   // create result type (range)
1990   fields = TypeTuple::fields(0);
1991 
1992   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1993 
1994   return TypeFunc::make(domain,range);
1995 }
1996 
1997 //-----------------------------------------------------------------------------
1998 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
1999   // create input type (domain)
2000   const Type **fields = TypeTuple::fields(2);
2001   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2002   fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM;  // Method*;    Method we are entering
2003   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2004 
2005   // create result type (range)
2006   fields = TypeTuple::fields(0);
2007 
2008   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2009 
2010   return TypeFunc::make(domain,range);
2011 }
2012 
2013 static const TypeFunc* make_dtrace_object_alloc_Type() {
2014   // create input type (domain)
2015   const Type **fields = TypeTuple::fields(2);
2016   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2017   fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // oop;    newly allocated object
2018 
2019   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2020 
2021   // create result type (range)
2022   fields = TypeTuple::fields(0);
2023 
2024   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2025 
2026   return TypeFunc::make(domain,range);
2027 }
2028 
2029 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, register_finalizer_C, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
2030   assert(oopDesc::is_oop(obj), "must be a valid oop");
2031   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
2032   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
2033 JRT_END
2034 
2035 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, class_init_barrier_C, OptoRuntime::class_init_barrier_C(Klass* k, JavaThread* current))
2036   InstanceKlass* ik = InstanceKlass::cast(k);
2037   if (ik->should_be_initialized()) {
2038     ik->initialize(CHECK);
2039   } else if (UsePerfData) {
2040     _perf_OptoRuntime_class_init_barrier_redundant_count->inc();
2041   }
2042 JRT_END
2043 
2044 //-----------------------------------------------------------------------------
2045 
2046 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2047 
2048 //
2049 // dump the collected NamedCounters.
2050 //
2051 void OptoRuntime::print_named_counters() {
2052   int total_lock_count = 0;
2053   int eliminated_lock_count = 0;
2054 
2055   NamedCounter* c = _named_counters;
2056   while (c) {
2057     if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2058       int count = c->count();
2059       if (count > 0) {
2060         bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2061         if (Verbose) {
2062           tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2063         }

2201 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2202   trace_exception_counter++;
2203   stringStream tempst;
2204 
2205   tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2206   exception_oop->print_value_on(&tempst);
2207   tempst.print(" in ");
2208   CodeBlob* blob = CodeCache::find_blob(exception_pc);
2209   if (blob->is_nmethod()) {
2210     blob->as_nmethod()->method()->print_value_on(&tempst);
2211   } else if (blob->is_runtime_stub()) {
2212     tempst.print("<runtime-stub>");
2213   } else {
2214     tempst.print("<unknown>");
2215   }
2216   tempst.print(" at " INTPTR_FORMAT,  p2i(exception_pc));
2217   tempst.print("]");
2218 
2219   st->print_raw_cr(tempst.freeze());
2220 }
2221 
2222 #define DO_COUNTERS2(macro2, macro1) \
2223   macro2(OptoRuntime, new_instance_C) \
2224   macro2(OptoRuntime, new_array_C) \
2225   macro2(OptoRuntime, new_array_nozero_C) \
2226   macro2(OptoRuntime, multianewarray2_C) \
2227   macro2(OptoRuntime, multianewarray3_C) \
2228   macro2(OptoRuntime, multianewarray4_C) \
2229   macro2(OptoRuntime, multianewarrayN_C) \
2230   macro2(OptoRuntime, monitor_notify_C) \
2231   macro2(OptoRuntime, monitor_notifyAll_C) \
2232   macro2(OptoRuntime, handle_exception_C_helper) \
2233   macro2(OptoRuntime, register_finalizer_C) \
2234   macro2(OptoRuntime, class_init_barrier_C) \
2235   macro1(OptoRuntime, class_init_barrier_redundant)
2236 
2237 #define INIT_COUNTER_TIME_AND_CNT(sub, name) \
2238   NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
2239   NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
2240 
2241 #define INIT_COUNTER_CNT(sub, name) \
2242   NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
2243 
2244 void OptoRuntime::init_counters() {
2245   assert(CompilerConfig::is_c2_enabled(), "");
2246 
2247   if (UsePerfData) {
2248     EXCEPTION_MARK;
2249 
2250     DO_COUNTERS2(INIT_COUNTER_TIME_AND_CNT, INIT_COUNTER_CNT)
2251 
2252     if (HAS_PENDING_EXCEPTION) {
2253       vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
2254     }
2255   }
2256 }
2257 #undef INIT_COUNTER_TIME_AND_CNT
2258 #undef INIT_COUNTER_CNT
2259 
2260 #define PRINT_COUNTER_TIME_AND_CNT(sub, name) { \
2261   jlong count = _perf_##sub##_##name##_count->get_value(); \
2262   if (count > 0) { \
2263     st->print_cr("  %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
2264                  _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
2265                  _perf_##sub##_##name##_timer->thread_counter_value_us(), \
2266                  count); \
2267   }}
2268 
2269 #define PRINT_COUNTER_CNT(sub, name) { \
2270   jlong count = _perf_##sub##_##name##_count->get_value(); \
2271   if (count > 0) { \
2272     st->print_cr("  %-30s = " JLONG_FORMAT_W(5) " events", #name, count); \
2273   }}
2274 
2275 void OptoRuntime::print_counters_on(outputStream* st) {
2276   if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c2_enabled()) {
2277     DO_COUNTERS2(PRINT_COUNTER_TIME_AND_CNT, PRINT_COUNTER_CNT)
2278   } else {
2279     st->print_cr("  OptoRuntime: no info (%s is disabled)",
2280                  (!CompilerConfig::is_c2_enabled() ? "C2" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
2281   }
2282 }
2283 
2284 #undef PRINT_COUNTER_TIME_AND_CNT
2285 #undef PRINT_COUNTER_CNT
2286 #undef DO_COUNTERS2
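// For a single table entry such as macro2(OptoRuntime, new_instance_C), the init macros
// above paste together the identifiers _perf_OptoRuntime_new_instance_C_timer and
// _perf_OptoRuntime_new_instance_C_count and register them under SUN_CI as
// "OptoRuntime::new_instance_C" and "OptoRuntime::new_instance_C_count"; the print macros
// read the same counters back and only report entries whose event count is non-zero.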