< prev index next >

src/hotspot/share/c1/c1_Runtime1.cpp

Print this page

  36 #include "code/codeBlob.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "code/pcDesc.hpp"
  39 #include "code/scopeDesc.hpp"
  40 #include "code/vtableStubs.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "compiler/oopMap.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/c1/barrierSetC1.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.hpp"
  48 #include "interpreter/interpreter.hpp"
  49 #include "jfr/support/jfrIntrinsics.hpp"
  50 #include "logging/log.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/oopFactory.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 #include "oops/access.inline.hpp"


  56 #include "oops/klass.inline.hpp"
  57 #include "oops/objArrayOop.inline.hpp"
  58 #include "oops/objArrayKlass.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stackWatermarkSet.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/threadCritical.hpp"
  71 #include "runtime/vframe.inline.hpp"
  72 #include "runtime/vframeArray.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "utilities/copy.hpp"
  75 #include "utilities/events.hpp"

 108 }
 109 
 110 // Implementation of Runtime1
 111 
 112 CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
 113 
 114 #define C1_BLOB_NAME_DEFINE(name)  "C1 Runtime " # name "_blob",
 115 const char *Runtime1::_blob_names[] = {
 116   C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
 117 };
 118 #undef C1_STUB_NAME_DEFINE
 119 
 120 #ifndef PRODUCT
 121 // statistics
 122 uint Runtime1::_generic_arraycopystub_cnt = 0;
 123 uint Runtime1::_arraycopy_slowcase_cnt = 0;
 124 uint Runtime1::_arraycopy_checkcast_cnt = 0;
 125 uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
 126 uint Runtime1::_new_type_array_slowcase_cnt = 0;
 127 uint Runtime1::_new_object_array_slowcase_cnt = 0;

 128 uint Runtime1::_new_instance_slowcase_cnt = 0;
 129 uint Runtime1::_new_multi_array_slowcase_cnt = 0;





 130 uint Runtime1::_monitorenter_slowcase_cnt = 0;
 131 uint Runtime1::_monitorexit_slowcase_cnt = 0;
 132 uint Runtime1::_patch_code_slowcase_cnt = 0;
 133 uint Runtime1::_throw_range_check_exception_count = 0;
 134 uint Runtime1::_throw_index_exception_count = 0;
 135 uint Runtime1::_throw_div0_exception_count = 0;
 136 uint Runtime1::_throw_null_pointer_exception_count = 0;
 137 uint Runtime1::_throw_class_cast_exception_count = 0;
 138 uint Runtime1::_throw_incompatible_class_change_error_count = 0;


 139 uint Runtime1::_throw_count = 0;
 140 
 141 static uint _byte_arraycopy_stub_cnt = 0;
 142 static uint _short_arraycopy_stub_cnt = 0;
 143 static uint _int_arraycopy_stub_cnt = 0;
 144 static uint _long_arraycopy_stub_cnt = 0;
 145 static uint _oop_arraycopy_stub_cnt = 0;
 146 
 147 address Runtime1::arraycopy_count_address(BasicType type) {
 148   switch (type) {
 149   case T_BOOLEAN:
 150   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 151   case T_CHAR:
 152   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 153   case T_FLOAT:
 154   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 155   case T_DOUBLE:
 156   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 157   case T_ARRAY:
 158   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;

 338   FUNCTION_CASE(entry, JfrTime::time_function());
 339 #endif
 340   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 341   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 342   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 343   FUNCTION_CASE(entry, StubRoutines::dexp());
 344   FUNCTION_CASE(entry, StubRoutines::dlog());
 345   FUNCTION_CASE(entry, StubRoutines::dlog10());
 346   FUNCTION_CASE(entry, StubRoutines::dpow());
 347   FUNCTION_CASE(entry, StubRoutines::dsin());
 348   FUNCTION_CASE(entry, StubRoutines::dcos());
 349   FUNCTION_CASE(entry, StubRoutines::dtan());
 350   FUNCTION_CASE(entry, StubRoutines::dtanh());
 351 
 352 #undef FUNCTION_CASE
 353 
 354   // Soft float adds more runtime names.
 355   return pd_name_for_address(entry);
 356 }
 357 
 358 
 359 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
 360 #ifndef PRODUCT
 361   if (PrintC1Statistics) {
 362     _new_instance_slowcase_cnt++;
 363   }
 364 #endif
 365   assert(klass->is_klass(), "not a class");
 366   Handle holder(current, klass->klass_holder()); // keep the klass alive
 367   InstanceKlass* h = InstanceKlass::cast(klass);
 368   h->check_valid_for_instantiation(true, CHECK);
 369   // make sure klass is initialized
 370   h->initialize(CHECK);
 371   // allocate instance and return via TLS
 372   oop obj = h->allocate_instance(CHECK);






 373   current->set_vm_result(obj);
 374 JRT_END
 375 



 376 
// Slow-path allocation of a primitive-typed (type) array for C1-compiled
// code. The element type is derived from 'klass'; the result is returned
// via the thread-local vm_result.
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END
 397 
 398 
// Slow-path allocation of an object (reference) array for C1-compiled code.
// The element klass is taken from 'array_klass'; the result is returned via
// the thread-local vm_result.
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END
 419 
 420 






















 421 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
 422 #ifndef PRODUCT
 423   if (PrintC1Statistics) {
 424     _new_multi_array_slowcase_cnt++;
 425   }
 426 #endif
 427   assert(klass->is_klass(), "not a class");
 428   assert(rank >= 1, "rank must be nonzero");
 429   Handle holder(current, klass->klass_holder()); // keep the klass alive
 430   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
 431   current->set_vm_result(obj);
 432 JRT_END
 433 
 434 




























































































// Fallback entry installed for stubs that have no generated code; prints
// the numeric id of the offending stub so the missing entry can be found.
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
JRT_END
 438 
 439 
 440 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 441   ResourceMark rm(current);
 442   const char* klass_name = obj->klass()->external_name();
 443   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
 444 JRT_END
 445 
 446 
 447 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 448 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
 449 // method) method is passed as an argument. In order to do that it is embedded in the code as
 450 // a constant.
 451 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 452   nmethod* osr_nm = nullptr;
 453   methodHandle method(current, m);
 454 

 738     _throw_class_cast_exception_count++;
 739   }
 740 #endif
 741   ResourceMark rm(current);
 742   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 743   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 744 JRT_END
 745 
 746 
// Throws IncompatibleClassChangeError (no detail message) and posts the
// corresponding JVMTI exception event.
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
 756 
 757 













// Slow-path monitor enter for C1-compiled code. 'lock' is the
// BasicObjectLock slot in the caller's frame that will own the monitor.
JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  if (LockingMode == LM_MONITOR) {
    // Under LM_MONITOR the object has not yet been stored into the lock
    // slot, so store it here. NOTE(review): presumably the compiled fast
    // path stores it in the other locking modes — confirm against the
    // platform's monitorenter stub.
    lock->set_obj(obj);
  }
  assert(obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END
 770 
 771 
 772 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 773   assert(current == JavaThread::current(), "pre-condition");
 774 #ifndef PRODUCT
 775   if (PrintC1Statistics) {
 776     _monitorexit_slowcase_cnt++;
 777   }

 943                       RegisterMap::WalkContinuation::skip);
 944   frame runtime_frame = current->last_frame();
 945   frame caller_frame = runtime_frame.sender(&reg_map);
 946 
 947   // last java frame on stack
 948   vframeStream vfst(current, true);
 949   assert(!vfst.at_end(), "Java frame must exist");
 950 
 951   methodHandle caller_method(current, vfst.method());
 952   // Note that caller_method->code() may not be same as caller_code because of OSR's
 953   // Note also that in the presence of inlining it is not guaranteed
 954   // that caller_method() == caller_code->method()
 955 
 956   int bci = vfst.bci();
 957   Bytecodes::Code code = caller_method()->java_code_at(bci);
 958 
 959   // this is used by assertions in the access_field_patching_id
 960   BasicType patch_field_type = T_ILLEGAL;
 961   bool deoptimize_for_volatile = false;
 962   bool deoptimize_for_atomic = false;


 963   int patch_field_offset = -1;
 964   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
 965   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
 966   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
 967   Handle appendix(current, nullptr); // oop needed by appendix_patching code
 968   bool load_klass_or_mirror_patch_id =
 969     (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
 970 
 971   if (stub_id == C1StubId::access_field_patching_id) {
 972 
 973     Bytecode_field field_access(caller_method, bci);
 974     fieldDescriptor result; // initialize class if needed
 975     Bytecodes::Code code = field_access.code();
 976     constantPoolHandle constants(current, caller_method->constants());
 977     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
 978     patch_field_offset = result.offset();
 979 
 980     // If we're patching a field which is volatile then at compile it
 981     // must not have been know to be volatile, so the generated code
 982     // isn't correct for a volatile reference.  The nmethod has to be

 986     // used for patching references to oops which don't need special
 987     // handling in the volatile case.
 988 
 989     deoptimize_for_volatile = result.access_flags().is_volatile();
 990 
 991     // If we are patching a field which should be atomic, then
 992     // the generated code is not correct either, force deoptimizing.
 993     // We need to only cover T_LONG and T_DOUBLE fields, as we can
 994     // break access atomicity only for them.
 995 
 996     // Strictly speaking, the deoptimization on 64-bit platforms
 997     // is unnecessary, and T_LONG stores on 32-bit platforms need
 998     // to be handled by special patching code when AlwaysAtomicAccesses
 999     // becomes product feature. At this point, we are still going
1000     // for the deoptimization for consistency against volatile
1001     // accesses.
1002 
1003     patch_field_type = result.field_type();
1004     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
1005 










1006   } else if (load_klass_or_mirror_patch_id) {
1007     Klass* k = nullptr;
1008     switch (code) {
1009       case Bytecodes::_putstatic:
1010       case Bytecodes::_getstatic:
1011         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1012           init_klass = klass;
1013           mirror = Handle(current, klass->java_mirror());
1014         }
1015         break;
1016       case Bytecodes::_new:
1017         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1018           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1019         }
1020         break;
1021       case Bytecodes::_multianewarray:
1022         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1023           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1024         }
1025         break;

1058     CallInfo info;
1059     constantPoolHandle pool(current, caller_method->constants());
1060     int index = bytecode.index();
1061     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
1062     switch (bc) {
1063       case Bytecodes::_invokehandle: {
1064         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1065         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1066         break;
1067       }
1068       case Bytecodes::_invokedynamic: {
1069         appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
1070         break;
1071       }
1072       default: fatal("unexpected bytecode for load_appendix_patching_id");
1073     }
1074   } else {
1075     ShouldNotReachHere();
1076   }
1077 
1078   if (deoptimize_for_volatile || deoptimize_for_atomic) {
1079     // At compile time we assumed the field wasn't volatile/atomic but after
1080     // loading it turns out it was volatile/atomic so we have to throw the
1081     // compiled code out and let it be regenerated.
1082     if (TracePatching) {
1083       if (deoptimize_for_volatile) {
1084         tty->print_cr("Deoptimizing for patching volatile field reference");
1085       }
1086       if (deoptimize_for_atomic) {
1087         tty->print_cr("Deoptimizing for patching atomic field reference");
1088       }






1089     }
1090 
1091     // It's possible the nmethod was invalidated in the last
1092     // safepoint, but if it's still alive then make it not_entrant.
1093     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1094     if (nm != nullptr) {
1095       nm->make_not_entrant();
1096     }
1097 
1098     Deoptimization::deoptimize_frame(current, caller_frame.id());
1099 
1100     // Return to the now deoptimized frame.
1101   }
1102 
1103   // Now copy code back
1104 
1105   {
1106     MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1107     //
1108     // Deoptimization may have happened while we waited for the lock.

1519 #ifndef PRODUCT
1520 void Runtime1::print_statistics() {
1521   tty->print_cr("C1 Runtime statistics:");
1522   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1523   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1524   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1525   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1526   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1527   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1528   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1529   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1530   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1531   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1532   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1533   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1534   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1535   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1536 
1537   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1538   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);

1539   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1540   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);






1541   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1542   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1543   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1544 
1545   tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
1546   tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
1547   tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
1548   tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
1549   tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
1550   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);


1551   tty->print_cr(" _throw_count:                                  %u:", _throw_count);
1552 
1553   SharedRuntime::print_ic_miss_histogram();
1554   tty->cr();
1555 }
1556 #endif // PRODUCT

  36 #include "code/codeBlob.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "code/pcDesc.hpp"
  39 #include "code/scopeDesc.hpp"
  40 #include "code/vtableStubs.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "compiler/oopMap.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/c1/barrierSetC1.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.hpp"
  48 #include "interpreter/interpreter.hpp"
  49 #include "jfr/support/jfrIntrinsics.hpp"
  50 #include "logging/log.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/oopFactory.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 #include "oops/access.inline.hpp"
  56 #include "oops/flatArrayKlass.hpp"
  57 #include "oops/flatArrayOop.inline.hpp"
  58 #include "oops/klass.inline.hpp"
  59 #include "oops/objArrayOop.inline.hpp"
  60 #include "oops/objArrayKlass.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/fieldDescriptor.inline.hpp"
  65 #include "runtime/frame.inline.hpp"
  66 #include "runtime/handles.inline.hpp"
  67 #include "runtime/interfaceSupport.inline.hpp"
  68 #include "runtime/javaCalls.hpp"
  69 #include "runtime/sharedRuntime.hpp"
  70 #include "runtime/stackWatermarkSet.hpp"
  71 #include "runtime/stubRoutines.hpp"
  72 #include "runtime/threadCritical.hpp"
  73 #include "runtime/vframe.inline.hpp"
  74 #include "runtime/vframeArray.hpp"
  75 #include "runtime/vm_version.hpp"
  76 #include "utilities/copy.hpp"
  77 #include "utilities/events.hpp"

 110 }
 111 
 112 // Implementation of Runtime1
 113 
 114 CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
 115 
 116 #define C1_BLOB_NAME_DEFINE(name)  "C1 Runtime " # name "_blob",
 117 const char *Runtime1::_blob_names[] = {
 118   C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
 119 };
 120 #undef C1_STUB_NAME_DEFINE
 121 
 122 #ifndef PRODUCT
 123 // statistics
 124 uint Runtime1::_generic_arraycopystub_cnt = 0;
 125 uint Runtime1::_arraycopy_slowcase_cnt = 0;
 126 uint Runtime1::_arraycopy_checkcast_cnt = 0;
 127 uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
 128 uint Runtime1::_new_type_array_slowcase_cnt = 0;
 129 uint Runtime1::_new_object_array_slowcase_cnt = 0;
 130 uint Runtime1::_new_null_free_array_slowcase_cnt = 0;
 131 uint Runtime1::_new_instance_slowcase_cnt = 0;
 132 uint Runtime1::_new_multi_array_slowcase_cnt = 0;
 133 uint Runtime1::_load_flat_array_slowcase_cnt = 0;
 134 uint Runtime1::_store_flat_array_slowcase_cnt = 0;
 135 uint Runtime1::_substitutability_check_slowcase_cnt = 0;
 136 uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
 137 uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
 138 uint Runtime1::_monitorenter_slowcase_cnt = 0;
 139 uint Runtime1::_monitorexit_slowcase_cnt = 0;
 140 uint Runtime1::_patch_code_slowcase_cnt = 0;
 141 uint Runtime1::_throw_range_check_exception_count = 0;
 142 uint Runtime1::_throw_index_exception_count = 0;
 143 uint Runtime1::_throw_div0_exception_count = 0;
 144 uint Runtime1::_throw_null_pointer_exception_count = 0;
 145 uint Runtime1::_throw_class_cast_exception_count = 0;
 146 uint Runtime1::_throw_incompatible_class_change_error_count = 0;
 147 uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
 148 uint Runtime1::_throw_identity_exception_count = 0;
 149 uint Runtime1::_throw_count = 0;
 150 
 151 static uint _byte_arraycopy_stub_cnt = 0;
 152 static uint _short_arraycopy_stub_cnt = 0;
 153 static uint _int_arraycopy_stub_cnt = 0;
 154 static uint _long_arraycopy_stub_cnt = 0;
 155 static uint _oop_arraycopy_stub_cnt = 0;
 156 
 157 address Runtime1::arraycopy_count_address(BasicType type) {
 158   switch (type) {
 159   case T_BOOLEAN:
 160   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 161   case T_CHAR:
 162   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 163   case T_FLOAT:
 164   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 165   case T_DOUBLE:
 166   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 167   case T_ARRAY:
 168   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;

 348   FUNCTION_CASE(entry, JfrTime::time_function());
 349 #endif
 350   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 351   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 352   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 353   FUNCTION_CASE(entry, StubRoutines::dexp());
 354   FUNCTION_CASE(entry, StubRoutines::dlog());
 355   FUNCTION_CASE(entry, StubRoutines::dlog10());
 356   FUNCTION_CASE(entry, StubRoutines::dpow());
 357   FUNCTION_CASE(entry, StubRoutines::dsin());
 358   FUNCTION_CASE(entry, StubRoutines::dcos());
 359   FUNCTION_CASE(entry, StubRoutines::dtan());
 360   FUNCTION_CASE(entry, StubRoutines::dtanh());
 361 
 362 #undef FUNCTION_CASE
 363 
 364   // Soft float adds more runtime names.
 365   return pd_name_for_address(entry);
 366 }
 367 
 368 static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {

 369 #ifndef PRODUCT
 370   if (PrintC1Statistics) {
 371     Runtime1::_new_instance_slowcase_cnt++;
 372   }
 373 #endif
 374   assert(klass->is_klass(), "not a class");
 375   Handle holder(current, klass->klass_holder()); // keep the klass alive
 376   InstanceKlass* h = InstanceKlass::cast(klass);
 377   h->check_valid_for_instantiation(true, CHECK);
 378   // make sure klass is initialized
 379   h->initialize(CHECK);
 380   oop obj = nullptr;
 381   if (h->is_inline_klass() &&  InlineKlass::cast(h)->is_empty_inline_type()) {
 382     obj = InlineKlass::cast(h)->default_value();
 383     assert(obj != nullptr, "default value must exist");
 384   } else {
 385     // allocate instance and return via TLS
 386     obj = h->allocate_instance(CHECK);
 387   }
 388   current->set_vm_result(obj);
 389 JRT_END
 390 
// Slow-path new-instance entry for C1-compiled code; all work is delegated
// to allocate_instance. CHECK returns immediately if the helper left a
// pending exception.
JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  allocate_instance(current, klass, CHECK);
JRT_END
 394 
// Slow-path allocation of a primitive-typed (type) array for C1-compiled
// code. The element type is derived from 'klass'; the result is returned
// via the thread-local vm_result.
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END
 415 
 416 
// Slow-path allocation of an object (reference) array for C1-compiled code.
// The element klass is taken from 'array_klass' (cast via ArrayKlass here,
// unlike the pre-Valhalla ObjArrayKlass cast); the result is returned via
// the thread-local vm_result.
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END
 437 
 438 
 439 JRT_ENTRY(void, Runtime1::new_null_free_array(JavaThread* current, Klass* array_klass, jint length))
 440   NOT_PRODUCT(_new_null_free_array_slowcase_cnt++;)
 441 
 442   // Note: no handle for klass needed since they are not used
 443   //       anymore after new_objArray() and no GC can happen before.
 444   //       (This may have to change if this code changes!)
 445   assert(array_klass->is_klass(), "not a class");
 446   Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
 447   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
 448   assert(elem_klass->is_inline_klass(), "must be");
 449   // Logically creates elements, ensure klass init
 450   elem_klass->initialize(CHECK);
 451   arrayOop obj = oopFactory::new_valueArray(elem_klass, length, CHECK);
 452   current->set_vm_result(obj);
 453   // This is pretty rare but this runtime patch is stressful to deoptimization
 454   // if we deoptimize here so force a deopt to stress the path.
 455   if (DeoptimizeALot) {
 456     deopt_caller(current);
 457   }
 458 JRT_END
 459 
 460 
// Slow-path allocation of a multi-dimensional array. 'dims' points at
// 'rank' jint dimension sizes; the fully nested array is returned via the
// thread-local vm_result.
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END
 473 
 474 
// Record in the caller's MethodData that the array access at the current
// bci operated on a flat array. 'load' distinguishes array-load profiling
// (ArrayLoadData) from array-store profiling (ArrayStoreData). Only done
// when the caller is compiled at the full-profile level and
// C1UpdateMethodData is enabled.
static void profile_flat_array(JavaThread* current, bool load) {
  ResourceMark rm(current);
  // Walk to the topmost Java frame: the C1-compiled caller of this entry.
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  // Check if array access profiling is enabled
  if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
    return;
  }
  int bci = vfst.bci();
  Method* method = vfst.method();
  MethodData* md = method->method_data();
  if (md != nullptr) {
    // Lock to access ProfileData, and ensure lock is not broken by a safepoint
    MutexLocker ml(md->extra_data_lock(), Mutex::_no_safepoint_check_flag);

    ProfileData* data = md->bci_to_data(bci);
    assert(data != nullptr, "incorrect profiling entry");
    if (data->is_ArrayLoadData()) {
      assert(load, "should be an array load");
      ArrayLoadData* load_data = (ArrayLoadData*) data;
      load_data->set_flat_array();
    } else {
      assert(data->is_ArrayStoreData(), "");
      assert(!load, "should be an array store");
      ArrayStoreData* store_data = (ArrayStoreData*) data;
      store_data->set_flat_array();
    }
  }
}
 504 
// Slow path for loading an element from a flat (inlined-element) array:
// allocates a buffered heap copy of the element at 'index' and returns it
// via vm_result.
JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
  assert(array->klass()->is_flatArray_klass(), "should not be called");
  // Profile before doing the work: reaching this entry is itself evidence
  // that the access saw a flat array.
  profile_flat_array(current, true);

  NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
  assert(array->length() > 0 && index < array->length(), "already checked");
  // The handle keeps the array alive: the copy below allocates and can GC.
  flatArrayHandle vah(current, array);
  oop obj = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK);
  current->set_vm_result(obj);
JRT_END
 515 
 516 
// Slow path for storing 'value' into a flat array at 'index'. Also handles
// the null-store case: storing null into a flat or null-free array throws
// NullPointerException.
JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
  if (array->klass()->is_flatArray_klass()) {
    // Only profile genuinely flat arrays; the null-free (non-flat) case
    // below can reach here too.
    profile_flat_array(current, false);
  }

  NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
  if (value == nullptr) {
    assert(array->klass()->is_flatArray_klass() || array->klass()->is_null_free_array_klass(), "should not be called");
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
  } else {
    assert(array->klass()->is_flatArray_klass(), "should not be called");
    array->value_copy_to_index(value, index, LayoutKind::PAYLOAD); // Non atomic is currently the only layout supported by flat arrays
  }
JRT_END
 531 
 532 
// Implements the substitutability test for two value objects by upcalling
// into Java: ValueObjectMethods.isSubstitutable(left, right).
// Returns 1 if substitutable, 0 otherwise (and 0 if the upcall raised an
// exception, via CHECK_0).
JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
  NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
  JavaCallArguments args;
  // Handles keep both operands alive across the Java upcall, which can GC.
  args.push_oop(Handle(THREAD, left));
  args.push_oop(Handle(THREAD, right));
  JavaValue result(T_BOOLEAN);
  JavaCalls::call_static(&result,
                         vmClasses::ValueObjectMethods_klass(),
                         vmSymbols::isSubstitutable_name(),
                         vmSymbols::object_object_boolean_signature(),
                         &args, CHECK_0);
  return result.get_jboolean() ? 1 : 0;
JRT_END
 546 
 547 
 548 extern "C" void ps();
 549 
 550 void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
 551   JavaThread* THREAD = current;
 552   methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
 553   oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
 554   current->set_vm_result(obj);
 555 }
 556 
// Slow-path entry that buffers a method's inline-type arguments, including
// the receiver (allocate_receiver == true).
JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, true);
JRT_END
 561 
// Slow-path entry that buffers a method's inline-type arguments, excluding
// the receiver (allocate_receiver == false).
JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, false);
JRT_END
 566 
// Fallback entry reached when entry_for() handed out a stub id with no real
// implementation; just reports which stub id was requested.
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
JRT_END
 570 
 571 
 572 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 573   ResourceMark rm(current);
 574   const char* klass_name = obj->klass()->external_name();
 575   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
 576 JRT_END
 577 
 578 
 579 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 580 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
 581 // method) method is passed as an argument. In order to do that it is embedded in the code as
 582 // a constant.
 583 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 584   nmethod* osr_nm = nullptr;
 585   methodHandle method(current, m);
 586 

 870     _throw_class_cast_exception_count++;
 871   }
 872 #endif
 873   ResourceMark rm(current);
 874   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 875   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 876 JRT_END
 877 
 878 
// Throws IncompatibleClassChangeError and posts the JVMTI exception event.
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  // Counter is only bumped when statistics printing was requested.
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
 888 
 889 
// Throws IllegalMonitorStateException and posts the JVMTI exception event.
JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
  NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END
 895 
 896 JRT_ENTRY(void, Runtime1::throw_identity_exception(JavaThread* current, oopDesc* object))
 897   NOT_PRODUCT(_throw_identity_exception_count++;)
 898   ResourceMark rm(current);
 899   char* message = SharedRuntime::generate_identity_exception_message(current, object->klass());
 900   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IdentityException(), message);
 901 JRT_END
 902 
// Slow path of monitorenter for C1-compiled code: delegates to the shared
// runtime after making sure the BasicObjectLock slot references the object.
JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  if (LockingMode == LM_MONITOR) {
    // NOTE(review): with LM_MONITOR the lock slot apparently has not been
    // filled in by compiled code yet, so store the object here — the assert
    // below relies on it being set in all modes.
    lock->set_obj(obj);
  }
  assert(obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END
 915 
 916 
 917 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 918   assert(current == JavaThread::current(), "pre-condition");
 919 #ifndef PRODUCT
 920   if (PrintC1Statistics) {
 921     _monitorexit_slowcase_cnt++;
 922   }

1088                       RegisterMap::WalkContinuation::skip);
1089   frame runtime_frame = current->last_frame();
1090   frame caller_frame = runtime_frame.sender(&reg_map);
1091 
1092   // last java frame on stack
1093   vframeStream vfst(current, true);
1094   assert(!vfst.at_end(), "Java frame must exist");
1095 
1096   methodHandle caller_method(current, vfst.method());
1097   // Note that caller_method->code() may not be same as caller_code because of OSR's
1098   // Note also that in the presence of inlining it is not guaranteed
1099   // that caller_method() == caller_code->method()
1100 
1101   int bci = vfst.bci();
1102   Bytecodes::Code code = caller_method()->java_code_at(bci);
1103 
1104   // this is used by assertions in the access_field_patching_id
1105   BasicType patch_field_type = T_ILLEGAL;
1106   bool deoptimize_for_volatile = false;
1107   bool deoptimize_for_atomic = false;
1108   bool deoptimize_for_null_free = false;
1109   bool deoptimize_for_flat = false;
1110   int patch_field_offset = -1;
1111   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
1112   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
1113   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
1114   Handle appendix(current, nullptr); // oop needed by appendix_patching code
1115   bool load_klass_or_mirror_patch_id =
1116     (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
1117 
1118   if (stub_id == C1StubId::access_field_patching_id) {
1119 
1120     Bytecode_field field_access(caller_method, bci);
1121     fieldDescriptor result; // initialize class if needed
1122     Bytecodes::Code code = field_access.code();
1123     constantPoolHandle constants(current, caller_method->constants());
1124     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
1125     patch_field_offset = result.offset();
1126 
1127     // If we're patching a field which is volatile then at compile it
1128     // must not have been know to be volatile, so the generated code
1129     // isn't correct for a volatile reference.  The nmethod has to be

1133     // used for patching references to oops which don't need special
1134     // handling in the volatile case.
1135 
1136     deoptimize_for_volatile = result.access_flags().is_volatile();
1137 
1138     // If we are patching a field which should be atomic, then
1139     // the generated code is not correct either, force deoptimizing.
1140     // We need to only cover T_LONG and T_DOUBLE fields, as we can
1141     // break access atomicity only for them.
1142 
1143     // Strictly speaking, the deoptimization on 64-bit platforms
1144     // is unnecessary, and T_LONG stores on 32-bit platforms need
1145     // to be handled by special patching code when AlwaysAtomicAccesses
1146     // becomes product feature. At this point, we are still going
1147     // for the deoptimization for consistency against volatile
1148     // accesses.
1149 
1150     patch_field_type = result.field_type();
1151     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
1152 
1153     // The field we are patching is null-free. Deoptimize and regenerate
1154     // the compiled code if we patch a putfield/putstatic because it
1155     // does not contain the required null check.
1156     deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());
1157 
1158     // The field we are patching is flat. Deoptimize and regenerate
1159     // the compiled code which can't handle the layout of the flat
1160     // field because it was unknown at compile time.
1161     deoptimize_for_flat = result.is_flat();
1162 
1163   } else if (load_klass_or_mirror_patch_id) {
1164     Klass* k = nullptr;
1165     switch (code) {
1166       case Bytecodes::_putstatic:
1167       case Bytecodes::_getstatic:
1168         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1169           init_klass = klass;
1170           mirror = Handle(current, klass->java_mirror());
1171         }
1172         break;
1173       case Bytecodes::_new:
1174         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1175           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1176         }
1177         break;
1178       case Bytecodes::_multianewarray:
1179         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1180           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1181         }
1182         break;

1215     CallInfo info;
1216     constantPoolHandle pool(current, caller_method->constants());
1217     int index = bytecode.index();
1218     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
1219     switch (bc) {
1220       case Bytecodes::_invokehandle: {
1221         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1222         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1223         break;
1224       }
1225       case Bytecodes::_invokedynamic: {
1226         appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
1227         break;
1228       }
1229       default: fatal("unexpected bytecode for load_appendix_patching_id");
1230     }
1231   } else {
1232     ShouldNotReachHere();
1233   }
1234 
1235   if (deoptimize_for_volatile || deoptimize_for_atomic || deoptimize_for_null_free || deoptimize_for_flat) {
1236     // At compile time we assumed the field wasn't volatile/atomic but after
1237     // loading it turns out it was volatile/atomic so we have to throw the
1238     // compiled code out and let it be regenerated.
1239     if (TracePatching) {
1240       if (deoptimize_for_volatile) {
1241         tty->print_cr("Deoptimizing for patching volatile field reference");
1242       }
1243       if (deoptimize_for_atomic) {
1244         tty->print_cr("Deoptimizing for patching atomic field reference");
1245       }
1246       if (deoptimize_for_null_free) {
1247         tty->print_cr("Deoptimizing for patching null-free field reference");
1248       }
1249       if (deoptimize_for_flat) {
1250         tty->print_cr("Deoptimizing for patching flat field reference");
1251       }
1252     }
1253 
1254     // It's possible the nmethod was invalidated in the last
1255     // safepoint, but if it's still alive then make it not_entrant.
1256     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1257     if (nm != nullptr) {
1258       nm->make_not_entrant();
1259     }
1260 
1261     Deoptimization::deoptimize_frame(current, caller_frame.id());
1262 
1263     // Return to the now deoptimized frame.
1264   }
1265 
1266   // Now copy code back
1267 
1268   {
1269     MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1270     //
1271     // Deoptimization may have happened while we waited for the lock.

1682 #ifndef PRODUCT
1683 void Runtime1::print_statistics() {
1684   tty->print_cr("C1 Runtime statistics:");
1685   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1686   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1687   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1688   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1689   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1690   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1691   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1692   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1693   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1694   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1695   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1696   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1697   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1698   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1699 
1700   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1701   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1702   tty->print_cr(" _new_null_free_array_slowcase_cnt: %u", _new_null_free_array_slowcase_cnt);
1703   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1704   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1705   tty->print_cr(" _load_flat_array_slowcase_cnt:   %u", _load_flat_array_slowcase_cnt);
1706   tty->print_cr(" _store_flat_array_slowcase_cnt:  %u", _store_flat_array_slowcase_cnt);
1707   tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
1708   tty->print_cr(" _buffer_inline_args_slowcase_cnt:%u", _buffer_inline_args_slowcase_cnt);
1709   tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt:%u", _buffer_inline_args_no_receiver_slowcase_cnt);
1710 
1711   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1712   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1713   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1714 
1715   tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
1716   tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
1717   tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
1718   tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
1719   tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
1720   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
1721   tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %u:", _throw_illegal_monitor_state_exception_count);
1722   tty->print_cr(" _throw_identity_exception_count:               %u:", _throw_identity_exception_count);
1723   tty->print_cr(" _throw_count:                                  %u:", _throw_count);
1724 
1725   SharedRuntime::print_ic_miss_histogram();
1726   tty->cr();
1727 }
1728 #endif // PRODUCT
< prev index next >