< prev index next >

src/hotspot/share/c1/c1_Runtime1.cpp

Print this page

  36 #include "code/codeBlob.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "code/pcDesc.hpp"
  39 #include "code/scopeDesc.hpp"
  40 #include "code/vtableStubs.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "compiler/oopMap.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/c1/barrierSetC1.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.hpp"
  48 #include "interpreter/interpreter.hpp"
  49 #include "jfr/support/jfrIntrinsics.hpp"
  50 #include "logging/log.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/oopFactory.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 #include "oops/access.inline.hpp"


  56 #include "oops/klass.inline.hpp"
  57 #include "oops/objArrayOop.inline.hpp"
  58 #include "oops/objArrayKlass.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stackWatermarkSet.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/threadCritical.hpp"
  71 #include "runtime/vframe.inline.hpp"
  72 #include "runtime/vframeArray.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "utilities/copy.hpp"
  75 #include "utilities/events.hpp"

 105     _num_rt_args = args;
 106   }
 107   assert(_num_rt_args == args, "can't change the number of args");
 108 }
 109 
 110 // Implementation of Runtime1
 111 
// Generated stub blobs, one entry per StubID.
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
// Human-readable stub names; generated from the RUNTIME1_STUBS x-macro so
// they stay in sync with the StubID enum.
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
// Slow-path hit and exception counters, printed by print_statistics().
// Only maintained in non-product builds (guarded by PrintC1Statistics at
// the increment sites).
uint Runtime1::_generic_arraycopystub_cnt = 0;
uint Runtime1::_arraycopy_slowcase_cnt = 0;
uint Runtime1::_arraycopy_checkcast_cnt = 0;
uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
uint Runtime1::_new_type_array_slowcase_cnt = 0;
uint Runtime1::_new_object_array_slowcase_cnt = 0;
uint Runtime1::_new_instance_slowcase_cnt = 0;
uint Runtime1::_new_multi_array_slowcase_cnt = 0;
uint Runtime1::_monitorenter_slowcase_cnt = 0;
uint Runtime1::_monitorexit_slowcase_cnt = 0;
uint Runtime1::_patch_code_slowcase_cnt = 0;
uint Runtime1::_throw_range_check_exception_count = 0;
uint Runtime1::_throw_index_exception_count = 0;
uint Runtime1::_throw_div0_exception_count = 0;
uint Runtime1::_throw_null_pointer_exception_count = 0;
uint Runtime1::_throw_class_cast_exception_count = 0;
uint Runtime1::_throw_incompatible_class_change_error_count = 0;
uint Runtime1::_throw_count = 0;

// Per-element-type arraycopy stub counters; their addresses are handed to
// generated code via arraycopy_count_address() below.
static uint _byte_arraycopy_stub_cnt = 0;
static uint _short_arraycopy_stub_cnt = 0;
static uint _int_arraycopy_stub_cnt = 0;
static uint _long_arraycopy_stub_cnt = 0;
static uint _oop_arraycopy_stub_cnt = 0;
 143 
 144 address Runtime1::arraycopy_count_address(BasicType type) {
 145   switch (type) {
 146   case T_BOOLEAN:
 147   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 148   case T_CHAR:
 149   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 150   case T_FLOAT:
 151   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 152   case T_DOUBLE:
 153   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 154   case T_ARRAY:
 155   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;

 331 #ifdef JFR_HAVE_INTRINSICS
 332   FUNCTION_CASE(entry, JfrTime::time_function());
 333 #endif
 334   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 335   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 336   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 337   FUNCTION_CASE(entry, StubRoutines::dexp());
 338   FUNCTION_CASE(entry, StubRoutines::dlog());
 339   FUNCTION_CASE(entry, StubRoutines::dlog10());
 340   FUNCTION_CASE(entry, StubRoutines::dpow());
 341   FUNCTION_CASE(entry, StubRoutines::dsin());
 342   FUNCTION_CASE(entry, StubRoutines::dcos());
 343   FUNCTION_CASE(entry, StubRoutines::dtan());
 344 
 345 #undef FUNCTION_CASE
 346 
 347   // Soft float adds more runtime names.
 348   return pd_name_for_address(entry);
 349 }
 350 
 351 
 352 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
 353 #ifndef PRODUCT
 354   if (PrintC1Statistics) {
 355     _new_instance_slowcase_cnt++;
 356   }
 357 #endif
 358   assert(klass->is_klass(), "not a class");
 359   Handle holder(current, klass->klass_holder()); // keep the klass alive
 360   InstanceKlass* h = InstanceKlass::cast(klass);
 361   h->check_valid_for_instantiation(true, CHECK);
 362   // make sure klass is initialized
 363   h->initialize(CHECK);
 364   // allocate instance and return via TLS
 365   oop obj = h->allocate_instance(CHECK);






 366   current->set_vm_result(obj);
 367 JRT_END
 368 



 369 
// C1 slow-path allocation of a primitive-typed array (int[], byte[], ...).
// The new array is returned to the compiled caller via the thread's
// vm_result.
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END
 390 
 391 
// C1 slow-path allocation of an object array (T[] for a reference type T).
// The new array is returned to the compiled caller via vm_result.
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END
 412 
 413 






















// C1 slow-path allocation of a multi-dimensional array. 'dims' points to
// 'rank' jint dimension sizes embedded by the compiled caller; the result
// is returned via vm_result.
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END
 426 
 427 

























































































// Fallback entry installed for stubs a platform has not implemented;
// reports the offending stub id instead of failing silently.
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END
 431 
 432 
// Throws ArrayStoreException using the stored value's class name as the
// exception message, and posts the JVMTI exception event.
JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current); // external_name() allocates in the resource area
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END
 438 
 439 
 440 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 441 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
 442 // method) method is passed as an argument. In order to do that it is embedded in the code as
 443 // a constant.
 444 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 445   nmethod* osr_nm = nullptr;
 446   methodHandle method(current, m);
 447 

 731     _throw_class_cast_exception_count++;
 732   }
 733 #endif
 734   ResourceMark rm(current);
 735   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 736   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 737 JRT_END
 738 
 739 
// Throws IncompatibleClassChangeError (no message) and posts the JVMTI
// exception event.
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
 749 
 750 






// C1 slow-path monitor enter. 'lock' is the BasicObjectLock slot in the
// compiled frame; 'obj' is the object being locked.
JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  if (LockingMode == LM_MONITOR) {
    // In LM_MONITOR mode the compiled code did not store the object into
    // the lock slot, so record it here.
    lock->set_obj(obj);
  }
  assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match");
  // Lightweight locking does not use the BasicLock, so pass nullptr for it.
  SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? nullptr : lock->lock(), current);
JRT_END
 763 
 764 
 765 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 766   assert(current == JavaThread::current(), "pre-condition");
 767 #ifndef PRODUCT
 768   if (PrintC1Statistics) {
 769     _monitorexit_slowcase_cnt++;
 770   }

 936                       RegisterMap::WalkContinuation::skip);
 937   frame runtime_frame = current->last_frame();
 938   frame caller_frame = runtime_frame.sender(&reg_map);
 939 
 940   // last java frame on stack
 941   vframeStream vfst(current, true);
 942   assert(!vfst.at_end(), "Java frame must exist");
 943 
 944   methodHandle caller_method(current, vfst.method());
 945   // Note that caller_method->code() may not be same as caller_code because of OSR's
 946   // Note also that in the presence of inlining it is not guaranteed
 947   // that caller_method() == caller_code->method()
 948 
 949   int bci = vfst.bci();
 950   Bytecodes::Code code = caller_method()->java_code_at(bci);
 951 
 952   // this is used by assertions in the access_field_patching_id
 953   BasicType patch_field_type = T_ILLEGAL;
 954   bool deoptimize_for_volatile = false;
 955   bool deoptimize_for_atomic = false;


 956   int patch_field_offset = -1;
 957   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
 958   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
 959   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
 960   Handle appendix(current, nullptr); // oop needed by appendix_patching code
 961   bool load_klass_or_mirror_patch_id =
 962     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 963 
 964   if (stub_id == Runtime1::access_field_patching_id) {
 965 
 966     Bytecode_field field_access(caller_method, bci);
 967     fieldDescriptor result; // initialize class if needed
 968     Bytecodes::Code code = field_access.code();
 969     constantPoolHandle constants(current, caller_method->constants());
 970     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
 971     patch_field_offset = result.offset();
 972 
 973     // If we're patching a field which is volatile then at compile it
 974     // must not have been know to be volatile, so the generated code
 975     // isn't correct for a volatile reference.  The nmethod has to be

 979     // used for patching references to oops which don't need special
 980     // handling in the volatile case.
 981 
 982     deoptimize_for_volatile = result.access_flags().is_volatile();
 983 
 984     // If we are patching a field which should be atomic, then
 985     // the generated code is not correct either, force deoptimizing.
 986     // We need to only cover T_LONG and T_DOUBLE fields, as we can
 987     // break access atomicity only for them.
 988 
 989     // Strictly speaking, the deoptimization on 64-bit platforms
 990     // is unnecessary, and T_LONG stores on 32-bit platforms need
 991     // to be handled by special patching code when AlwaysAtomicAccesses
 992     // becomes product feature. At this point, we are still going
 993     // for the deoptimization for consistency against volatile
 994     // accesses.
 995 
 996     patch_field_type = result.field_type();
 997     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
 998 










 999   } else if (load_klass_or_mirror_patch_id) {
1000     Klass* k = nullptr;
1001     switch (code) {
1002       case Bytecodes::_putstatic:
1003       case Bytecodes::_getstatic:
1004         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1005           init_klass = klass;
1006           mirror = Handle(current, klass->java_mirror());
1007         }
1008         break;
1009       case Bytecodes::_new:
1010         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1011           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1012         }
1013         break;
1014       case Bytecodes::_multianewarray:
1015         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1016           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1017         }
1018         break;

1052     constantPoolHandle pool(current, caller_method->constants());
1053     int index = bytecode.index();
1054     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
1055     switch (bc) {
1056       case Bytecodes::_invokehandle: {
1057         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1058         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1059         break;
1060       }
1061       case Bytecodes::_invokedynamic: {
1062         int indy_index = pool->decode_invokedynamic_index(index);
1063         appendix = Handle(current, pool->cache()->set_dynamic_call(info, indy_index));
1064         break;
1065       }
1066       default: fatal("unexpected bytecode for load_appendix_patching_id");
1067     }
1068   } else {
1069     ShouldNotReachHere();
1070   }
1071 
1072   if (deoptimize_for_volatile || deoptimize_for_atomic) {
1073     // At compile time we assumed the field wasn't volatile/atomic but after
1074     // loading it turns out it was volatile/atomic so we have to throw the
1075     // compiled code out and let it be regenerated.
1076     if (TracePatching) {
1077       if (deoptimize_for_volatile) {
1078         tty->print_cr("Deoptimizing for patching volatile field reference");
1079       }
1080       if (deoptimize_for_atomic) {
1081         tty->print_cr("Deoptimizing for patching atomic field reference");
1082       }






1083     }
1084 
1085     // It's possible the nmethod was invalidated in the last
1086     // safepoint, but if it's still alive then make it not_entrant.
1087     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1088     if (nm != nullptr) {
1089       nm->make_not_entrant();
1090     }
1091 
1092     Deoptimization::deoptimize_frame(current, caller_frame.id());
1093 
1094     // Return to the now deoptimized frame.
1095   }
1096 
1097   // Now copy code back
1098 
1099   {
1100     MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);
1101     //
1102     // Deoptimization may have happened while we waited for the lock.

1517 #ifndef PRODUCT
// Dumps the C1 runtime slow-path, arraycopy and exception counters defined
// at the top of this file, plus a few SharedRuntime resolution counters.
// Non-product builds only.
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
  tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_count:                                  %u:", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
1554 #endif // PRODUCT

  36 #include "code/codeBlob.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "code/pcDesc.hpp"
  39 #include "code/scopeDesc.hpp"
  40 #include "code/vtableStubs.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "compiler/oopMap.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/c1/barrierSetC1.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.hpp"
  48 #include "interpreter/interpreter.hpp"
  49 #include "jfr/support/jfrIntrinsics.hpp"
  50 #include "logging/log.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/oopFactory.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 #include "oops/access.inline.hpp"
  56 #include "oops/flatArrayKlass.hpp"
  57 #include "oops/flatArrayOop.inline.hpp"
  58 #include "oops/klass.inline.hpp"
  59 #include "oops/objArrayOop.inline.hpp"
  60 #include "oops/objArrayKlass.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/fieldDescriptor.inline.hpp"
  65 #include "runtime/frame.inline.hpp"
  66 #include "runtime/handles.inline.hpp"
  67 #include "runtime/interfaceSupport.inline.hpp"
  68 #include "runtime/javaCalls.hpp"
  69 #include "runtime/sharedRuntime.hpp"
  70 #include "runtime/stackWatermarkSet.hpp"
  71 #include "runtime/stubRoutines.hpp"
  72 #include "runtime/threadCritical.hpp"
  73 #include "runtime/vframe.inline.hpp"
  74 #include "runtime/vframeArray.hpp"
  75 #include "runtime/vm_version.hpp"
  76 #include "utilities/copy.hpp"
  77 #include "utilities/events.hpp"

 107     _num_rt_args = args;
 108   }
 109   assert(_num_rt_args == args, "can't change the number of args");
 110 }
 111 
 112 // Implementation of Runtime1
 113 
// Generated stub blobs, one entry per StubID.
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
// Human-readable stub names; generated from the RUNTIME1_STUBS x-macro so
// they stay in sync with the StubID enum.
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
// Slow-path hit and exception counters (including the Valhalla flat-array
// and inline-type entries). Only maintained in non-product builds.
uint Runtime1::_generic_arraycopystub_cnt = 0;
uint Runtime1::_arraycopy_slowcase_cnt = 0;
uint Runtime1::_arraycopy_checkcast_cnt = 0;
uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
uint Runtime1::_new_type_array_slowcase_cnt = 0;
uint Runtime1::_new_object_array_slowcase_cnt = 0;
uint Runtime1::_new_flat_array_slowcase_cnt = 0;
uint Runtime1::_new_instance_slowcase_cnt = 0;
uint Runtime1::_new_multi_array_slowcase_cnt = 0;
uint Runtime1::_load_flat_array_slowcase_cnt = 0;
uint Runtime1::_store_flat_array_slowcase_cnt = 0;
uint Runtime1::_substitutability_check_slowcase_cnt = 0;
uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
uint Runtime1::_monitorenter_slowcase_cnt = 0;
uint Runtime1::_monitorexit_slowcase_cnt = 0;
uint Runtime1::_patch_code_slowcase_cnt = 0;
uint Runtime1::_throw_range_check_exception_count = 0;
uint Runtime1::_throw_index_exception_count = 0;
uint Runtime1::_throw_div0_exception_count = 0;
uint Runtime1::_throw_null_pointer_exception_count = 0;
uint Runtime1::_throw_class_cast_exception_count = 0;
uint Runtime1::_throw_incompatible_class_change_error_count = 0;
uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
uint Runtime1::_throw_count = 0;

// Per-element-type arraycopy stub counters; their addresses are handed to
// generated code via arraycopy_count_address() below.
static uint _byte_arraycopy_stub_cnt = 0;
static uint _short_arraycopy_stub_cnt = 0;
static uint _int_arraycopy_stub_cnt = 0;
static uint _long_arraycopy_stub_cnt = 0;
static uint _oop_arraycopy_stub_cnt = 0;
 152 
 153 address Runtime1::arraycopy_count_address(BasicType type) {
 154   switch (type) {
 155   case T_BOOLEAN:
 156   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 157   case T_CHAR:
 158   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 159   case T_FLOAT:
 160   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 161   case T_DOUBLE:
 162   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 163   case T_ARRAY:
 164   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;

 340 #ifdef JFR_HAVE_INTRINSICS
 341   FUNCTION_CASE(entry, JfrTime::time_function());
 342 #endif
 343   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 344   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 345   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 346   FUNCTION_CASE(entry, StubRoutines::dexp());
 347   FUNCTION_CASE(entry, StubRoutines::dlog());
 348   FUNCTION_CASE(entry, StubRoutines::dlog10());
 349   FUNCTION_CASE(entry, StubRoutines::dpow());
 350   FUNCTION_CASE(entry, StubRoutines::dsin());
 351   FUNCTION_CASE(entry, StubRoutines::dcos());
 352   FUNCTION_CASE(entry, StubRoutines::dtan());
 353 
 354 #undef FUNCTION_CASE
 355 
 356   // Soft float adds more runtime names.
 357   return pd_name_for_address(entry);
 358 }
 359 
 360 static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {

 361 #ifndef PRODUCT
 362   if (PrintC1Statistics) {
 363     Runtime1::_new_instance_slowcase_cnt++;
 364   }
 365 #endif
 366   assert(klass->is_klass(), "not a class");
 367   Handle holder(current, klass->klass_holder()); // keep the klass alive
 368   InstanceKlass* h = InstanceKlass::cast(klass);
 369   h->check_valid_for_instantiation(true, CHECK);
 370   // make sure klass is initialized
 371   h->initialize(CHECK);
 372   oop obj = nullptr;
 373   if (h->is_empty_inline_type()) {
 374     obj = InlineKlass::cast(h)->default_value();
 375     assert(obj != nullptr, "default value must exist");
 376   } else {
 377     // allocate instance and return via TLS
 378     obj = h->allocate_instance(CHECK);
 379   }
 380   current->set_vm_result(obj);
 381 JRT_END
 382 
// C1 slow-path entry: allocate an instance of 'klass'.
// Delegates to allocate_instance(); the result is returned via vm_result.
JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  allocate_instance(current, klass, CHECK);
JRT_END
 386 
// C1 slow-path allocation of a primitive-typed array (int[], byte[], ...).
// The new array is returned to the compiled caller via vm_result.
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END
 407 
 408 
// C1 slow-path allocation of an object array (T[] for a reference type T).
// The new array is returned to the compiled caller via vm_result.
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END
 429 
 430 
 431 JRT_ENTRY(void, Runtime1::new_flat_array(JavaThread* current, Klass* array_klass, jint length))
 432   NOT_PRODUCT(_new_flat_array_slowcase_cnt++;)
 433 
 434   // Note: no handle for klass needed since they are not used
 435   //       anymore after new_objArray() and no GC can happen before.
 436   //       (This may have to change if this code changes!)
 437   assert(array_klass->is_klass(), "not a class");
 438   Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
 439   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
 440   assert(elem_klass->is_inline_klass(), "must be");
 441   // Logically creates elements, ensure klass init
 442   elem_klass->initialize(CHECK);
 443   arrayOop obj = oopFactory::new_valueArray(elem_klass, length, CHECK);
 444   current->set_vm_result(obj);
 445   // This is pretty rare but this runtime patch is stressful to deoptimization
 446   // if we deoptimize here so force a deopt to stress the path.
 447   if (DeoptimizeALot) {
 448     deopt_caller(current);
 449   }
 450 JRT_END
 451 
 452 
// C1 slow-path allocation of a multi-dimensional array. 'dims' points to
// 'rank' jint dimension sizes; the result is returned via vm_result.
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END
 465 
 466 
// Records in the caller's MethodData that the array access at the current
// bci involved a flat array. 'load' distinguishes array loads from array
// stores (asserted against the kind of profile entry found). Only done
// when the calling nmethod is at CompLevel_full_profile and
// C1UpdateMethodData is enabled.
static void profile_flat_array(JavaThread* current, bool load) {
  ResourceMark rm(current);
  // Walk to the compiled Java frame that triggered this runtime call.
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  // Check if array access profiling is enabled
  if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
    return;
  }
  int bci = vfst.bci();
  Method* method = vfst.method();
  MethodData* md = method->method_data();
  if (md != nullptr) {
    ProfileData* data = md->bci_to_data(bci);
    assert(data != nullptr, "incorrect profiling entry");
    if (data->is_ArrayLoadData()) {
      assert(load, "should be an array load");
      ArrayLoadData* load_data = (ArrayLoadData*) data;
      load_data->set_flat_array();
    } else {
      assert(data->is_ArrayStoreData(), "");
      assert(!load, "should be an array store");
      ArrayStoreData* store_data = (ArrayStoreData*) data;
      store_data->set_flat_array();
    }
  }
}
 493 
// C1 slow path for loading an element from a flat array: profiles the
// access, then allocates a buffered copy of the element at 'index' and
// returns it via vm_result.
JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
  assert(array->klass()->is_flatArray_klass(), "should not be called");
  profile_flat_array(current, true);

  NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
  assert(array->length() > 0 && index < array->length(), "already checked");
  // Handle the array: value_alloc_copy_from_index allocates and may GC.
  flatArrayHandle vah(current, array);
  oop obj = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK);
  current->set_vm_result(obj);
JRT_END
 504 
 505 
// C1 slow path for storing 'value' into a flat (or null-free) array.
// A null value into such an array throws NullPointerException; otherwise
// the value's contents are copied flat into the element at 'index'.
JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
  if (array->klass()->is_flatArray_klass()) {
    profile_flat_array(current, false);
  }

  NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
  if (value == nullptr) {
    assert(array->klass()->is_flatArray_klass() || array->klass()->is_null_free_array_klass(), "should not be called");
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
  } else {
    assert(array->klass()->is_flatArray_klass(), "should not be called");
    array->value_copy_to_index(value, index);
  }
JRT_END
 520 
 521 
// C1 slow path for value-object substitutability ('==' on value objects):
// calls up into the Java helper ValueObjectMethods.isSubstitutable().
// Returns 1 if the operands are substitutable, 0 otherwise (CHECK_0 also
// returns 0 on a pending exception from the Java call).
JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
  NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
  JavaCallArguments args;
  args.push_oop(Handle(THREAD, left));
  args.push_oop(Handle(THREAD, right));
  JavaValue result(T_BOOLEAN);
  JavaCalls::call_static(&result,
                         vmClasses::ValueObjectMethods_klass(),
                         vmSymbols::isSubstitutable_name(),
                         vmSymbols::object_object_boolean_signature(),
                         &args, CHECK_0);
  return result.get_jboolean() ? 1 : 0;
JRT_END
 535 
 536 
 537 extern "C" void ps();
 538 
 539 void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
 540   JavaThread* THREAD = current;
 541   methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
 542   oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
 543   current->set_vm_result(obj);
 544 }
 545 
// Runtime entry: buffer all inline-type arguments of 'method', including the receiver.
JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, true);
JRT_END
 550 
// Runtime entry: buffer the inline-type arguments of 'method', excluding the receiver.
JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, false);
JRT_END
 555 
// Fallback entry used for stub ids that have no implementation; merely reports
// which stub id was reached instead of crashing silently.
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END
 559 
 560 
 561 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 562   ResourceMark rm(current);
 563   const char* klass_name = obj->klass()->external_name();
 564   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
 565 JRT_END
 566 
 567 
// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee method (which may have been inlined into
// the enclosing method) is passed as an argument; to make that possible it is embedded in the
// compiled code as a constant.
 572 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 573   nmethod* osr_nm = nullptr;
 574   methodHandle method(current, m);
 575 

 859     _throw_class_cast_exception_count++;
 860   }
 861 #endif
 862   ResourceMark rm(current);
 863   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 864   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 865 JRT_END
 866 
 867 
// Throws IncompatibleClassChangeError (posting the JVMTI exception event), with
// no message. Called from C1-compiled code when a class-change check fails.
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  // ResourceMark bounds any resource-area allocation done while raising/reporting.
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
 877 
 878 
// Throws IllegalMonitorStateException (posting the JVMTI exception event), with
// no message.
JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
  NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
  // ResourceMark bounds any resource-area allocation done while raising/reporting.
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END
 884 
// Slow-path monitor enter for C1-compiled code: delegates to the shared
// monitor-enter helper after making sure the BasicObjectLock is set up.
JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  // Under LM_MONITOR the object has not been stored into the lock slot yet
  // (the assert below shows the other non-lightweight modes arrive with it set).
  if (LockingMode == LM_MONITOR) {
    lock->set_obj(obj);
  }
  assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match");
  // Lightweight locking does not use the BasicLock word, so pass nullptr for it.
  SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? nullptr : lock->lock(), current);
JRT_END
 897 
 898 
 899 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 900   assert(current == JavaThread::current(), "pre-condition");
 901 #ifndef PRODUCT
 902   if (PrintC1Statistics) {
 903     _monitorexit_slowcase_cnt++;
 904   }

1070                       RegisterMap::WalkContinuation::skip);
1071   frame runtime_frame = current->last_frame();
1072   frame caller_frame = runtime_frame.sender(&reg_map);
1073 
1074   // last java frame on stack
1075   vframeStream vfst(current, true);
1076   assert(!vfst.at_end(), "Java frame must exist");
1077 
1078   methodHandle caller_method(current, vfst.method());
1079   // Note that caller_method->code() may not be same as caller_code because of OSR's
1080   // Note also that in the presence of inlining it is not guaranteed
1081   // that caller_method() == caller_code->method()
1082 
1083   int bci = vfst.bci();
1084   Bytecodes::Code code = caller_method()->java_code_at(bci);
1085 
1086   // this is used by assertions in the access_field_patching_id
1087   BasicType patch_field_type = T_ILLEGAL;
1088   bool deoptimize_for_volatile = false;
1089   bool deoptimize_for_atomic = false;
1090   bool deoptimize_for_null_free = false;
1091   bool deoptimize_for_flat = false;
1092   int patch_field_offset = -1;
1093   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
1094   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
1095   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
1096   Handle appendix(current, nullptr); // oop needed by appendix_patching code
1097   bool load_klass_or_mirror_patch_id =
1098     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
1099 
1100   if (stub_id == Runtime1::access_field_patching_id) {
1101 
1102     Bytecode_field field_access(caller_method, bci);
1103     fieldDescriptor result; // initialize class if needed
1104     Bytecodes::Code code = field_access.code();
1105     constantPoolHandle constants(current, caller_method->constants());
1106     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
1107     patch_field_offset = result.offset();
1108 
1109     // If we're patching a field which is volatile then at compile it
1110     // must not have been know to be volatile, so the generated code
1111     // isn't correct for a volatile reference.  The nmethod has to be

1115     // used for patching references to oops which don't need special
1116     // handling in the volatile case.
1117 
1118     deoptimize_for_volatile = result.access_flags().is_volatile();
1119 
1120     // If we are patching a field which should be atomic, then
1121     // the generated code is not correct either, force deoptimizing.
1122     // We need to only cover T_LONG and T_DOUBLE fields, as we can
1123     // break access atomicity only for them.
1124 
1125     // Strictly speaking, the deoptimization on 64-bit platforms
1126     // is unnecessary, and T_LONG stores on 32-bit platforms need
1127     // to be handled by special patching code when AlwaysAtomicAccesses
1128     // becomes product feature. At this point, we are still going
1129     // for the deoptimization for consistency against volatile
1130     // accesses.
1131 
1132     patch_field_type = result.field_type();
1133     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
1134 
1135     // The field we are patching is null-free. Deoptimize and regenerate
1136     // the compiled code if we patch a putfield/putstatic because it
1137     // does not contain the required null check.
1138     deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());
1139 
1140     // The field we are patching is flat. Deoptimize and regenerate
1141     // the compiled code which can't handle the layout of the flat
1142     // field because it was unknown at compile time.
1143     deoptimize_for_flat = result.is_flat();
1144 
1145   } else if (load_klass_or_mirror_patch_id) {
1146     Klass* k = nullptr;
1147     switch (code) {
1148       case Bytecodes::_putstatic:
1149       case Bytecodes::_getstatic:
1150         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1151           init_klass = klass;
1152           mirror = Handle(current, klass->java_mirror());
1153         }
1154         break;
1155       case Bytecodes::_new:
1156         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1157           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1158         }
1159         break;
1160       case Bytecodes::_multianewarray:
1161         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1162           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1163         }
1164         break;

1198     constantPoolHandle pool(current, caller_method->constants());
1199     int index = bytecode.index();
1200     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
1201     switch (bc) {
1202       case Bytecodes::_invokehandle: {
1203         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1204         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1205         break;
1206       }
1207       case Bytecodes::_invokedynamic: {
1208         int indy_index = pool->decode_invokedynamic_index(index);
1209         appendix = Handle(current, pool->cache()->set_dynamic_call(info, indy_index));
1210         break;
1211       }
1212       default: fatal("unexpected bytecode for load_appendix_patching_id");
1213     }
1214   } else {
1215     ShouldNotReachHere();
1216   }
1217 
1218   if (deoptimize_for_volatile || deoptimize_for_atomic || deoptimize_for_null_free || deoptimize_for_flat) {
1219     // At compile time we assumed the field wasn't volatile/atomic but after
1220     // loading it turns out it was volatile/atomic so we have to throw the
1221     // compiled code out and let it be regenerated.
1222     if (TracePatching) {
1223       if (deoptimize_for_volatile) {
1224         tty->print_cr("Deoptimizing for patching volatile field reference");
1225       }
1226       if (deoptimize_for_atomic) {
1227         tty->print_cr("Deoptimizing for patching atomic field reference");
1228       }
1229       if (deoptimize_for_null_free) {
1230         tty->print_cr("Deoptimizing for patching null-free field reference");
1231       }
1232       if (deoptimize_for_flat) {
1233         tty->print_cr("Deoptimizing for patching flat field reference");
1234       }
1235     }
1236 
1237     // It's possible the nmethod was invalidated in the last
1238     // safepoint, but if it's still alive then make it not_entrant.
1239     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1240     if (nm != nullptr) {
1241       nm->make_not_entrant();
1242     }
1243 
1244     Deoptimization::deoptimize_frame(current, caller_frame.id());
1245 
1246     // Return to the now deoptimized frame.
1247   }
1248 
1249   // Now copy code back
1250 
1251   {
1252     MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);
1253     //
1254     // Deoptimization may have happened while we waited for the lock.

1669 #ifndef PRODUCT
1670 void Runtime1::print_statistics() {
1671   tty->print_cr("C1 Runtime statistics:");
1672   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1673   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1674   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1675   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1676   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1677   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1678   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1679   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1680   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1681   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1682   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1683   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1684   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1685   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1686 
1687   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1688   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1689   tty->print_cr(" _new_flat_array_slowcase_cnt:    %u", _new_flat_array_slowcase_cnt);
1690   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1691   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1692   tty->print_cr(" _load_flat_array_slowcase_cnt:   %u", _load_flat_array_slowcase_cnt);
1693   tty->print_cr(" _store_flat_array_slowcase_cnt:  %u", _store_flat_array_slowcase_cnt);
1694   tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
1695   tty->print_cr(" _buffer_inline_args_slowcase_cnt:%u", _buffer_inline_args_slowcase_cnt);
1696   tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt:%u", _buffer_inline_args_no_receiver_slowcase_cnt);
1697 
1698   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1699   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1700   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1701 
1702   tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
1703   tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
1704   tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
1705   tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
1706   tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
1707   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
1708   tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %u:", _throw_illegal_monitor_state_exception_count);
1709   tty->print_cr(" _throw_count:                                  %u:", _throw_count);
1710 
1711   SharedRuntime::print_ic_miss_histogram();
1712   tty->cr();
1713 }
1714 #endif // PRODUCT
< prev index next >