src/hotspot/share/c1/c1_Runtime1.cpp

*** 51,10 ***
--- 51,12 ---
  #include "memory/allocation.inline.hpp"
  #include "memory/oopFactory.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
  #include "oops/access.inline.hpp"
+ #include "oops/flatArrayKlass.hpp"
+ #include "oops/flatArrayOop.inline.hpp"
  #include "oops/klass.inline.hpp"
  #include "oops/objArrayOop.inline.hpp"
  #include "oops/objArrayKlass.hpp"
  #include "oops/oop.inline.hpp"
  #include "prims/jvmtiExport.hpp"

*** 120,21 ***
--- 122,28 ---
  uint Runtime1::_arraycopy_slowcase_cnt = 0;
  uint Runtime1::_arraycopy_checkcast_cnt = 0;
  uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
  uint Runtime1::_new_type_array_slowcase_cnt = 0;
  uint Runtime1::_new_object_array_slowcase_cnt = 0;
+ uint Runtime1::_new_flat_array_slowcase_cnt = 0;
  uint Runtime1::_new_instance_slowcase_cnt = 0;
  uint Runtime1::_new_multi_array_slowcase_cnt = 0;
+ uint Runtime1::_load_flat_array_slowcase_cnt = 0;
+ uint Runtime1::_store_flat_array_slowcase_cnt = 0;
+ uint Runtime1::_substitutability_check_slowcase_cnt = 0;
+ uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
+ uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
  uint Runtime1::_monitorenter_slowcase_cnt = 0;
  uint Runtime1::_monitorexit_slowcase_cnt = 0;
  uint Runtime1::_patch_code_slowcase_cnt = 0;
  uint Runtime1::_throw_range_check_exception_count = 0;
  uint Runtime1::_throw_index_exception_count = 0;
  uint Runtime1::_throw_div0_exception_count = 0;
  uint Runtime1::_throw_null_pointer_exception_count = 0;
  uint Runtime1::_throw_class_cast_exception_count = 0;
  uint Runtime1::_throw_incompatible_class_change_error_count = 0;
+ uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
  uint Runtime1::_throw_count = 0;
  
  static uint _byte_arraycopy_stub_cnt = 0;
  static uint _short_arraycopy_stub_cnt = 0;
  static uint _int_arraycopy_stub_cnt = 0;

*** 346,28 ***
  
    // Soft float adds more runtime names.
    return pd_name_for_address(entry);
  }
  
! 
- JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
!     _new_instance_slowcase_cnt++;
    }
  #endif
    assert(klass->is_klass(), "not a class");
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    InstanceKlass* h = InstanceKlass::cast(klass);
    h->check_valid_for_instantiation(true, CHECK);
    // make sure klass is initialized
    h->initialize(CHECK);
!   // allocate instance and return via TLS
!   oop obj = h->allocate_instance(CHECK);
    current->set_vm_result(obj);
  JRT_END
  
  
  JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
      _new_type_array_slowcase_cnt++;
--- 355,36 ---
  
    // Soft float adds more runtime names.
    return pd_name_for_address(entry);
  }
  
! static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {
  #ifndef PRODUCT
    if (PrintC1Statistics) {
!     Runtime1::_new_instance_slowcase_cnt++;
    }
  #endif
    assert(klass->is_klass(), "not a class");
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    InstanceKlass* h = InstanceKlass::cast(klass);
    h->check_valid_for_instantiation(true, CHECK);
    // make sure klass is initialized
    h->initialize(CHECK);
!   oop obj = nullptr;
!   if (h->is_empty_inline_type()) {
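+     // Empty inline types have no instance fields, so they share one canonical default instance; no allocation is needed.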
+     obj = InlineKlass::cast(h)->default_value();
+     assert(obj != nullptr, "default value must exist");
+   } else {
+     // allocate instance and return via TLS
+     obj = h->allocate_instance(CHECK);
+   }
    current->set_vm_result(obj);
  JRT_END
  
+ JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
+   allocate_instance(current, klass, CHECK);
+ JRT_END
  
  JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
      _new_type_array_slowcase_cnt++;

*** 398,21 ***
    // Note: no handle for klass needed since they are not used
    //       anymore after new_objArray() and no GC can happen before.
    //       (This may have to change if this code changes!)
    assert(array_klass->is_klass(), "not a class");
    Handle holder(current, array_klass->klass_holder()); // keep the klass alive
!   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
    objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
    current->set_vm_result(obj);
    // This is pretty rare but this runtime patch is stressful to deoptimization
    // if we deoptimize here so force a deopt to stress the path.
    if (DeoptimizeALot) {
      deopt_caller(current);
    }
  JRT_END
  
  
  JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
      _new_multi_array_slowcase_cnt++;
    }
--- 415,43 ---
    // Note: no handle for klass needed since they are not used
    //       anymore after new_objArray() and no GC can happen before.
    //       (This may have to change if this code changes!)
    assert(array_klass->is_klass(), "not a class");
    Handle holder(current, array_klass->klass_holder()); // keep the klass alive
!   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
    objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
    current->set_vm_result(obj);
    // This is pretty rare but this runtime patch is stressful to deoptimization
    // if we deoptimize here so force a deopt to stress the path.
    if (DeoptimizeALot) {
      deopt_caller(current);
    }
  JRT_END
  
  
+ JRT_ENTRY(void, Runtime1::new_flat_array(JavaThread* current, Klass* array_klass, jint length))
+   NOT_PRODUCT(_new_flat_array_slowcase_cnt++;)
+ 
+   // Note: no handle for klass needed since they are not used
+   //       anymore after new_valueArray() and no GC can happen before.
+   //       (This may have to change if this code changes!)
+   assert(array_klass->is_klass(), "not a class");
+   Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
+   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
+   assert(elem_klass->is_inline_klass(), "must be");
+   // Logically creates elements, so make sure the element klass is initialized
+   elem_klass->initialize(CHECK);
+   arrayOop obj = oopFactory::new_valueArray(elem_klass, length, CHECK);
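+   // Hand the newly allocated array back to the calling stub through the thread-local vm_result slot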
+   current->set_vm_result(obj);
+   // This is pretty rare but this runtime patch is stressful to deoptimization
+   // if we deoptimize here so force a deopt to stress the path.
+   if (DeoptimizeALot) {
+     deopt_caller(current);
+   }
+ JRT_END
+ 
+ 
  JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
      _new_multi_array_slowcase_cnt++;
    }

*** 423,10 ***
--- 462,99 ---
    oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
    current->set_vm_result(obj);
  JRT_END
  
  
+ static void profile_flat_array(JavaThread* current, bool load) {
+   ResourceMark rm(current);
+   vframeStream vfst(current, true);
+   assert(!vfst.at_end(), "Java frame must exist");
+   // Check if array access profiling is enabled
+   if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
+     return;
+   }
+   int bci = vfst.bci();
+   Method* method = vfst.method();
+   MethodData* md = method->method_data();
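+   // The MethodData is allocated lazily; if it does not exist yet there is nothing to update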
+   if (md != nullptr) {
+     ProfileData* data = md->bci_to_data(bci);
+     assert(data != nullptr, "incorrect profiling entry");
+     if (data->is_ArrayLoadData()) {
+       assert(load, "should be an array load");
+       ArrayLoadData* load_data = (ArrayLoadData*) data;
+       load_data->set_flat_array();
+     } else {
+       assert(data->is_ArrayStoreData(), "unexpected profile data");
+       assert(!load, "should be an array store");
+       ArrayStoreData* store_data = (ArrayStoreData*) data;
+       store_data->set_flat_array();
+     }
+   }
+ }
+ 
+ JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
+   assert(array->klass()->is_flatArray_klass(), "should not be called");
+   profile_flat_array(current, true);
+ 
+   NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
+   assert(array->length() > 0 && index < array->length(), "already checked");
+   flatArrayHandle vah(current, array);
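+   // Allocate a buffered heap instance of the element class and copy the flat element at 'index' into it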
+   oop obj = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK);
+   current->set_vm_result(obj);
+ JRT_END
+ 
+ 
+ JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
+   if (array->klass()->is_flatArray_klass()) {
+     profile_flat_array(current, false);
+   }
+ 
+   NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
+   if (value == nullptr) {
+     assert(array->klass()->is_flatArray_klass() || array->klass()->is_null_free_array_klass(), "should not be called");
+     SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
+   } else {
+     assert(array->klass()->is_flatArray_klass(), "should not be called");
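+     // Copy the value object's field contents into the flat array element at 'index'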
+     array->value_copy_to_index(value, index);
+   }
+ JRT_END
+ 
+ 
+ JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
+   NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
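+   // Delegate to the Java helper ValueObjectMethods.isSubstitutable(Object, Object),
+   // which performs the field-by-field substitutability comparison of the two value objects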
+   JavaCallArguments args;
+   args.push_oop(Handle(THREAD, left));
+   args.push_oop(Handle(THREAD, right));
+   JavaValue result(T_BOOLEAN);
+   JavaCalls::call_static(&result,
+                          vmClasses::ValueObjectMethods_klass(),
+                          vmSymbols::isSubstitutable_name(),
+                          vmSymbols::object_object_boolean_signature(),
+                          &args, CHECK_0);
+   return result.get_jboolean() ? 1 : 0;
+ JRT_END
+ 
+ 
+ extern "C" void ps();
+ 
+ void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
+   JavaThread* THREAD = current; // Needed by the CHECK macro in the call below
+   methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
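+   // Allocate heap buffers for the method's scalarized inline-type (value class) arguments, including the receiver when requested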
+   oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
+   current->set_vm_result(obj);
+ }
+ 
+ JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
+   NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
+   buffer_inline_args_impl(current, method, true);
+ JRT_END
+ 
+ JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
+   NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
+   buffer_inline_args_impl(current, method, false);
+ JRT_END
+ 
  JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
    tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
  JRT_END
  
  

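  All of the new entries above follow the same C1 slow-path convention: a JRT_ENTRY body that hands its result back through the thread-local vm_result slot rather than through a C++ return value, so the generated stub can pick it up once the call returns. A minimal sketch of that shape (the entry name is hypothetical, for illustration only):
  
    JRT_ENTRY(void, Runtime1::example_slow_path(JavaThread* current, Klass* klass))
      // do the slow-path work; CHECK unwinds if an exception is pending
      oop obj = InstanceKlass::cast(klass)->allocate_instance(CHECK);
      current->set_vm_result(obj); // retrieved (and cleared) by the calling stub
    JRT_END
  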
*** 746,10 ***
--- 874,16 ---
    ResourceMark rm(current);
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
  JRT_END
  
  
+ JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
+   NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
+   ResourceMark rm(current);
+   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
+ JRT_END
+ 
  JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
  #ifndef PRODUCT
    if (PrintC1Statistics) {
      _monitorenter_slowcase_cnt++;
    }

*** 951,10 ***
--- 1085,12 ---
  
    // this is used by assertions in the access_field_patching_id
    BasicType patch_field_type = T_ILLEGAL;
    bool deoptimize_for_volatile = false;
    bool deoptimize_for_atomic = false;
+   bool deoptimize_for_null_free = false;
+   bool deoptimize_for_flat = false;
    int patch_field_offset = -1;
    Klass* init_klass = nullptr; // klass needed by load_klass_patching code
    Klass* load_klass = nullptr; // klass needed by load_klass_patching code
    Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
    Handle appendix(current, nullptr); // oop needed by appendix_patching code

*** 994,10 ***
--- 1130,20 ---
      // accesses.
  
      patch_field_type = result.field_type();
      deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
  
+     // The field we are patching is null-free. Deoptimize and regenerate
+     // the compiled code if we patch a putfield/putstatic because it
+     // does not contain the required null check.
+     deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());
+ 
+     // The field we are patching is flat. Deoptimize and regenerate
+     // the compiled code: it cannot handle the layout of the flat
+     // field because that layout was unknown at compile time.
+     deoptimize_for_flat = result.is_flat();
+ 
    } else if (load_klass_or_mirror_patch_id) {
      Klass* k = nullptr;
      switch (code) {
        case Bytecodes::_putstatic:
        case Bytecodes::_getstatic:

*** 1067,21 ***
      }
    } else {
      ShouldNotReachHere();
    }
  
!   if (deoptimize_for_volatile || deoptimize_for_atomic) {
      // At compile time we assumed the field wasn't volatile/atomic but after
      // loading it turns out it was volatile/atomic so we have to throw the
      // compiled code out and let it be regenerated.
      if (TracePatching) {
        if (deoptimize_for_volatile) {
          tty->print_cr("Deoptimizing for patching volatile field reference");
        }
        if (deoptimize_for_atomic) {
          tty->print_cr("Deoptimizing for patching atomic field reference");
        }
      }
  
      // It's possible the nmethod was invalidated in the last
      // safepoint, but if it's still alive then make it not_entrant.
      nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
--- 1213,27 ---
      }
    } else {
      ShouldNotReachHere();
    }
  
!   if (deoptimize_for_volatile || deoptimize_for_atomic || deoptimize_for_null_free || deoptimize_for_flat) {
      // At compile time we assumed the field wasn't volatile/atomic but after
      // loading it turns out it was volatile/atomic so we have to throw the
      // compiled code out and let it be regenerated.
      if (TracePatching) {
        if (deoptimize_for_volatile) {
          tty->print_cr("Deoptimizing for patching volatile field reference");
        }
        if (deoptimize_for_atomic) {
          tty->print_cr("Deoptimizing for patching atomic field reference");
        }
+       if (deoptimize_for_null_free) {
+         tty->print_cr("Deoptimizing for patching null-free field reference");
+       }
+       if (deoptimize_for_flat) {
+         tty->print_cr("Deoptimizing for patching flat field reference");
+       }
      }
  
      // It's possible the nmethod was invalidated in the last
      // safepoint, but if it's still alive then make it not_entrant.
      nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());

*** 1532,22 ***
--- 1684,30 ---
    tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
    tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
  
    tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
    tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
+   tty->print_cr(" _new_flat_array_slowcase_cnt:    %u", _new_flat_array_slowcase_cnt);
    tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
    tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
+   tty->print_cr(" _load_flat_array_slowcase_cnt:   %u", _load_flat_array_slowcase_cnt);
+   tty->print_cr(" _store_flat_array_slowcase_cnt:  %u", _store_flat_array_slowcase_cnt);
+   tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
+   tty->print_cr(" _buffer_inline_args_slowcase_cnt:%u", _buffer_inline_args_slowcase_cnt);
+   tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt:%u", _buffer_inline_args_no_receiver_slowcase_cnt);
+ 
    tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
    tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
    tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
  
    tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
    tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
    tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
    tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
    tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
    tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
+   tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %u:", _throw_illegal_monitor_state_exception_count);
    tty->print_cr(" _throw_count:                                  %u:", _throw_count);
  
    SharedRuntime::print_ic_miss_histogram();
    tty->cr();
  }