src/hotspot/share/runtime/deoptimization.cpp
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
+ #include "oops/flatArrayKlass.hpp"
+ #include "oops/flatArrayOop.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
+ #include "oops/inlineKlass.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiDeferredUpdates.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
// It is not guaranteed that we can get such information here only
// by analyzing bytecode in deoptimized frames. This is why this flag
// is set during method compilation (see Compile::Process_OopMap_Node()).
// If the previous frame was popped or if we are dispatching an exception,
// we don't have an oop result.
- bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
- Handle return_value;
+ ScopeDesc* scope = chunk->at(0)->scope();
+ bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
+ // If multiple values are returned (a scalarized inline type return),
+ // we must preserve all oop return values.
+ GrowableArray<Handle> return_oops;
+ InlineKlass* vk = nullptr;
+ if (save_oop_result && scope->return_scalarized()) {
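+ // Returns the InlineKlass if the callee returned its fields scalarized in
+ // registers, or null if a buffered oop was returned instead (the regular
+ // save path below then applies).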
+ vk = InlineKlass::returned_inline_klass(map);
+ if (vk != nullptr) {
+ vk->save_oop_fields(map, return_oops);
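+ // The oop fields are now safely in return_oops; there is no single oop
+ // result left to save below.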
+ save_oop_result = false;
+ }
+ }
if (save_oop_result) {
// Reallocation may trigger GC. If deoptimization happened on return from
// a call that returns an oop, we need to save it since it is not in the oopmap.
oop result = deoptee.saved_oop_result(&map);
assert(oopDesc::is_oop_or_null(result), "must be oop");
- return_value = Handle(thread, result);
+ return_oops.push(Handle(thread, result));
assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
if (TraceDeoptimization) {
tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
tty->cr();
}
}
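+ // A scalarized inline type return (vk != nullptr) requires reallocation of
+ // the returned instance, even if no objects were scalar replaced.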
- if (objects != nullptr) {
+ if (objects != nullptr || vk != nullptr) {
if (exec_mode == Deoptimization::Unpack_none) {
assert(thread->thread_state() == _thread_in_vm, "assumption");
JavaThread* THREAD = thread; // For exception macros.
// Clear pending OOM if reallocation fails and return true indicating allocation failure
- realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
+ if (vk != nullptr) {
+ realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
+ }
+ if (objects != nullptr) {
+ realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
+ bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
+ Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
+ }
deoptimized_objects = true;
} else {
JavaThread* current = thread; // For JRT_BLOCK
JRT_BLOCK
- realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
+ if (vk != nullptr) {
+ realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
+ }
+ if (objects != nullptr) {
+ realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
+ bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
+ Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
+ }
JRT_END
}
- bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
- Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
- if (TraceDeoptimization) {
+ if (TraceDeoptimization && objects != nullptr) {
print_objects(deoptee_thread, objects, realloc_failures);
}
}
- if (save_oop_result) {
+ if (save_oop_result || vk != nullptr) {
// Restore result.
- deoptee.set_saved_oop_result(&map, return_value());
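+ // A scalarized return has already been buffered back into a single oop by
+ // realloc_inline_type_result, so exactly one oop remains to restore.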
+ assert(return_oops.length() == 1, "expected exactly one oop result");
+ deoptee.set_saved_oop_result(&map, return_oops.pop()());
}
return realloc_failures;
}
static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
// non-parameter locals of the first unpacked interpreted frame.
// Compute that adjustment.
caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
}
- // If the sender is deoptimized the we must retrieve the address of the handler
+ // If the sender is deoptimized we must retrieve the address of the handler
// since the frame will "magically" show the original pc before the deopt
// and we'd undo the deopt.
frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
bool failures = false;
for (int i = 0; i < objects->length(); i++) {
assert(objects->at(i)->is_object(), "invalid debug information");
ObjectValue* sv = (ObjectValue*) objects->at(i);
-
Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
- oop obj = nullptr;
+ // Check if the object may be null and has an additional is_init input that needs
+ // to be checked before using the field values. Skip re-allocation if it is null.
+ if (sv->maybe_null()) {
+ assert(k->is_inline_klass(), "must be an inline klass");
+ jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
+ if (is_init == 0) {
+ continue;
+ }
+ }
+
+ oop obj = nullptr;
bool cache_init_error = false;
if (k->is_instance_klass()) {
#if INCLUDE_JVMCI
nmethod* nm = fr->cb()->as_nmethod_or_null();
if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
} else {
obj = ik->allocate_instance(THREAD);
}
}
+ } else if (k->is_flatArray_klass()) {
+ FlatArrayKlass* ak = FlatArrayKlass::cast(k);
+ // Inline type array must be zeroed because not all memory is reassigned
+ obj = ak->allocate(sv->field_size(), THREAD);
} else if (k->is_typeArray_klass()) {
TypeArrayKlass* ak = TypeArrayKlass::cast(k);
assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
int len = sv->field_size() / type2size[ak->element_type()];
InternalOOMEMark iom(THREAD);
}
return failures;
}
+ // We're deoptimizing at the return of a call; the fields of the returned
+ // inline type are still in registers. When we go back to the interpreter,
+ // it will expect a reference to an inline type instance, so allocate one
+ // and initialize it from the register values here.
+ bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
+ oop new_vt = vk->realloc_result(map, return_oops, THREAD);
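+ // A null result means the buffer allocation failed: clear whatever exception
+ // is pending and throw the preallocated OOM error reserved for reallocation failures.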
+ if (new_vt == nullptr) {
+ CLEAR_PENDING_EXCEPTION;
+ THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
+ }
+ return_oops.clear();
+ return_oops.push(Handle(THREAD, new_vt));
+ return false;
+ }
+
#if INCLUDE_JVMCI
/**
* For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
* we need to somehow be able to recover the actual kind to be able to write the correct
* amount of bytes.
class ReassignedField {
public:
int _offset;
BasicType _type;
+ InstanceKlass* _klass;
+ bool _is_flat;
public:
- ReassignedField() {
- _offset = 0;
- _type = T_ILLEGAL;
- }
+ ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { }
};
static int compare(ReassignedField* left, ReassignedField* right) {
return left->_offset - right->_offset;
}
// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
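+ // base_offset is added to each field offset; it is non-zero when re-assigning
+ // the fields of a flat field or flat array element within its enclosing object.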
- static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
+ static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
InstanceKlass* ik = klass;
while (ik != nullptr) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
ReassignedField field;
field._offset = fs.offset();
field._type = Signature::basic_type(fs.signature());
+ if (fs.is_null_free_inline_type()) {
+ if (fs.is_flat()) {
+ field._is_flat = true;
+ // Resolve klass of flat inline type field
+ field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
+ } else {
+ field._type = T_OBJECT; // Can be removed once Q-descriptors have been removed.
+ }
+ }
fields->append(field);
}
}
ik = ik->superklass();
}
fields->sort(compare);
for (int i = 0; i < fields->length(); i++) {
+ BasicType type = fields->at(i)._type;
+ int offset = base_offset + fields->at(i)._offset;
+ // Check for a flat inline type field before accessing the ScopeValue, because an empty inline type has no fields and thus no ScopeValue to read
+ if (fields->at(i)._is_flat) {
+ // Recursively re-assign flat inline type fields
+ InstanceKlass* vk = fields->at(i)._klass;
+ assert(vk != nullptr, "must be resolved");
+ offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
+ svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
+ continue; // Continue because we don't need to increment svIndex
+ }
ScopeValue* scope_field = sv->field_at(svIndex);
StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
- int offset = fields->at(i)._offset;
- BasicType type = fields->at(i)._type;
switch (type) {
- case T_OBJECT: case T_ARRAY:
+ case T_OBJECT:
+ case T_ARRAY:
assert(value->type() == T_OBJECT, "Agreement.");
obj->obj_field_put(offset, value->get_obj()());
break;
case T_INT: case T_FLOAT: { // 4 bytes.
svIndex++;
}
return svIndex;
}
+ // restore fields of an eliminated inline type array
+ void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
+ InlineKlass* vk = vak->element_klass();
+ assert(vk->flat_array(), "should only be used for flat inline type arrays");
+ // Adjust offset to omit oop header
+ int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - vk->first_field_offset();
+ // Initialize all elements of the flat inline type array
+ for (int i = 0; i < sv->field_size(); i++) {
+ ScopeValue* val = sv->field_at(i);
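+ // Element offset = base + i * element_size; the layout helper encodes log2(element_size)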
+ int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
+ reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
+ }
+ }
+
// restore fields of all eliminated objects and arrays
- void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
+ void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
for (int i = 0; i < objects->length(); i++) {
assert(objects->at(i)->is_object(), "invalid debug information");
ObjectValue* sv = (ObjectValue*) objects->at(i);
Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
Handle obj = sv->value();
- assert(obj.not_null() || realloc_failures, "reallocation was missed");
+ assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
#ifndef PRODUCT
if (PrintDeoptimizationDetails) {
tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
}
#endif // !PRODUCT
// Else fall-through to do assignment for scalar-replaced boxed vector representation
// which could be restored after vector object allocation.
}
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
- reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
+ reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
+ } else if (k->is_flatArray_klass()) {
+ FlatArrayKlass* vak = FlatArrayKlass::cast(k);
+ reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
} else if (k->is_typeArray_klass()) {
TypeArrayKlass* ak = TypeArrayKlass::cast(k);
reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
} else if (k->is_objArray_klass()) {
reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
// deopt the execution state and return to the interpreter.
fr.deoptimize(thread);
}
void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
- // Deoptimize only if the frame comes from compile code.
+ // Deoptimize only if the frame comes from compiled code.
// Do not deoptimize the frame which is already patched
// during the execution of the loops below.
if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
return;
}