32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecodeStream.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/oopMapCache.hpp"
42 #include "jvm.h"
43 #include "logging/log.hpp"
44 #include "logging/logLevel.hpp"
45 #include "logging/logMessage.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/constantPool.hpp"
52 #include "oops/fieldStreams.inline.hpp"
53 #include "oops/method.hpp"
54 #include "oops/objArrayKlass.hpp"
55 #include "oops/objArrayOop.inline.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "oops/typeArrayOop.inline.hpp"
58 #include "oops/verifyOopClosure.hpp"
59 #include "prims/jvmtiDeferredUpdates.hpp"
60 #include "prims/jvmtiExport.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "prims/methodHandles.hpp"
63 #include "prims/vectorSupport.hpp"
64 #include "runtime/atomic.hpp"
65 #include "runtime/continuation.hpp"
66 #include "runtime/continuationEntry.inline.hpp"
67 #include "runtime/deoptimization.hpp"
68 #include "runtime/escapeBarrier.hpp"
69 #include "runtime/fieldDescriptor.hpp"
70 #include "runtime/fieldDescriptor.inline.hpp"
71 #include "runtime/frame.inline.hpp"
72 #include "runtime/handles.inline.hpp"
73 #include "runtime/interfaceSupport.inline.hpp"
74 #include "runtime/javaThread.hpp"
75 #include "runtime/jniHandles.inline.hpp"
76 #include "runtime/keepStackGCProcessed.hpp"
289
290 return fetch_unroll_info_helper(current, exec_mode);
291 JRT_END
292
293 #if COMPILER2_OR_JVMCI
294 // print information about reallocated objects
295 static void print_objects(JavaThread* deoptee_thread,
296 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
297 ResourceMark rm;
298 stringStream st; // change to logStream with logging
299 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
300 fieldDescriptor fd;
301
302 for (int i = 0; i < objects->length(); i++) {
303 ObjectValue* sv = (ObjectValue*) objects->at(i);
304 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
305 Handle obj = sv->value();
306
307 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
308 k->print_value_on(&st);
309 assert(obj.not_null() || realloc_failures, "reallocation was missed");
310 if (obj.is_null()) {
311 st.print(" allocation failed");
312 } else {
313 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
314 }
315 st.cr();
316
317 if (Verbose && !obj.is_null()) {
318 k->oop_print_on(obj(), &st);
319 }
320 }
321 tty->print_raw(st.freeze());
322 }
323
// Reallocate on the Java heap all objects that were scalar-replaced in the
// youngest compiled frame of 'chunk', and write their field values back
// (via realloc_objects / reassign_fields). Returns true if any reallocation
// failed. 'deoptimized_objects' is set to true when reallocation was done in
// the eager Unpack_none mode.
static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
                                  bool& deoptimized_objects) {
  bool realloc_failures = false;
  assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");

  // Only the owner thread may deoptimize its own frames directly; any other
  // thread must be using the Unpack_none (object deoptimization) mode.
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
         "a frame can only be deoptimized by the owner thread");

  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  Handle return_value;
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_value = Handle(thread, result);
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
      tty->cr();
    }
  }
  if (objects != nullptr) {
    if (exec_mode == Deoptimization::Unpack_none) {
      // Eager object deoptimization path: we are already in VM state, so the
      // exception-check macros are used directly instead of a JRT_BLOCK.
      assert(thread->thread_state() == _thread_in_vm, "assumption");
      JavaThread* THREAD = thread; // For exception macros.
      // Clear pending OOM if reallocation fails and return true indicating allocation failure
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
      deoptimized_objects = true;
    } else {
      JavaThread* current = thread; // For JRT_BLOCK
      JRT_BLOCK
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
      JRT_END
    }
    // JVMCI-compiled code keeps internal (injected) fields in the debug info,
    // so they are only skipped for non-JVMCI compiled methods.
    bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
    Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
    if (TraceDeoptimization) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
  if (save_oop_result) {
    // Restore result.
    deoptee.set_saved_oop_result(&map, return_value());
  }
  return realloc_failures;
}
384
385 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
386 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
387 JavaThread* deoptee_thread = chunk->at(0)->thread();
388 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
389 assert(thread == Thread::current(), "should be");
390 HandleMark hm(thread);
391 #ifndef PRODUCT
392 bool first = true;
393 #endif // !PRODUCT
394 for (int i = 0; i < chunk->length(); i++) {
395 compiledVFrame* cvf = chunk->at(i);
396 assert (cvf->scope() != nullptr,"expect only compiled java frames");
397 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
398 if (monitors->is_nonempty()) {
399 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
400 exec_mode, realloc_failures);
695 // its caller's stack by. If the caller is a compiled frame then
696 // we pretend that the callee has no parameters so that the
697 // extension counts for the full amount of locals and not just
698 // locals-parms. This is because without a c2i adapter the parm
699 // area as created by the compiled frame will not be usable by
700 // the interpreter. (Depending on the calling convention there
701 // may not even be enough space).
702
703 // QQQ I'd rather see this pushed down into last_frame_adjust
704 // and have it take the sender (aka caller).
705
706 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
707 caller_adjustment = last_frame_adjust(0, callee_locals);
708 } else if (callee_locals > callee_parameters) {
709 // The caller frame may need extending to accommodate
710 // non-parameter locals of the first unpacked interpreted frame.
711 // Compute that adjustment.
712 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
713 }
714
  // If the sender is deoptimized we must retrieve the address of the handler
716 // since the frame will "magically" show the original pc before the deopt
717 // and we'd undo the deopt.
718
719 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
720 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
721 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
722 }
723
724 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
725
726 #if INCLUDE_JVMCI
727 if (exceptionObject() != nullptr) {
728 current->set_exception_oop(exceptionObject());
729 exec_mode = Unpack_exception;
730 }
731 #endif
732
733 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
734 assert(current->has_pending_exception(), "should have thrown OOME");
735 current->set_exception_oop(current->pending_exception());
1195 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1196 default:;
1197 }
1198 }
1199 return nullptr;
1200 }
1201 #endif // INCLUDE_JVMCI
1202
1203 #if COMPILER2_OR_JVMCI
1204 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1205 Handle pending_exception(THREAD, thread->pending_exception());
1206 const char* exception_file = thread->exception_file();
1207 int exception_line = thread->exception_line();
1208 thread->clear_pending_exception();
1209
1210 bool failures = false;
1211
1212 for (int i = 0; i < objects->length(); i++) {
1213 assert(objects->at(i)->is_object(), "invalid debug information");
1214 ObjectValue* sv = (ObjectValue*) objects->at(i);
1215
1216 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1217 oop obj = nullptr;
1218
1219 bool cache_init_error = false;
1220 if (k->is_instance_klass()) {
1221 #if INCLUDE_JVMCI
1222 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1223 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1224 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1225 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1226 if (obj != nullptr) {
1227 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1228 abv->set_cached(true);
1229 } else if (cache_init_error) {
1230 // Results in an OOME which is valid (as opposed to a class initialization error)
1231 // and is fine for the rare case a cache initialization failing.
1232 failures = true;
1233 }
1234 }
1235 #endif // INCLUDE_JVMCI
1236
1237 InstanceKlass* ik = InstanceKlass::cast(k);
1238 if (obj == nullptr && !cache_init_error) {
1239 #if COMPILER2_OR_JVMCI
1240 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1241 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1242 } else {
1243 obj = ik->allocate_instance(THREAD);
1244 }
1245 #else
1246 obj = ik->allocate_instance(THREAD);
1247 #endif // COMPILER2_OR_JVMCI
1248 }
1249 } else if (k->is_typeArray_klass()) {
1250 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1251 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1252 int len = sv->field_size() / type2size[ak->element_type()];
1253 obj = ak->allocate(len, THREAD);
1254 } else if (k->is_objArray_klass()) {
1255 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1256 obj = ak->allocate(sv->field_size(), THREAD);
1257 }
1258
1259 if (obj == nullptr) {
1260 failures = true;
1261 }
1262
1263 assert(sv->value().is_null(), "redundant reallocation");
1264 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1265 CLEAR_PENDING_EXCEPTION;
1266 sv->set_value(obj);
1267 }
1268
1269 if (failures) {
1270 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1271 } else if (pending_exception.not_null()) {
1272 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1273 }
1274
1275 return failures;
1276 }
1277
1278 #if INCLUDE_JVMCI
1279 /**
1280 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1281 * we need to somehow be able to recover the actual kind to be able to write the correct
1282 * amount of bytes.
1283 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1284 * the entries at index n + 1 to n + i are 'markers'.
1285 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1286 * expected form of the array would be:
1287 *
1288 * {b0, b1, b2, b3, INT, marker, b6, b7}
1289 *
1290 * Thus, in order to get back the size of the entry, we simply need to count the number
1291 * of marked entries
1292 *
1293 * @param virtualArray the virtualized byte array
1294 * @param i index of the virtual entry we are recovering
1295 * @return The number of bytes the entry spans
1296 */
1297 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1423 default:
1424 ShouldNotReachHere();
1425 }
1426 index++;
1427 }
1428 }
1429
1430 // restore fields of an eliminated object array
1431 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1432 for (int i = 0; i < sv->field_size(); i++) {
1433 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1434 assert(value->type() == T_OBJECT, "object element expected");
1435 obj->obj_at_put(i, value->get_obj()());
1436 }
1437 }
1438
1439 class ReassignedField {
1440 public:
1441 int _offset;
1442 BasicType _type;
1443 public:
1444 ReassignedField() {
1445 _offset = 0;
1446 _type = T_ILLEGAL;
1447 }
1448 };
1449
1450 int compare(ReassignedField* left, ReassignedField* right) {
1451 return left->_offset - right->_offset;
1452 }
1453
1454 // Restore fields of an eliminated instance object using the same field order
1455 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1456 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1457 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1458 InstanceKlass* ik = klass;
1459 while (ik != nullptr) {
1460 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1461 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1462 ReassignedField field;
1463 field._offset = fs.offset();
1464 field._type = Signature::basic_type(fs.signature());
1465 fields->append(field);
1466 }
1467 }
1468 ik = ik->superklass();
1469 }
1470 fields->sort(compare);
1471 for (int i = 0; i < fields->length(); i++) {
1472 ScopeValue* scope_field = sv->field_at(svIndex);
1473 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1474 int offset = fields->at(i)._offset;
1475 BasicType type = fields->at(i)._type;
1476 switch (type) {
1477 case T_OBJECT: case T_ARRAY:
1478 assert(value->type() == T_OBJECT, "Agreement.");
1479 obj->obj_field_put(offset, value->get_obj()());
1480 break;
1481
1482 case T_INT: case T_FLOAT: { // 4 bytes.
1483 assert(value->type() == T_INT, "Agreement.");
1484 bool big_value = false;
1485 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1486 if (scope_field->is_location()) {
1487 Location::Type type = ((LocationValue*) scope_field)->location().type();
1488 if (type == Location::dbl || type == Location::lng) {
1489 big_value = true;
1490 }
1491 }
1492 if (scope_field->is_constant_int()) {
1493 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1494 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1495 big_value = true;
1496 }
1497 }
1531 break;
1532
1533 case T_BYTE:
1534 assert(value->type() == T_INT, "Agreement.");
1535 obj->byte_field_put(offset, (jbyte)value->get_jint());
1536 break;
1537
1538 case T_BOOLEAN:
1539 assert(value->type() == T_INT, "Agreement.");
1540 obj->bool_field_put(offset, (jboolean)value->get_jint());
1541 break;
1542
1543 default:
1544 ShouldNotReachHere();
1545 }
1546 svIndex++;
1547 }
1548 return svIndex;
1549 }
1550
1551 // restore fields of all eliminated objects and arrays
1552 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1553 for (int i = 0; i < objects->length(); i++) {
1554 assert(objects->at(i)->is_object(), "invalid debug information");
1555 ObjectValue* sv = (ObjectValue*) objects->at(i);
1556 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1557 Handle obj = sv->value();
1558 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1559 #ifndef PRODUCT
1560 if (PrintDeoptimizationDetails) {
1561 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1562 }
1563 #endif // !PRODUCT
1564
1565 if (obj.is_null()) {
1566 continue;
1567 }
1568
1569 #if INCLUDE_JVMCI
1570 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1571 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1572 continue;
1573 }
1574 #endif // INCLUDE_JVMCI
1575 #if COMPILER2_OR_JVMCI
1576 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1577 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1578 ScopeValue* payload = sv->field_at(0);
1579 if (payload->is_location() &&
1580 payload->as_LocationValue()->location().type() == Location::vector) {
1581 #ifndef PRODUCT
1582 if (PrintDeoptimizationDetails) {
1583 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1584 if (Verbose) {
1585 Handle obj = sv->value();
1586 k->oop_print_on(obj(), tty);
1587 }
1588 }
1589 #endif // !PRODUCT
1590 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1591 }
1592 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1593 // which could be restored after vector object allocation.
1594 }
1595 #endif /* !COMPILER2_OR_JVMCI */
1596 if (k->is_instance_klass()) {
1597 InstanceKlass* ik = InstanceKlass::cast(k);
1598 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1599 } else if (k->is_typeArray_klass()) {
1600 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1601 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1602 } else if (k->is_objArray_klass()) {
1603 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1604 }
1605 }
1606 }
1607
1608
1609 // relock objects for which synchronization was eliminated
1610 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1611 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1612 bool relocked_objects = false;
1613 for (int i = 0; i < monitors->length(); i++) {
1614 MonitorInfo* mon_info = monitors->at(i);
1615 if (mon_info->eliminated()) {
1616 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1617 relocked_objects = true;
1618 if (!mon_info->owner_is_scalar_replaced()) {
1748 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1749 cm->log_identity(xtty);
1750 xtty->end_head();
1751 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1752 xtty->begin_elem("jvms bci='%d'", sd->bci());
1753 xtty->method(sd->method());
1754 xtty->end_elem();
1755 if (sd->is_top()) break;
1756 }
1757 xtty->tail("deoptimized");
1758 }
1759
1760 Continuation::notify_deopt(thread, fr.sp());
1761
1762 // Patch the compiled method so that when execution returns to it we will
1763 // deopt the execution state and return to the interpreter.
1764 fr.deoptimize(thread);
1765 }
1766
1767 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1768 // Deoptimize only if the frame comes from compile code.
1769 // Do not deoptimize the frame which is already patched
1770 // during the execution of the loops below.
1771 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1772 return;
1773 }
1774 ResourceMark rm;
1775 deoptimize_single_frame(thread, fr, reason);
1776 }
1777
1778 #if INCLUDE_JVMCI
1779 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1780 // there is no exception handler for this pc => deoptimize
1781 cm->make_not_entrant();
1782
1783 // Use Deoptimization::deoptimize for all of its side-effects:
1784 // gathering traps statistics, logging...
1785 // it also patches the return pc but we do not care about that
1786 // since we return a continuation to the deopt_blob below.
1787 JavaThread* thread = JavaThread::current();
1788 RegisterMap reg_map(thread,
|
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecodeStream.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/oopMapCache.hpp"
42 #include "jvm.h"
43 #include "logging/log.hpp"
44 #include "logging/logLevel.hpp"
45 #include "logging/logMessage.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/constantPool.hpp"
52 #include "oops/flatArrayKlass.hpp"
53 #include "oops/flatArrayOop.hpp"
54 #include "oops/fieldStreams.inline.hpp"
55 #include "oops/method.hpp"
56 #include "oops/objArrayKlass.hpp"
57 #include "oops/objArrayOop.inline.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "oops/inlineKlass.inline.hpp"
60 #include "oops/typeArrayOop.inline.hpp"
61 #include "oops/verifyOopClosure.hpp"
62 #include "prims/jvmtiDeferredUpdates.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "prims/jvmtiThreadState.hpp"
65 #include "prims/methodHandles.hpp"
66 #include "prims/vectorSupport.hpp"
67 #include "runtime/atomic.hpp"
68 #include "runtime/continuation.hpp"
69 #include "runtime/continuationEntry.inline.hpp"
70 #include "runtime/deoptimization.hpp"
71 #include "runtime/escapeBarrier.hpp"
72 #include "runtime/fieldDescriptor.hpp"
73 #include "runtime/fieldDescriptor.inline.hpp"
74 #include "runtime/frame.inline.hpp"
75 #include "runtime/handles.inline.hpp"
76 #include "runtime/interfaceSupport.inline.hpp"
77 #include "runtime/javaThread.hpp"
78 #include "runtime/jniHandles.inline.hpp"
79 #include "runtime/keepStackGCProcessed.hpp"
292
293 return fetch_unroll_info_helper(current, exec_mode);
294 JRT_END
295
296 #if COMPILER2_OR_JVMCI
297 // print information about reallocated objects
298 static void print_objects(JavaThread* deoptee_thread,
299 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
300 ResourceMark rm;
301 stringStream st; // change to logStream with logging
302 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
303 fieldDescriptor fd;
304
305 for (int i = 0; i < objects->length(); i++) {
306 ObjectValue* sv = (ObjectValue*) objects->at(i);
307 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
308 Handle obj = sv->value();
309
310 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
311 k->print_value_on(&st);
312 assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
313 if (obj.is_null()) {
314 if (k->is_inline_klass()) {
315 st.print(" is null");
316 } else {
317 st.print(" allocation failed");
318 }
319 } else {
320 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
321 }
322 st.cr();
323
324 if (Verbose && !obj.is_null()) {
325 k->oop_print_on(obj(), &st);
326 }
327 }
328 tty->print_raw(st.freeze());
329 }
330
// Reallocate on the Java heap all objects that were scalar-replaced in the
// youngest compiled frame of 'chunk' and write their field values back.
// Additionally handles a scalarized inline-type return value: its oop fields
// are saved into 'return_oops' across the (GC-capable) reallocations and the
// buffered return object is recreated via realloc_inline_type_result().
// Returns true if any reallocation failed. 'deoptimized_objects' is set when
// reallocation was done in the eager Unpack_none mode.
static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
                                  bool& deoptimized_objects) {
  bool realloc_failures = false;
  assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");

  // Only the owner thread may deoptimize its own frames directly; any other
  // thread must be using the Unpack_none (object deoptimization) mode.
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
         "a frame can only be deoptimized by the owner thread");

  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  ScopeDesc* scope = chunk->at(0)->scope();
  bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  // In case of the return of multiple values, we must take care
  // of all oop return values.
  GrowableArray<Handle> return_oops;
  InlineKlass* vk = nullptr;
  if (save_oop_result && scope->return_scalarized()) {
    // A scalarized inline-type return: capture its oop fields and let the
    // vk path below rebuild the buffered object instead of saving one oop.
    vk = InlineKlass::returned_inline_klass(map);
    if (vk != nullptr) {
      vk->save_oop_fields(map, return_oops);
      save_oop_result = false;
    }
  }
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_oops.push(Handle(thread, result));
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
      tty->cr();
    }
  }
  if (objects != nullptr || vk != nullptr) {
    if (exec_mode == Deoptimization::Unpack_none) {
      // Eager object deoptimization path: already in VM state, so exception
      // macros are used directly instead of a JRT_BLOCK.
      assert(thread->thread_state() == _thread_in_vm, "assumption");
      JavaThread* THREAD = thread; // For exception macros.
      // Clear pending OOM if reallocation fails and return true indicating allocation failure
      if (vk != nullptr) {
        realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
      }
      if (objects != nullptr) {
        // Note: '||' short-circuits, so realloc_objects is skipped once a
        // failure was already recorded for the inline-type result.
        realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
        bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
        Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
      }
      deoptimized_objects = true;
    } else {
      JavaThread* current = thread; // For JRT_BLOCK
      JRT_BLOCK
      if (vk != nullptr) {
        realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
      }
      if (objects != nullptr) {
        realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
        bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
        Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
      }
      JRT_END
    }
    if (TraceDeoptimization && objects != nullptr) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
  if (save_oop_result || vk != nullptr) {
    // Restore result.
    // NOTE(review): this assumes exactly one handle remains in return_oops at
    // this point (realloc_inline_type_result is presumed to consume the saved
    // field oops and leave the buffered result) — confirm against its
    // implementation, which is not visible here.
    assert(return_oops.length() == 1, "no inline type");
    deoptee.set_saved_oop_result(&map, return_oops.pop()());
  }
  return realloc_failures;
}
415
416 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
417 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
418 JavaThread* deoptee_thread = chunk->at(0)->thread();
419 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
420 assert(thread == Thread::current(), "should be");
421 HandleMark hm(thread);
422 #ifndef PRODUCT
423 bool first = true;
424 #endif // !PRODUCT
425 for (int i = 0; i < chunk->length(); i++) {
426 compiledVFrame* cvf = chunk->at(i);
427 assert (cvf->scope() != nullptr,"expect only compiled java frames");
428 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
429 if (monitors->is_nonempty()) {
430 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
431 exec_mode, realloc_failures);
726 // its caller's stack by. If the caller is a compiled frame then
727 // we pretend that the callee has no parameters so that the
728 // extension counts for the full amount of locals and not just
729 // locals-parms. This is because without a c2i adapter the parm
730 // area as created by the compiled frame will not be usable by
731 // the interpreter. (Depending on the calling convention there
732 // may not even be enough space).
733
734 // QQQ I'd rather see this pushed down into last_frame_adjust
735 // and have it take the sender (aka caller).
736
737 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
738 caller_adjustment = last_frame_adjust(0, callee_locals);
739 } else if (callee_locals > callee_parameters) {
740 // The caller frame may need extending to accommodate
741 // non-parameter locals of the first unpacked interpreted frame.
742 // Compute that adjustment.
743 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
744 }
745
746 // If the sender is deoptimized we must retrieve the address of the handler
747 // since the frame will "magically" show the original pc before the deopt
748 // and we'd undo the deopt.
749
750 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
751 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
752 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
753 }
754
755 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
756
757 #if INCLUDE_JVMCI
758 if (exceptionObject() != nullptr) {
759 current->set_exception_oop(exceptionObject());
760 exec_mode = Unpack_exception;
761 }
762 #endif
763
764 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
765 assert(current->has_pending_exception(), "should have thrown OOME");
766 current->set_exception_oop(current->pending_exception());
1226 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1227 default:;
1228 }
1229 }
1230 return nullptr;
1231 }
1232 #endif // INCLUDE_JVMCI
1233
1234 #if COMPILER2_OR_JVMCI
1235 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1236 Handle pending_exception(THREAD, thread->pending_exception());
1237 const char* exception_file = thread->exception_file();
1238 int exception_line = thread->exception_line();
1239 thread->clear_pending_exception();
1240
1241 bool failures = false;
1242
1243 for (int i = 0; i < objects->length(); i++) {
1244 assert(objects->at(i)->is_object(), "invalid debug information");
1245 ObjectValue* sv = (ObjectValue*) objects->at(i);
1246 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1247
1248 // Check if the object may be null and has an additional is_init input that needs
1249 // to be checked before using the field values. Skip re-allocation if it is null.
1250 if (sv->maybe_null()) {
1251 assert(k->is_inline_klass(), "must be an inline klass");
1252 jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1253 if (is_init == 0) {
1254 continue;
1255 }
1256 }
1257
1258 oop obj = nullptr;
1259 bool cache_init_error = false;
1260 if (k->is_instance_klass()) {
1261 #if INCLUDE_JVMCI
1262 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1263 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1264 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1265 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1266 if (obj != nullptr) {
1267 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1268 abv->set_cached(true);
1269 } else if (cache_init_error) {
1270 // Results in an OOME which is valid (as opposed to a class initialization error)
1271 // and is fine for the rare case a cache initialization failing.
1272 failures = true;
1273 }
1274 }
1275 #endif // INCLUDE_JVMCI
1276
1277 InstanceKlass* ik = InstanceKlass::cast(k);
1278 if (obj == nullptr && !cache_init_error) {
1279 #if COMPILER2_OR_JVMCI
1280 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1281 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1282 } else {
1283 obj = ik->allocate_instance(THREAD);
1284 }
1285 #else
1286 obj = ik->allocate_instance(THREAD);
1287 #endif // COMPILER2_OR_JVMCI
1288 }
1289 } else if (k->is_flatArray_klass()) {
1290 FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1291 // Inline type array must be zeroed because not all memory is reassigned
1292 obj = ak->allocate(sv->field_size(), THREAD);
1293 } else if (k->is_typeArray_klass()) {
1294 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1295 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1296 int len = sv->field_size() / type2size[ak->element_type()];
1297 obj = ak->allocate(len, THREAD);
1298 } else if (k->is_objArray_klass()) {
1299 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1300 obj = ak->allocate(sv->field_size(), THREAD);
1301 }
1302
1303 if (obj == nullptr) {
1304 failures = true;
1305 }
1306
1307 assert(sv->value().is_null(), "redundant reallocation");
1308 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1309 CLEAR_PENDING_EXCEPTION;
1310 sv->set_value(obj);
1311 }
1312
1313 if (failures) {
1314 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1315 } else if (pending_exception.not_null()) {
1316 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1317 }
1318
1319 return failures;
1320 }
1321
1322 // We're deoptimizing at the return of a call, inline type fields are
1323 // in registers. When we go back to the interpreter, it will expect a
1324 // reference to an inline type instance. Allocate and initialize it from
1325 // the register values here.
1326 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1327 oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1328 if (new_vt == nullptr) {
1329 CLEAR_PENDING_EXCEPTION;
1330 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1331 }
1332 return_oops.clear();
1333 return_oops.push(Handle(THREAD, new_vt));
1334 return false;
1335 }
1336
1337 #if INCLUDE_JVMCI
1338 /**
1339 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1340 * we need to somehow be able to recover the actual kind to be able to write the correct
1341 * amount of bytes.
 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 * the entries at index i + 1 to i + n - 1 are 'markers'.
1344 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1345 * expected form of the array would be:
1346 *
1347 * {b0, b1, b2, b3, INT, marker, b6, b7}
1348 *
1349 * Thus, in order to get back the size of the entry, we simply need to count the number
1350 * of marked entries
1351 *
1352 * @param virtualArray the virtualized byte array
1353 * @param i index of the virtual entry we are recovering
1354 * @return The number of bytes the entry spans
1355 */
1356 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1482 default:
1483 ShouldNotReachHere();
1484 }
1485 index++;
1486 }
1487 }
1488
1489 // restore fields of an eliminated object array
1490 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1491 for (int i = 0; i < sv->field_size(); i++) {
1492 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1493 assert(value->type() == T_OBJECT, "object element expected");
1494 obj->obj_at_put(i, value->get_obj()());
1495 }
1496 }
1497
1498 class ReassignedField {
1499 public:
1500 int _offset;
1501 BasicType _type;
1502 InstanceKlass* _klass;
1503 bool _is_flat;
1504 public:
1505 ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { }
1506 };
1507
1508 int compare(ReassignedField* left, ReassignedField* right) {
1509 return left->_offset - right->_offset;
1510 }
1511
// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
// Walks the ScopeValues of 'sv' starting at 'svIndex' and writes each value
// into 'obj' at 'base_offset' plus the field's offset, recursing into flat
// inline type fields. Returns the next unconsumed ScopeValue index.
static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
  // Collect every non-static field of 'klass' and its superclasses
  // (optionally skipping injected fields), then sort by offset so the
  // fields line up with the order of the ScopeValues in 'sv'.
  GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
  InstanceKlass* ik = klass;
  while (ik != nullptr) {
    for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
      if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
        ReassignedField field;
        field._offset = fs.offset();
        field._type = Signature::basic_type(fs.signature());
        if (fs.is_null_free_inline_type()) {
          if (fs.is_flat()) {
            field._is_flat = true;
            // Resolve klass of flat inline type field
            // NOTE(review): uses 'klass' (the original receiver) with
            // fs.index() while streaming the fields of 'ik' - confirm the
            // index is valid for 'klass' when 'ik' is a superclass.
            field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
          } else {
            field._type = T_OBJECT; // Can be removed once Q-descriptors have been removed.
          }
        }
        fields->append(field);
      }
    }
    ik = ik->superklass();
  }
  fields->sort(compare);
  for (int i = 0; i < fields->length(); i++) {
    BasicType type = fields->at(i)._type;
    int offset = base_offset + fields->at(i)._offset;
    // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
    if (fields->at(i)._is_flat) {
      // Recursively re-assign flat inline type fields
      InstanceKlass* vk = fields->at(i)._klass;
      assert(vk != nullptr, "must be resolved");
      offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
      svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
      continue; // Continue because we don't need to increment svIndex
    }
    // Materialize the stack value for this field and store it into 'obj'.
    ScopeValue* scope_field = sv->field_at(svIndex);
    StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
        assert(value->type() == T_OBJECT, "Agreement.");
        obj->obj_field_put(offset, value->get_obj()());
        break;

      case T_INT: case T_FLOAT: { // 4 bytes.
        assert(value->type() == T_INT, "Agreement.");
        bool big_value = false;
        // A long/double may occupy this slot and the next T_INT slot; detect
        // that from the location type or from the following constant value.
        if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
          if (scope_field->is_location()) {
            Location::Type type = ((LocationValue*) scope_field)->location().type();
            if (type == Location::dbl || type == Location::lng) {
              big_value = true;
            }
          }
          if (scope_field->is_constant_int()) {
            ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
              big_value = true;
            }
          }
        break;

      case T_BYTE:
        assert(value->type() == T_INT, "Agreement.");
        obj->byte_field_put(offset, (jbyte)value->get_jint());
        break;

      case T_BOOLEAN:
        assert(value->type() == T_INT, "Agreement.");
        obj->bool_field_put(offset, (jboolean)value->get_jint());
        break;

      default:
        ShouldNotReachHere();
    }
    svIndex++;
  }
  return svIndex;
}
1627
1628 // restore fields of an eliminated inline type array
1629 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1630 InlineKlass* vk = vak->element_klass();
1631 assert(vk->flat_array(), "should only be used for flat inline type arrays");
1632 // Adjust offset to omit oop header
1633 int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1634 // Initialize all elements of the flat inline type array
1635 for (int i = 0; i < sv->field_size(); i++) {
1636 ScopeValue* val = sv->field_at(i);
1637 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1638 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1639 }
1640 }
1641
// restore fields of all eliminated objects and arrays
// Dispatches each scalar-replaced object described in 'objects' to the
// appropriate per-kind reassignment routine. Objects whose reallocation
// failed (value is null) and cached autoboxes are skipped.
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
#endif // !PRODUCT

    // Nothing to reassign when reallocation failed or the object was null.
    if (obj.is_null()) {
      continue;
    }

#if INCLUDE_JVMCI
    // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
    if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
      continue;
    }
#endif // INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
    if (EnableVectorSupport && VectorSupport::is_vector(k)) {
      assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
      ScopeValue* payload = sv->field_at(0);
      if (payload->is_location() &&
          payload->as_LocationValue()->location().type() == Location::vector) {
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("skip field reassignment for this vector - it should be assigned already");
          if (Verbose) {
            Handle obj = sv->value();
            k->oop_print_on(obj(), tty);
          }
        }
#endif // !PRODUCT
        continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
      }
      // Else fall-through to do assignment for scalar-replaced boxed vector representation
      // which could be restored after vector object allocation.
    }
#endif /* COMPILER2_OR_JVMCI */
    // Dispatch on the kind of reallocated object.
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
    } else if (k->is_flatArray_klass()) {
      FlatArrayKlass* vak = FlatArrayKlass::cast(k);
      reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}
1701
1702
1703 // relock objects for which synchronization was eliminated
1704 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1705 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1706 bool relocked_objects = false;
1707 for (int i = 0; i < monitors->length(); i++) {
1708 MonitorInfo* mon_info = monitors->at(i);
1709 if (mon_info->eliminated()) {
1710 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1711 relocked_objects = true;
1712 if (!mon_info->owner_is_scalar_replaced()) {
1842 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1843 cm->log_identity(xtty);
1844 xtty->end_head();
1845 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1846 xtty->begin_elem("jvms bci='%d'", sd->bci());
1847 xtty->method(sd->method());
1848 xtty->end_elem();
1849 if (sd->is_top()) break;
1850 }
1851 xtty->tail("deoptimized");
1852 }
1853
1854 Continuation::notify_deopt(thread, fr.sp());
1855
1856 // Patch the compiled method so that when execution returns to it we will
1857 // deopt the execution state and return to the interpreter.
1858 fr.deoptimize(thread);
1859 }
1860
1861 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1862 // Deoptimize only if the frame comes from compiled code.
1863 // Do not deoptimize the frame which is already patched
1864 // during the execution of the loops below.
1865 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1866 return;
1867 }
1868 ResourceMark rm;
1869 deoptimize_single_frame(thread, fr, reason);
1870 }
1871
1872 #if INCLUDE_JVMCI
1873 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1874 // there is no exception handler for this pc => deoptimize
1875 cm->make_not_entrant();
1876
1877 // Use Deoptimization::deoptimize for all of its side-effects:
1878 // gathering traps statistics, logging...
1879 // it also patches the return pc but we do not care about that
1880 // since we return a continuation to the deopt_blob below.
1881 JavaThread* thread = JavaThread::current();
1882 RegisterMap reg_map(thread,
|