32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecodeStream.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/oopMapCache.hpp"
42 #include "jvm.h"
43 #include "logging/log.hpp"
44 #include "logging/logLevel.hpp"
45 #include "logging/logMessage.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/constantPool.hpp"
52 #include "oops/fieldStreams.inline.hpp"
53 #include "oops/method.hpp"
54 #include "oops/objArrayKlass.hpp"
55 #include "oops/objArrayOop.inline.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "oops/typeArrayOop.inline.hpp"
58 #include "oops/verifyOopClosure.hpp"
59 #include "prims/jvmtiDeferredUpdates.hpp"
60 #include "prims/jvmtiExport.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "prims/methodHandles.hpp"
63 #include "prims/vectorSupport.hpp"
64 #include "runtime/atomic.hpp"
65 #include "runtime/continuation.hpp"
66 #include "runtime/continuationEntry.inline.hpp"
67 #include "runtime/deoptimization.hpp"
68 #include "runtime/escapeBarrier.hpp"
69 #include "runtime/fieldDescriptor.hpp"
70 #include "runtime/fieldDescriptor.inline.hpp"
71 #include "runtime/frame.inline.hpp"
72 #include "runtime/handles.inline.hpp"
73 #include "runtime/interfaceSupport.inline.hpp"
74 #include "runtime/javaThread.hpp"
75 #include "runtime/jniHandles.inline.hpp"
76 #include "runtime/keepStackGCProcessed.hpp"
288
289 return fetch_unroll_info_helper(current, exec_mode);
290 JRT_END
291
292 #if COMPILER2_OR_JVMCI
293 // print information about reallocated objects
294 static void print_objects(JavaThread* deoptee_thread,
295 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
296 ResourceMark rm;
297 stringStream st; // change to logStream with logging
298 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
299 fieldDescriptor fd;
300
301 for (int i = 0; i < objects->length(); i++) {
302 ObjectValue* sv = (ObjectValue*) objects->at(i);
303 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
304 Handle obj = sv->value();
305
306 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
307 k->print_value_on(&st);
308 assert(obj.not_null() || realloc_failures, "reallocation was missed");
309 if (obj.is_null()) {
310 st.print(" allocation failed");
311 } else {
312 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
313 }
314 st.cr();
315
316 if (Verbose && !obj.is_null()) {
317 k->oop_print_on(obj(), &st);
318 }
319 }
320 tty->print_raw(st.freeze());
321 }
322
323 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
324 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
325 bool& deoptimized_objects) {
326 bool realloc_failures = false;
327   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
328
329 JavaThread* deoptee_thread = chunk->at(0)->thread();
330 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
331 "a frame can only be deoptimized by the owner thread");
332
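  // Collect the scalar-replaced objects described by the youngest frame's
  // debug info; they are reallocated on the heap below.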
333 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
334
335   // The flag return_oop() indicates call sites which return an oop
336   // in compiled code. Such sites include Java method calls,
337   // runtime calls (for example, those used to allocate new objects/arrays
338   // on the slow code path) and any other calls generated in compiled code.
339   // It is not guaranteed that we can recover this information here just
340   // by analyzing the bytecode of the deoptimized frames. This is why the flag
341   // is set during method compilation (see Compile::Process_OopMap_Node()).
342   // If the previous frame was popped or if we are dispatching an exception,
343   // we don't have an oop result.
344 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
345 Handle return_value;
346 if (save_oop_result) {
347     // Reallocation may trigger GC. If deoptimization happened on return from
348     // a call which returns an oop, we need to save it since it is not in the oopmap.
349 oop result = deoptee.saved_oop_result(&map);
350 assert(oopDesc::is_oop_or_null(result), "must be oop");
351 return_value = Handle(thread, result);
352 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
353 if (TraceDeoptimization) {
354 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
355 tty->cr();
356 }
357 }
358 if (objects != nullptr) {
359 if (exec_mode == Deoptimization::Unpack_none) {
360 assert(thread->thread_state() == _thread_in_vm, "assumption");
361 JavaThread* THREAD = thread; // For exception macros.
362 // Clear pending OOM if reallocation fails and return true indicating allocation failure
363 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
364 deoptimized_objects = true;
365 } else {
366 JavaThread* current = thread; // For JRT_BLOCK
367 JRT_BLOCK
368 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
369 JRT_END
370 }
371 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
372 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
373 if (TraceDeoptimization) {
374 print_objects(deoptee_thread, objects, realloc_failures);
375 }
376 }
377 if (save_oop_result) {
378 // Restore result.
379 deoptee.set_saved_oop_result(&map, return_value());
380 }
381 return realloc_failures;
382 }
383
384 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
385 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
386 JavaThread* deoptee_thread = chunk->at(0)->thread();
387 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
388 assert(thread == Thread::current(), "should be");
389 HandleMark hm(thread);
390 #ifndef PRODUCT
391 bool first = true;
392 #endif // !PRODUCT
393 for (int i = 0; i < chunk->length(); i++) {
394 compiledVFrame* cvf = chunk->at(i);
395     assert(cvf->scope() != nullptr, "expect only compiled java frames");
396 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
397 if (monitors->is_nonempty()) {
398 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
399 exec_mode, realloc_failures);
694 // its caller's stack by. If the caller is a compiled frame then
695 // we pretend that the callee has no parameters so that the
696 // extension counts for the full amount of locals and not just
697 // locals-parms. This is because without a c2i adapter the parm
698 // area as created by the compiled frame will not be usable by
699 // the interpreter. (Depending on the calling convention there
700 // may not even be enough space).
701
702 // QQQ I'd rather see this pushed down into last_frame_adjust
703 // and have it take the sender (aka caller).
704
705 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
706 caller_adjustment = last_frame_adjust(0, callee_locals);
707 } else if (callee_locals > callee_parameters) {
708 // The caller frame may need extending to accommodate
709 // non-parameter locals of the first unpacked interpreted frame.
710 // Compute that adjustment.
711 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
712 }
713
714   // If the sender is deoptimized we must retrieve the address of the handler
715 // since the frame will "magically" show the original pc before the deopt
716 // and we'd undo the deopt.
717
718 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
719 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
720 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
721 }
722
723 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
724
725 #if INCLUDE_JVMCI
726 if (exceptionObject() != nullptr) {
727 current->set_exception_oop(exceptionObject());
728 exec_mode = Unpack_exception;
729 }
730 #endif
731
732 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
733 assert(current->has_pending_exception(), "should have thrown OOME");
734 current->set_exception_oop(current->pending_exception());
1194 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1195 default:;
1196 }
1197 }
1198 return nullptr;
1199 }
1200 #endif // INCLUDE_JVMCI
1201
1202 #if COMPILER2_OR_JVMCI
1203 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1204 Handle pending_exception(THREAD, thread->pending_exception());
1205 const char* exception_file = thread->exception_file();
1206 int exception_line = thread->exception_line();
1207 thread->clear_pending_exception();
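  // Stash any pending exception so reallocation can proceed; it is
  // reinstalled below unless reallocation itself fails with an OOME.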
1208
1209 bool failures = false;
1210
1211 for (int i = 0; i < objects->length(); i++) {
1212 assert(objects->at(i)->is_object(), "invalid debug information");
1213 ObjectValue* sv = (ObjectValue*) objects->at(i);
1214
1215 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1216 oop obj = nullptr;
1217
1218 bool cache_init_error = false;
1219 if (k->is_instance_klass()) {
1220 #if INCLUDE_JVMCI
1221 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1222 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1223 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1224 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1225 if (obj != nullptr) {
1226 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1227 abv->set_cached(true);
1228 } else if (cache_init_error) {
1229        // Results in an OOME which is valid (as opposed to a class initialization error)
1230        // and is fine for the rare case of a cache initialization failing.
1231 failures = true;
1232 }
1233 }
1234 #endif // INCLUDE_JVMCI
1235
1236 InstanceKlass* ik = InstanceKlass::cast(k);
1237 if (obj == nullptr && !cache_init_error) {
1238 #if COMPILER2_OR_JVMCI
1239 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1240 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1241 } else {
1242 obj = ik->allocate_instance(THREAD);
1243 }
1244 #else
1245 obj = ik->allocate_instance(THREAD);
1246 #endif // COMPILER2_OR_JVMCI
1247 }
1248 } else if (k->is_typeArray_klass()) {
1249 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1250 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1251 int len = sv->field_size() / type2size[ak->element_type()];
1252 obj = ak->allocate(len, THREAD);
1253 } else if (k->is_objArray_klass()) {
1254 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1255 obj = ak->allocate(sv->field_size(), THREAD);
1256 }
1257
1258 if (obj == nullptr) {
1259 failures = true;
1260 }
1261
1262 assert(sv->value().is_null(), "redundant reallocation");
1263 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1264 CLEAR_PENDING_EXCEPTION;
1265 sv->set_value(obj);
1266 }
1267
1268 if (failures) {
1269 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1270 } else if (pending_exception.not_null()) {
1271 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1272 }
1273
1274 return failures;
1275 }
1276
1277 #if INCLUDE_JVMCI
1278 /**
1279 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1280 * we need to somehow be able to recover the actual kind to be able to write the correct
1281 * amount of bytes.
1282 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1283  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1284 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1285 * expected form of the array would be:
1286 *
1287 * {b0, b1, b2, b3, INT, marker, b6, b7}
1288 *
1289 * Thus, in order to get back the size of the entry, we simply need to count the number
1290  * of marked entries.
1291 *
1292 * @param virtualArray the virtualized byte array
1293 * @param i index of the virtual entry we are recovering
1294 * @return The number of bytes the entry spans
1295 */
1296 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1422 default:
1423 ShouldNotReachHere();
1424 }
1425 index++;
1426 }
1427 }
1428
1429 // restore fields of an eliminated object array
1430 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1431 for (int i = 0; i < sv->field_size(); i++) {
1432 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1433 assert(value->type() == T_OBJECT, "object element expected");
1434 obj->obj_at_put(i, value->get_obj()());
1435 }
1436 }
1437
1438 class ReassignedField {
1439 public:
1440 int _offset;
1441 BasicType _type;
1442 public:
1443 ReassignedField() {
1444 _offset = 0;
1445 _type = T_ILLEGAL;
1446 }
1447 };
1448
1449 int compare(ReassignedField* left, ReassignedField* right) {
1450 return left->_offset - right->_offset;
1451 }
1452
1453 // Restore fields of an eliminated instance object using the same field order
1454 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1455 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1456 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1457 InstanceKlass* ik = klass;
1458 while (ik != nullptr) {
1459 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1460 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1461 ReassignedField field;
1462 field._offset = fs.offset();
1463 field._type = Signature::basic_type(fs.signature());
1464 fields->append(field);
1465 }
1466 }
1467 ik = ik->superklass();
1468 }
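  // Sort by offset to match the field order used when the debug info was
  // emitted (see the HotSpotResolvedObjectTypeImpl note above).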
1469 fields->sort(compare);
1470 for (int i = 0; i < fields->length(); i++) {
1471 ScopeValue* scope_field = sv->field_at(svIndex);
1472 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1473 int offset = fields->at(i)._offset;
1474 BasicType type = fields->at(i)._type;
1475 switch (type) {
1476 case T_OBJECT: case T_ARRAY:
1477 assert(value->type() == T_OBJECT, "Agreement.");
1478 obj->obj_field_put(offset, value->get_obj()());
1479 break;
1480
1481 case T_INT: case T_FLOAT: { // 4 bytes.
1482 assert(value->type() == T_INT, "Agreement.");
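        // The debug info may describe two adjacent 32-bit fields with a single
        // long/double location or constant; detect that case so both halves
        // are written back together.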
1483 bool big_value = false;
1484 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1485 if (scope_field->is_location()) {
1486 Location::Type type = ((LocationValue*) scope_field)->location().type();
1487 if (type == Location::dbl || type == Location::lng) {
1488 big_value = true;
1489 }
1490 }
1491 if (scope_field->is_constant_int()) {
1492 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1493 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1494 big_value = true;
1495 }
1496 }
1530 break;
1531
1532 case T_BYTE:
1533 assert(value->type() == T_INT, "Agreement.");
1534 obj->byte_field_put(offset, (jbyte)value->get_jint());
1535 break;
1536
1537 case T_BOOLEAN:
1538 assert(value->type() == T_INT, "Agreement.");
1539 obj->bool_field_put(offset, (jboolean)value->get_jint());
1540 break;
1541
1542 default:
1543 ShouldNotReachHere();
1544 }
1545 svIndex++;
1546 }
1547 return svIndex;
1548 }
1549
1550 // restore fields of all eliminated objects and arrays
1551 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1552 for (int i = 0; i < objects->length(); i++) {
1553 assert(objects->at(i)->is_object(), "invalid debug information");
1554 ObjectValue* sv = (ObjectValue*) objects->at(i);
1555 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1556 Handle obj = sv->value();
1557 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1558 #ifndef PRODUCT
1559 if (PrintDeoptimizationDetails) {
1560 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1561 }
1562 #endif // !PRODUCT
1563
1564 if (obj.is_null()) {
1565 continue;
1566 }
1567
1568 #if INCLUDE_JVMCI
1569 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1570 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1571 continue;
1572 }
1573 #endif // INCLUDE_JVMCI
1574 #if COMPILER2_OR_JVMCI
1575 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1576 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1577 ScopeValue* payload = sv->field_at(0);
1578 if (payload->is_location() &&
1579 payload->as_LocationValue()->location().type() == Location::vector) {
1580 #ifndef PRODUCT
1581 if (PrintDeoptimizationDetails) {
1582 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1583 if (Verbose) {
1584 Handle obj = sv->value();
1585 k->oop_print_on(obj(), tty);
1586 }
1587 }
1588 #endif // !PRODUCT
1589 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1590 }
1591 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1592 // which could be restored after vector object allocation.
1593 }
1594 #endif // COMPILER2_OR_JVMCI
1595 if (k->is_instance_klass()) {
1596 InstanceKlass* ik = InstanceKlass::cast(k);
1597 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1598 } else if (k->is_typeArray_klass()) {
1599 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1600 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1601 } else if (k->is_objArray_klass()) {
1602 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1603 }
1604 }
1605 }
1606
1607
1608 // relock objects for which synchronization was eliminated
1609 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1610 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1611 bool relocked_objects = false;
1612 for (int i = 0; i < monitors->length(); i++) {
1613 MonitorInfo* mon_info = monitors->at(i);
1614 if (mon_info->eliminated()) {
1615 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1616 relocked_objects = true;
1617 if (!mon_info->owner_is_scalar_replaced()) {
1737 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1738 cm->log_identity(xtty);
1739 xtty->end_head();
1740 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1741 xtty->begin_elem("jvms bci='%d'", sd->bci());
1742 xtty->method(sd->method());
1743 xtty->end_elem();
1744 if (sd->is_top()) break;
1745 }
1746 xtty->tail("deoptimized");
1747 }
1748
1749 Continuation::notify_deopt(thread, fr.sp());
1750
1751 // Patch the compiled method so that when execution returns to it we will
1752 // deopt the execution state and return to the interpreter.
1753 fr.deoptimize(thread);
1754 }
1755
1756 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1757   // Deoptimize only if the frame comes from compiled code.
1758   // Do not deoptimize a frame that has already been patched
1759   // during the execution of the loops below.
1760 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1761 return;
1762 }
1763 ResourceMark rm;
1764 deoptimize_single_frame(thread, fr, reason);
1765 }
1766
1767 #if INCLUDE_JVMCI
1768 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1769 // there is no exception handler for this pc => deoptimize
1770 cm->make_not_entrant();
1771
1772   // Use Deoptimization::deoptimize for all of its side-effects:
1773   // gathering trap statistics, logging...
1774   // It also patches the return pc, but we do not care about that
1775   // since we return a continuation to the deopt_blob below.
1776 JavaThread* thread = JavaThread::current();
1777 RegisterMap reg_map(thread,
|
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecodeStream.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/oopMapCache.hpp"
42 #include "jvm.h"
43 #include "logging/log.hpp"
44 #include "logging/logLevel.hpp"
45 #include "logging/logMessage.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/allocation.inline.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/constantPool.hpp"
52 #include "oops/flatArrayKlass.hpp"
53 #include "oops/flatArrayOop.hpp"
54 #include "oops/fieldStreams.inline.hpp"
55 #include "oops/method.hpp"
56 #include "oops/objArrayKlass.hpp"
57 #include "oops/objArrayOop.inline.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "oops/inlineKlass.inline.hpp"
60 #include "oops/typeArrayOop.inline.hpp"
61 #include "oops/verifyOopClosure.hpp"
62 #include "prims/jvmtiDeferredUpdates.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "prims/jvmtiThreadState.hpp"
65 #include "prims/methodHandles.hpp"
66 #include "prims/vectorSupport.hpp"
67 #include "runtime/atomic.hpp"
68 #include "runtime/continuation.hpp"
69 #include "runtime/continuationEntry.inline.hpp"
70 #include "runtime/deoptimization.hpp"
71 #include "runtime/escapeBarrier.hpp"
72 #include "runtime/fieldDescriptor.hpp"
73 #include "runtime/fieldDescriptor.inline.hpp"
74 #include "runtime/frame.inline.hpp"
75 #include "runtime/handles.inline.hpp"
76 #include "runtime/interfaceSupport.inline.hpp"
77 #include "runtime/javaThread.hpp"
78 #include "runtime/jniHandles.inline.hpp"
79 #include "runtime/keepStackGCProcessed.hpp"
291
292 return fetch_unroll_info_helper(current, exec_mode);
293 JRT_END
294
295 #if COMPILER2_OR_JVMCI
296 // print information about reallocated objects
297 static void print_objects(JavaThread* deoptee_thread,
298 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
299 ResourceMark rm;
300 stringStream st; // change to logStream with logging
301 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
302 fieldDescriptor fd;
303
304 for (int i = 0; i < objects->length(); i++) {
305 ObjectValue* sv = (ObjectValue*) objects->at(i);
306 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
307 Handle obj = sv->value();
308
309 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
310 k->print_value_on(&st);
311 assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
312 if (obj.is_null()) {
313 if (k->is_inline_klass()) {
314 st.print(" is null");
315 } else {
316 st.print(" allocation failed");
317 }
318 } else {
319 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
320 }
321 st.cr();
322
323 if (Verbose && !obj.is_null()) {
324 k->oop_print_on(obj(), &st);
325 }
326 }
327 tty->print_raw(st.freeze());
328 }
329
330 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
331 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
332 bool& deoptimized_objects) {
333 bool realloc_failures = false;
334   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
335
336 JavaThread* deoptee_thread = chunk->at(0)->thread();
337 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
338 "a frame can only be deoptimized by the owner thread");
339
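  // Collect the scalar-replaced objects described by the youngest frame's
  // debug info; they are reallocated on the heap below.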
340 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
341
342   // The flag return_oop() indicates call sites which return an oop
343   // in compiled code. Such sites include Java method calls,
344   // runtime calls (for example, those used to allocate new objects/arrays
345   // on the slow code path) and any other calls generated in compiled code.
346   // It is not guaranteed that we can recover this information here just
347   // by analyzing the bytecode of the deoptimized frames. This is why the flag
348   // is set during method compilation (see Compile::Process_OopMap_Node()).
349   // If the previous frame was popped or if we are dispatching an exception,
350   // we don't have an oop result.
351 ScopeDesc* scope = chunk->at(0)->scope();
352 bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
353   // If the call returns multiple values, we must take care of
354   // all oop return values.
355 GrowableArray<Handle> return_oops;
356 InlineKlass* vk = nullptr;
357 if (save_oop_result && scope->return_scalarized()) {
358 vk = InlineKlass::returned_inline_klass(map);
359 if (vk != nullptr) {
360 vk->save_oop_fields(map, return_oops);
361 save_oop_result = false;
362 }
363 }
364 if (save_oop_result) {
365     // Reallocation may trigger GC. If deoptimization happened on return from
366     // a call which returns an oop, we need to save it since it is not in the oopmap.
367 oop result = deoptee.saved_oop_result(&map);
368 assert(oopDesc::is_oop_or_null(result), "must be oop");
369 return_oops.push(Handle(thread, result));
370 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
371 if (TraceDeoptimization) {
372 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
373 tty->cr();
374 }
375 }
376 if (objects != nullptr || vk != nullptr) {
377 if (exec_mode == Deoptimization::Unpack_none) {
378 assert(thread->thread_state() == _thread_in_vm, "assumption");
379 JavaThread* THREAD = thread; // For exception macros.
380 // Clear pending OOM if reallocation fails and return true indicating allocation failure
381 if (vk != nullptr) {
382 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
383 }
384 if (objects != nullptr) {
385 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
386 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
387 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
388 }
389 deoptimized_objects = true;
390 } else {
391 JavaThread* current = thread; // For JRT_BLOCK
392 JRT_BLOCK
393 if (vk != nullptr) {
394 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
395 }
396 if (objects != nullptr) {
397 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
398 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
399 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
400 }
401 JRT_END
402 }
403 if (TraceDeoptimization) {
404 print_objects(deoptee_thread, objects, realloc_failures);
405 }
406 }
407 if (save_oop_result || vk != nullptr) {
408 // Restore result.
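    // Either the plain oop result or the single buffered inline type
    // created by realloc_inline_type_result() above.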
409 assert(return_oops.length() == 1, "no inline type");
410 deoptee.set_saved_oop_result(&map, return_oops.pop()());
411 }
412 return realloc_failures;
413 }
414
415 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
416 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
417 JavaThread* deoptee_thread = chunk->at(0)->thread();
418 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
419 assert(thread == Thread::current(), "should be");
420 HandleMark hm(thread);
421 #ifndef PRODUCT
422 bool first = true;
423 #endif // !PRODUCT
424 for (int i = 0; i < chunk->length(); i++) {
425 compiledVFrame* cvf = chunk->at(i);
426     assert(cvf->scope() != nullptr, "expect only compiled java frames");
427 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
428 if (monitors->is_nonempty()) {
429 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
430 exec_mode, realloc_failures);
725 // its caller's stack by. If the caller is a compiled frame then
726 // we pretend that the callee has no parameters so that the
727 // extension counts for the full amount of locals and not just
728 // locals-parms. This is because without a c2i adapter the parm
729 // area as created by the compiled frame will not be usable by
730 // the interpreter. (Depending on the calling convention there
731 // may not even be enough space).
732
733 // QQQ I'd rather see this pushed down into last_frame_adjust
734 // and have it take the sender (aka caller).
735
736 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
737 caller_adjustment = last_frame_adjust(0, callee_locals);
738 } else if (callee_locals > callee_parameters) {
739 // The caller frame may need extending to accommodate
740 // non-parameter locals of the first unpacked interpreted frame.
741 // Compute that adjustment.
742 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
743 }
744
745 // If the sender is deoptimized we must retrieve the address of the handler
746 // since the frame will "magically" show the original pc before the deopt
747 // and we'd undo the deopt.
748
749 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
750 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
751 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
752 }
753
754 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
755
756 #if INCLUDE_JVMCI
757 if (exceptionObject() != nullptr) {
758 current->set_exception_oop(exceptionObject());
759 exec_mode = Unpack_exception;
760 }
761 #endif
762
763 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
764 assert(current->has_pending_exception(), "should have thrown OOME");
765 current->set_exception_oop(current->pending_exception());
1225 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1226 default:;
1227 }
1228 }
1229 return nullptr;
1230 }
1231 #endif // INCLUDE_JVMCI
1232
1233 #if COMPILER2_OR_JVMCI
1234 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1235 Handle pending_exception(THREAD, thread->pending_exception());
1236 const char* exception_file = thread->exception_file();
1237 int exception_line = thread->exception_line();
1238 thread->clear_pending_exception();
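  // Stash any pending exception so reallocation can proceed; it is
  // reinstalled below unless reallocation itself fails with an OOME.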
1239
1240 bool failures = false;
1241
1242 for (int i = 0; i < objects->length(); i++) {
1243 assert(objects->at(i)->is_object(), "invalid debug information");
1244 ObjectValue* sv = (ObjectValue*) objects->at(i);
1245 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1246
1247 // Check if the object may be null and has an additional is_init input that needs
1248 // to be checked before using the field values. Skip re-allocation if it is null.
1249 if (sv->maybe_null()) {
1250 assert(k->is_inline_klass(), "must be an inline klass");
1251 jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1252 if (is_init == 0) {
1253 continue;
1254 }
1255 }
1256
1257 oop obj = nullptr;
1258 bool cache_init_error = false;
1259 if (k->is_instance_klass()) {
1260 #if INCLUDE_JVMCI
1261 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1262 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1263 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1264 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1265 if (obj != nullptr) {
1266 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1267 abv->set_cached(true);
1268 } else if (cache_init_error) {
1269        // Results in an OOME which is valid (as opposed to a class initialization error)
1270        // and is fine for the rare case of a cache initialization failing.
1271 failures = true;
1272 }
1273 }
1274 #endif // INCLUDE_JVMCI
1275
1276 InstanceKlass* ik = InstanceKlass::cast(k);
1277 if (obj == nullptr && !cache_init_error) {
1278 #if COMPILER2_OR_JVMCI
1279 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1280 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1281 } else {
1282 obj = ik->allocate_instance(THREAD);
1283 }
1284 #else
1285 obj = ik->allocate_instance(THREAD);
1286 #endif // COMPILER2_OR_JVMCI
1287 }
1288 } else if (k->is_flatArray_klass()) {
1289 FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1290 // Inline type array must be zeroed because not all memory is reassigned
1291 obj = ak->allocate(sv->field_size(), THREAD);
1292 } else if (k->is_typeArray_klass()) {
1293 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1294 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1295 int len = sv->field_size() / type2size[ak->element_type()];
1296 obj = ak->allocate(len, THREAD);
1297 } else if (k->is_objArray_klass()) {
1298 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1299 obj = ak->allocate(sv->field_size(), THREAD);
1300 }
1301
1302 if (obj == nullptr) {
1303 failures = true;
1304 }
1305
1306 assert(sv->value().is_null(), "redundant reallocation");
1307 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1308 CLEAR_PENDING_EXCEPTION;
1309 sv->set_value(obj);
1310 }
1311
1312 if (failures) {
1313 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1314 } else if (pending_exception.not_null()) {
1315 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1316 }
1317
1318 return failures;
1319 }
1320
1321 // We're deoptimizing at the return of a call and the inline type's
1322 // fields are in registers. When we go back to the interpreter, it will
1323 // expect a reference to an inline type instance. Allocate and initialize
1324 // it from the register values here.
1325 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1326 oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1327 if (new_vt == nullptr) {
1328 CLEAR_PENDING_EXCEPTION;
1329 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1330 }
1331 return_oops.clear();
1332 return_oops.push(Handle(THREAD, new_vt));
1333 return false;
1334 }
1335
1336 #if INCLUDE_JVMCI
1337 /**
1338 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1339 * we need to somehow be able to recover the actual kind to be able to write the correct
1340 * amount of bytes.
1341 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1342  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1343 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1344 * expected form of the array would be:
1345 *
1346 * {b0, b1, b2, b3, INT, marker, b6, b7}
1347 *
1348 * Thus, in order to get back the size of the entry, we simply need to count the number
1349  * of marked entries.
1350 *
1351 * @param virtualArray the virtualized byte array
1352 * @param i index of the virtual entry we are recovering
1353 * @return The number of bytes the entry spans
1354 */
1355 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1481 default:
1482 ShouldNotReachHere();
1483 }
1484 index++;
1485 }
1486 }
1487
1488 // restore fields of an eliminated object array
1489 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1490 for (int i = 0; i < sv->field_size(); i++) {
1491 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1492 assert(value->type() == T_OBJECT, "object element expected");
1493 obj->obj_at_put(i, value->get_obj()());
1494 }
1495 }
1496
1497 class ReassignedField {
1498 public:
1499 int _offset;
1500 BasicType _type;
1501 InstanceKlass* _klass;
1502 bool _is_flat;
1503 public:
1504 ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { }
1505 };
1506
1507 int compare(ReassignedField* left, ReassignedField* right) {
1508 return left->_offset - right->_offset;
1509 }
1510
1511 // Restore fields of an eliminated instance object using the same field order
1512 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1513 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1514 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1515 InstanceKlass* ik = klass;
1516 while (ik != nullptr) {
1517 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1518 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1519 ReassignedField field;
1520 field._offset = fs.offset();
1521 field._type = Signature::basic_type(fs.signature());
1522 if (fs.is_null_free_inline_type()) {
1523 if (fs.is_flat()) {
1524 field._is_flat = true;
1525 // Resolve klass of flat inline type field
1526 field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1527 } else {
1528 field._type = T_OBJECT; // Can be removed once Q-descriptors have been removed.
1529 }
1530 }
1531 fields->append(field);
1532 }
1533 }
1534 ik = ik->superklass();
1535 }
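  // Sort by offset to match the field order used when the debug info was
  // emitted (see the HotSpotResolvedObjectTypeImpl note above).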
1536 fields->sort(compare);
1537 for (int i = 0; i < fields->length(); i++) {
1538 BasicType type = fields->at(i)._type;
1539 int offset = base_offset + fields->at(i)._offset;
1540 // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1541 if (fields->at(i)._is_flat) {
1542 // Recursively re-assign flat inline type fields
1543 InstanceKlass* vk = fields->at(i)._klass;
1544 assert(vk != nullptr, "must be resolved");
1545 offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1546 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1547 continue; // Continue because we don't need to increment svIndex
1548 }
1549 ScopeValue* scope_field = sv->field_at(svIndex);
1550 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1551 switch (type) {
1552 case T_OBJECT:
1553 case T_ARRAY:
1554 assert(value->type() == T_OBJECT, "Agreement.");
1555 obj->obj_field_put(offset, value->get_obj()());
1556 break;
1557
1558 case T_INT: case T_FLOAT: { // 4 bytes.
1559 assert(value->type() == T_INT, "Agreement.");
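        // The debug info may describe two adjacent 32-bit fields with a single
        // long/double location or constant; detect that case so both halves
        // are written back together.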
1560 bool big_value = false;
1561 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1562 if (scope_field->is_location()) {
1563 Location::Type type = ((LocationValue*) scope_field)->location().type();
1564 if (type == Location::dbl || type == Location::lng) {
1565 big_value = true;
1566 }
1567 }
1568 if (scope_field->is_constant_int()) {
1569 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1570 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1571 big_value = true;
1572 }
1573 }
1607 break;
1608
1609 case T_BYTE:
1610 assert(value->type() == T_INT, "Agreement.");
1611 obj->byte_field_put(offset, (jbyte)value->get_jint());
1612 break;
1613
1614 case T_BOOLEAN:
1615 assert(value->type() == T_INT, "Agreement.");
1616 obj->bool_field_put(offset, (jboolean)value->get_jint());
1617 break;
1618
1619 default:
1620 ShouldNotReachHere();
1621 }
1622 svIndex++;
1623 }
1624 return svIndex;
1625 }
1626
1627 // restore fields of an eliminated inline type array
1628 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1629 InlineKlass* vk = vak->element_klass();
1630 assert(vk->flat_array(), "should only be used for flat inline type arrays");
1631 // Adjust offset to omit oop header
1632 int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1633 // Initialize all elements of the flat inline type array
1634 for (int i = 0; i < sv->field_size(); i++) {
1635 ScopeValue* val = sv->field_at(i);
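    // Each flat element spans 2^log2_element_size bytes within the array payload.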
1636 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1637 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1638 }
1639 }
1640
1641 // restore fields of all eliminated objects and arrays
1642 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1643 for (int i = 0; i < objects->length(); i++) {
1644 assert(objects->at(i)->is_object(), "invalid debug information");
1645 ObjectValue* sv = (ObjectValue*) objects->at(i);
1646 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1647 Handle obj = sv->value();
1648 assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1649 #ifndef PRODUCT
1650 if (PrintDeoptimizationDetails) {
1651 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1652 }
1653 #endif // !PRODUCT
1654
1655 if (obj.is_null()) {
1656 continue;
1657 }
1658
1659 #if INCLUDE_JVMCI
1660 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1661 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1662 continue;
1663 }
1664 #endif // INCLUDE_JVMCI
1665 #if COMPILER2_OR_JVMCI
1666 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1667 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1668 ScopeValue* payload = sv->field_at(0);
1669 if (payload->is_location() &&
1670 payload->as_LocationValue()->location().type() == Location::vector) {
1671 #ifndef PRODUCT
1672 if (PrintDeoptimizationDetails) {
1673 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1674 if (Verbose) {
1675 Handle obj = sv->value();
1676 k->oop_print_on(obj(), tty);
1677 }
1678 }
1679 #endif // !PRODUCT
1680 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1681 }
1682 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1683 // which could be restored after vector object allocation.
1684 }
1685 #endif // COMPILER2_OR_JVMCI
1686 if (k->is_instance_klass()) {
1687 InstanceKlass* ik = InstanceKlass::cast(k);
1688 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1689 } else if (k->is_flatArray_klass()) {
1690 FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1691 reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1692 } else if (k->is_typeArray_klass()) {
1693 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1694 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1695 } else if (k->is_objArray_klass()) {
1696 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1697 }
1698 }
1699 }
1700
1701
1702 // relock objects for which synchronization was eliminated
1703 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1704 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1705 bool relocked_objects = false;
1706 for (int i = 0; i < monitors->length(); i++) {
1707 MonitorInfo* mon_info = monitors->at(i);
1708 if (mon_info->eliminated()) {
1709 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1710 relocked_objects = true;
1711 if (!mon_info->owner_is_scalar_replaced()) {
1831 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1832 cm->log_identity(xtty);
1833 xtty->end_head();
1834 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1835 xtty->begin_elem("jvms bci='%d'", sd->bci());
1836 xtty->method(sd->method());
1837 xtty->end_elem();
1838 if (sd->is_top()) break;
1839 }
1840 xtty->tail("deoptimized");
1841 }
1842
1843 Continuation::notify_deopt(thread, fr.sp());
1844
1845 // Patch the compiled method so that when execution returns to it we will
1846 // deopt the execution state and return to the interpreter.
1847 fr.deoptimize(thread);
1848 }
1849
1850 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1851 // Deoptimize only if the frame comes from compiled code.
1852   // Do not deoptimize a frame that has already been patched
1853   // during the execution of the loops below.
1854 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1855 return;
1856 }
1857 ResourceMark rm;
1858 deoptimize_single_frame(thread, fr, reason);
1859 }
1860
1861 #if INCLUDE_JVMCI
1862 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1863 // there is no exception handler for this pc => deoptimize
1864 cm->make_not_entrant();
1865
1866   // Use Deoptimization::deoptimize for all of its side-effects:
1867   // gathering trap statistics, logging...
1868   // It also patches the return pc, but we do not care about that
1869   // since we return a continuation to the deopt_blob below.
1870 JavaThread* thread = JavaThread::current();
1871 RegisterMap reg_map(thread,
|