31 #include "code/debugInfoRec.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/interpreter.hpp"
40 #include "interpreter/oopMapCache.hpp"
41 #include "jvm.h"
42 #include "logging/log.hpp"
43 #include "logging/logLevel.hpp"
44 #include "logging/logMessage.hpp"
45 #include "logging/logStream.hpp"
46 #include "memory/allocation.inline.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/constantPool.hpp"
51 #include "oops/fieldStreams.inline.hpp"
52 #include "oops/method.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/objArrayOop.inline.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "oops/typeArrayOop.inline.hpp"
57 #include "oops/verifyOopClosure.hpp"
58 #include "prims/jvmtiDeferredUpdates.hpp"
59 #include "prims/jvmtiExport.hpp"
60 #include "prims/jvmtiThreadState.hpp"
61 #include "prims/methodHandles.hpp"
62 #include "prims/vectorSupport.hpp"
63 #include "runtime/atomic.hpp"
64 #include "runtime/continuation.hpp"
65 #include "runtime/continuationEntry.inline.hpp"
66 #include "runtime/deoptimization.hpp"
67 #include "runtime/escapeBarrier.hpp"
68 #include "runtime/fieldDescriptor.hpp"
69 #include "runtime/fieldDescriptor.inline.hpp"
70 #include "runtime/frame.inline.hpp"
71 #include "runtime/handles.inline.hpp"
72 #include "runtime/interfaceSupport.inline.hpp"
73 #include "runtime/javaThread.hpp"
74 #include "runtime/jniHandles.inline.hpp"
75 #include "runtime/keepStackGCProcessed.hpp"
287
288 return fetch_unroll_info_helper(current, exec_mode);
289 JRT_END
290
291 #if COMPILER2_OR_JVMCI
292 // print information about reallocated objects
293 static void print_objects(JavaThread* deoptee_thread,
294 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
295 ResourceMark rm;
296 stringStream st; // change to logStream with logging
297 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
298 fieldDescriptor fd;
299
300 for (int i = 0; i < objects->length(); i++) {
301 ObjectValue* sv = (ObjectValue*) objects->at(i);
302 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
303 Handle obj = sv->value();
304
305 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
306 k->print_value_on(&st);
307 assert(obj.not_null() || realloc_failures, "reallocation was missed");
308 if (obj.is_null()) {
309 st.print(" allocation failed");
310 } else {
311 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
312 }
313 st.cr();
314
315 if (Verbose && !obj.is_null()) {
316 k->oop_print_on(obj(), &st);
317 }
318 }
319 tty->print_raw(st.freeze());
320 }
321
322 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
323 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
324 bool& deoptimized_objects) {
325 bool realloc_failures = false;
326 assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
327
328 JavaThread* deoptee_thread = chunk->at(0)->thread();
329 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
330 "a frame can only be deoptimized by the owner thread");
331
332 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
333
334 // The flag return_oop() indicates call sites which return an oop
335 // in compiled code. Such sites include Java method calls,
336 // runtime calls (for example, used to allocate new objects/arrays
337 // on the slow path) and any other calls generated in compiled code.
338 // This information cannot be reliably recovered here just by
339 // analyzing the bytecode of deoptimized frames, which is why the flag
340 // is set during method compilation (see Compile::Process_OopMap_Node()).
341 // If the previous frame was popped or if we are dispatching an exception,
342 // we don't have an oop result.
343 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
344 Handle return_value;
345 if (save_oop_result) {
346 // Reallocation may trigger GC. If deoptimization happened on return from
347 // a call which returns an oop, we need to save it since it is not in the oopmap.
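// (On x86_64, for example, the result oop sits in rax at this point; no
// oopmap entry describes it, so a GC during reallocation would otherwise
// miss it. The exact register is platform-specific.)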
348 oop result = deoptee.saved_oop_result(&map);
349 assert(oopDesc::is_oop_or_null(result), "must be oop");
350 return_value = Handle(thread, result);
351 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
352 if (TraceDeoptimization) {
353 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
354 tty->cr();
355 }
356 }
357 if (objects != nullptr) {
358 if (exec_mode == Deoptimization::Unpack_none) {
359 assert(thread->thread_state() == _thread_in_vm, "assumption");
360 JavaThread* THREAD = thread; // For exception macros.
361 // Clear pending OOM if reallocation fails and return true indicating allocation failure
362 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
363 deoptimized_objects = true;
364 } else {
365 JavaThread* current = thread; // For JRT_BLOCK
366 JRT_BLOCK
367 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
368 JRT_END
369 }
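// JVMCI debug info enumerates fields in the order returned by
// HotSpotResolvedObjectTypeImpl.getInstanceFields(true), which includes
// injected ("internal") fields, so only skip them for non-JVMCI code.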
370 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
371 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
372 if (TraceDeoptimization) {
373 print_objects(deoptee_thread, objects, realloc_failures);
374 }
375 }
376 if (save_oop_result) {
377 // Restore result.
378 deoptee.set_saved_oop_result(&map, return_value());
379 }
380 return realloc_failures;
381 }
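// A minimal sketch of how this helper is driven (surrounding variable names
// are hypothetical; only the signature above is authoritative):
//
//   bool deoptimized_objects = false;
//   bool realloc_failures = rematerialize_objects(
//       thread, Deoptimization::Unpack_deopt, cm, deoptee, map, chunk,
//       deoptimized_objects);
//
// The result then feeds the realloc_failures parameter of
// restore_eliminated_locks() below.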
382
383 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
384 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
385 JavaThread* deoptee_thread = chunk->at(0)->thread();
386 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
387 assert(thread == Thread::current(), "should be");
388 HandleMark hm(thread);
389 #ifndef PRODUCT
390 bool first = true;
391 #endif // !PRODUCT
392 for (int i = 0; i < chunk->length(); i++) {
393 compiledVFrame* cvf = chunk->at(i);
394 assert(cvf->scope() != nullptr, "expect only compiled java frames");
395 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
396 if (monitors->is_nonempty()) {
397 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
398 exec_mode, realloc_failures);
693 // its caller's stack by. If the caller is a compiled frame then
694 // we pretend that the callee has no parameters so that the
695 // extension counts for the full amount of locals and not just
696 // locals-parms. This is because without a c2i adapter the parm
697 // area as created by the compiled frame will not be usable by
698 // the interpreter. (Depending on the calling convention there
699 // may not even be enough space).
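// Example: with callee_parameters == 2 and callee_locals == 5, an
// interpreted sender already provides the 2 parameter slots, so the frame
// is extended by last_frame_adjust(2, 5); a compiled (or method handle)
// sender gets last_frame_adjust(0, 5) to cover all 5 locals.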
700
701 // QQQ I'd rather see this pushed down into last_frame_adjust
702 // and have it take the sender (aka caller).
703
704 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
705 caller_adjustment = last_frame_adjust(0, callee_locals);
706 } else if (callee_locals > callee_parameters) {
707 // The caller frame may need extending to accommodate
708 // non-parameter locals of the first unpacked interpreted frame.
709 // Compute that adjustment.
710 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
711 }
712
713 // If the sender is deoptimized we must retrieve the address of the handler
714 // since the frame will "magically" show the original pc before the deopt
715 // and we'd undo the deopt.
716
717 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
718 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
719 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
720 }
721
722 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
723
724 #if INCLUDE_JVMCI
725 if (exceptionObject() != nullptr) {
726 current->set_exception_oop(exceptionObject());
727 exec_mode = Unpack_exception;
728 }
729 #endif
730
731 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
732 assert(current->has_pending_exception(), "should have thrown OOME");
733 current->set_exception_oop(current->pending_exception());
1193 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int(), cache_init_error);
1194 default:;
1195 }
1196 }
1197 return nullptr;
1198 }
1199 #endif // INCLUDE_JVMCI
1200
1201 #if COMPILER2_OR_JVMCI
1202 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1203 Handle pending_exception(THREAD, thread->pending_exception());
1204 const char* exception_file = thread->exception_file();
1205 int exception_line = thread->exception_line();
1206 thread->clear_pending_exception();
1207
1208 bool failures = false;
1209
1210 for (int i = 0; i < objects->length(); i++) {
1211 assert(objects->at(i)->is_object(), "invalid debug information");
1212 ObjectValue* sv = (ObjectValue*) objects->at(i);
1213
1214 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1215 oop obj = nullptr;
1216
1217 bool cache_init_error = false;
1218 if (k->is_instance_klass()) {
1219 #if INCLUDE_JVMCI
1220 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1221 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1222 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1223 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1224 if (obj != nullptr) {
1225 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1226 abv->set_cached(true);
1227 } else if (cache_init_error) {
1228 // Results in an OOME which is valid (as opposed to a class initialization error)
1230 // and is fine for the rare case of a cache initialization failing.
1230 failures = true;
1231 }
1232 }
1233 #endif // INCLUDE_JVMCI
1234
1235 InstanceKlass* ik = InstanceKlass::cast(k);
1236 if (obj == nullptr && !cache_init_error) {
1237 #ifdef COMPILER2
1238 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1239 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1240 } else {
1241 obj = ik->allocate_instance(THREAD);
1242 }
1243 #else
1244 obj = ik->allocate_instance(THREAD);
1245 #endif // COMPILER2
1246 }
1247 } else if (k->is_typeArray_klass()) {
1248 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1249 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1250 int len = sv->field_size() / type2size[ak->element_type()];
1251 obj = ak->allocate(len, THREAD);
1252 } else if (k->is_objArray_klass()) {
1253 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1254 obj = ak->allocate(sv->field_size(), THREAD);
1255 }
1256
1257 if (obj == nullptr) {
1258 failures = true;
1259 }
1260
1261 assert(sv->value().is_null(), "redundant reallocation");
1262 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1263 CLEAR_PENDING_EXCEPTION;
1264 sv->set_value(obj);
1265 }
1266
1267 if (failures) {
1268 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1269 } else if (pending_exception.not_null()) {
1270 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1271 }
1272
1273 return failures;
1274 }
1275
1276 #if INCLUDE_JVMCI
1277 /**
1278 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1279 * we need to somehow be able to recover the actual kind to be able to write the correct
1280 * amount of bytes.
1281 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1282 * the entries at index i + 1 to i + n - 1 are 'markers'.
1283 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1284 * expected form of the array would be:
1285 *
1286 * {b0, b1, b2, b3, INT, marker, b6, b7}
1287 *
1288 * Thus, in order to get back the size of the entry, we simply count the
1289 * number of marker entries that follow it; the size is that count plus one.
1290 *
1291 * @param virtualArray the virtualized byte array
1292 * @param i index of the virtual entry we are recovering
1293 * @return The number of bytes the entry spans
1294 */
1295 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1428 default:
1429 ShouldNotReachHere();
1430 }
1431 index++;
1432 }
1433 }
1434
1435 // restore fields of an eliminated object array
1436 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1437 for (int i = 0; i < sv->field_size(); i++) {
1438 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1439 assert(value->type() == T_OBJECT, "object element expected");
1440 obj->obj_at_put(i, value->get_obj()());
1441 }
1442 }
1443
1444 class ReassignedField {
1445 public:
1446 int _offset;
1447 BasicType _type;
1448 public:
1449 ReassignedField() {
1450 _offset = 0;
1451 _type = T_ILLEGAL;
1452 }
1453 };
1454
1455 int compare(ReassignedField* left, ReassignedField* right) {
1456 return left->_offset - right->_offset;
1457 }
1458
1459 // Restore fields of an eliminated instance object using the same field order
1460 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1461 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1462 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1463 InstanceKlass* ik = klass;
1464 while (ik != nullptr) {
1465 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1466 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1467 ReassignedField field;
1468 field._offset = fs.offset();
1469 field._type = Signature::basic_type(fs.signature());
1470 fields->append(field);
1471 }
1472 }
1473 ik = ik->superklass();
1474 }
1475 fields->sort(compare);
1476 for (int i = 0; i < fields->length(); i++) {
1477 intptr_t val;
1478 ScopeValue* scope_field = sv->field_at(svIndex);
1479 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1480 int offset = fields->at(i)._offset;
1481 BasicType type = fields->at(i)._type;
1482 switch (type) {
1483 case T_OBJECT: case T_ARRAY:
1484 assert(value->type() == T_OBJECT, "Agreement.");
1485 obj->obj_field_put(offset, value->get_obj()());
1486 break;
1487
1488 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1489 case T_INT: case T_FLOAT: { // 4 bytes.
1490 assert(value->type() == T_INT, "Agreement.");
1491 bool big_value = false;
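// Two adjacent T_INT fields may actually hold one 8-byte value (a long or
// double split across two 32-bit slots); detect that case from the location
// type or from a following 64-bit constant so it can be written as one unit.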
1492 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1493 if (scope_field->is_location()) {
1494 Location::Type type = ((LocationValue*) scope_field)->location().type();
1495 if (type == Location::dbl || type == Location::lng) {
1496 big_value = true;
1497 }
1498 }
1499 if (scope_field->is_constant_int()) {
1500 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1501 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1502 big_value = true;
1503 }
1543 case T_BYTE:
1544 assert(value->type() == T_INT, "Agreement.");
1545 val = value->get_int();
1546 obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1547 break;
1548
1549 case T_BOOLEAN:
1550 assert(value->type() == T_INT, "Agreement.");
1551 val = value->get_int();
1552 obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1553 break;
1554
1555 default:
1556 ShouldNotReachHere();
1557 }
1558 svIndex++;
1559 }
1560 return svIndex;
1561 }
1562
1563 // restore fields of all eliminated objects and arrays
1564 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1565 for (int i = 0; i < objects->length(); i++) {
1566 ObjectValue* sv = (ObjectValue*) objects->at(i);
1567 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1568 Handle obj = sv->value();
1569 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1570 #ifndef PRODUCT
1571 if (PrintDeoptimizationDetails) {
1572 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1573 }
1574 #endif // !PRODUCT
1575
1576 if (obj.is_null()) {
1577 continue;
1578 }
1579
1580 #if INCLUDE_JVMCI
1581 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1582 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1583 continue;
1584 }
1585 #endif // INCLUDE_JVMCI
1586 #ifdef COMPILER2
1587 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1588 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1589 ScopeValue* payload = sv->field_at(0);
1590 if (payload->is_location() &&
1591 payload->as_LocationValue()->location().type() == Location::vector) {
1592 #ifndef PRODUCT
1593 if (PrintDeoptimizationDetails) {
1594 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1595 if (Verbose) {
1596 Handle obj = sv->value();
1597 k->oop_print_on(obj(), tty);
1598 }
1599 }
1600 #endif // !PRODUCT
1601 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1602 }
1603 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1604 // which could be restored after vector object allocation.
1605 }
1606 #endif // COMPILER2
1607 if (k->is_instance_klass()) {
1608 InstanceKlass* ik = InstanceKlass::cast(k);
1609 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1610 } else if (k->is_typeArray_klass()) {
1611 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1612 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1613 } else if (k->is_objArray_klass()) {
1614 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1615 }
1616 }
1617 }
1618
1619
1620 // relock objects for which synchronization was eliminated
1621 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1622 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1623 bool relocked_objects = false;
1624 for (int i = 0; i < monitors->length(); i++) {
1625 MonitorInfo* mon_info = monitors->at(i);
1626 if (mon_info->eliminated()) {
1627 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1628 relocked_objects = true;
1629 if (!mon_info->owner_is_scalar_replaced()) {
1749 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'", (uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1750 cm->log_identity(xtty);
1751 xtty->end_head();
1752 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1753 xtty->begin_elem("jvms bci='%d'", sd->bci());
1754 xtty->method(sd->method());
1755 xtty->end_elem();
1756 if (sd->is_top()) break;
1757 }
1758 xtty->tail("deoptimized");
1759 }
1760
1761 Continuation::notify_deopt(thread, fr.sp());
1762
1763 // Patch the compiled method so that when execution returns to it we will
1764 // deopt the execution state and return to the interpreter.
1765 fr.deoptimize(thread);
1766 }
1767
1768 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1769 // Deoptimize only if the frame comes from compiled code.
1770 // Do not deoptimize the frame which is already patched
1771 // during the execution of the loops below.
1772 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1773 return;
1774 }
1775 ResourceMark rm;
1776 deoptimize_single_frame(thread, fr, reason);
1777 }
1778
1779 #if INCLUDE_JVMCI
1780 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1781 // there is no exception handler for this pc => deoptimize
1782 cm->make_not_entrant();
1783
1784 // Use Deoptimization::deoptimize for all of its side-effects:
1785 // gathering traps statistics, logging...
1786 // it also patches the return pc but we do not care about that
1787 // since we return a continuation to the deopt_blob below.
1788 JavaThread* thread = JavaThread::current();
1789 RegisterMap reg_map(thread,
31 #include "code/debugInfoRec.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/interpreter.hpp"
40 #include "interpreter/oopMapCache.hpp"
41 #include "jvm.h"
42 #include "logging/log.hpp"
43 #include "logging/logLevel.hpp"
44 #include "logging/logMessage.hpp"
45 #include "logging/logStream.hpp"
46 #include "memory/allocation.inline.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/constantPool.hpp"
51 #include "oops/flatArrayKlass.hpp"
52 #include "oops/flatArrayOop.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/method.hpp"
55 #include "oops/objArrayKlass.hpp"
56 #include "oops/objArrayOop.inline.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/inlineKlass.inline.hpp"
59 #include "oops/typeArrayOop.inline.hpp"
60 #include "oops/verifyOopClosure.hpp"
61 #include "prims/jvmtiDeferredUpdates.hpp"
62 #include "prims/jvmtiExport.hpp"
63 #include "prims/jvmtiThreadState.hpp"
64 #include "prims/methodHandles.hpp"
65 #include "prims/vectorSupport.hpp"
66 #include "runtime/atomic.hpp"
67 #include "runtime/continuation.hpp"
68 #include "runtime/continuationEntry.inline.hpp"
69 #include "runtime/deoptimization.hpp"
70 #include "runtime/escapeBarrier.hpp"
71 #include "runtime/fieldDescriptor.hpp"
72 #include "runtime/fieldDescriptor.inline.hpp"
73 #include "runtime/frame.inline.hpp"
74 #include "runtime/handles.inline.hpp"
75 #include "runtime/interfaceSupport.inline.hpp"
76 #include "runtime/javaThread.hpp"
77 #include "runtime/jniHandles.inline.hpp"
78 #include "runtime/keepStackGCProcessed.hpp"
290
291 return fetch_unroll_info_helper(current, exec_mode);
292 JRT_END
293
294 #if COMPILER2_OR_JVMCI
295 // print information about reallocated objects
296 static void print_objects(JavaThread* deoptee_thread,
297 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
298 ResourceMark rm;
299 stringStream st; // change to logStream with logging
300 st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
301 fieldDescriptor fd;
302
303 for (int i = 0; i < objects->length(); i++) {
304 ObjectValue* sv = (ObjectValue*) objects->at(i);
305 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
306 Handle obj = sv->value();
307
308 st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
309 k->print_value_on(&st);
310 assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
311 if (obj.is_null()) {
312 if (k->is_inline_klass()) {
313 st.print(" is null");
314 } else {
315 st.print(" allocation failed");
316 }
317 } else {
318 st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
319 }
320 st.cr();
321
322 if (Verbose && !obj.is_null()) {
323 k->oop_print_on(obj(), &st);
324 }
325 }
326 tty->print_raw(st.freeze());
327 }
328
329 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
330 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
331 bool& deoptimized_objects) {
332 bool realloc_failures = false;
333 assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
334
335 JavaThread* deoptee_thread = chunk->at(0)->thread();
336 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
337 "a frame can only be deoptimized by the owner thread");
338
339 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
340
341 // The flag return_oop() indicates call sites which return an oop
342 // in compiled code. Such sites include Java method calls,
343 // runtime calls (for example, used to allocate new objects/arrays
344 // on the slow path) and any other calls generated in compiled code.
345 // This information cannot be reliably recovered here just by
346 // analyzing the bytecode of deoptimized frames, which is why the flag
347 // is set during method compilation (see Compile::Process_OopMap_Node()).
348 // If the previous frame was popped or if we are dispatching an exception,
349 // we don't have an oop result.
350 ScopeDesc* scope = chunk->at(0)->scope();
351 bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
352 // If multiple values are returned, we must take care
353 // of all oop return values.
354 GrowableArray<Handle> return_oops;
355 InlineKlass* vk = nullptr;
356 if (save_oop_result && scope->return_scalarized()) {
357 vk = InlineKlass::returned_inline_klass(map);
358 if (vk != nullptr) {
359 vk->save_oop_fields(map, return_oops);
360 save_oop_result = false;
361 }
362 }
363 if (save_oop_result) {
364 // Reallocation may trigger GC. If deoptimization happened on return from
365 // a call which returns an oop, we need to save it since it is not in the oopmap.
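// (On x86_64, for example, the result oop sits in rax at this point; no
// oopmap entry describes it, so a GC during reallocation would otherwise
// miss it. The exact register is platform-specific.)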
366 oop result = deoptee.saved_oop_result(&map);
367 assert(oopDesc::is_oop_or_null(result), "must be oop");
368 return_oops.push(Handle(thread, result));
369 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
370 if (TraceDeoptimization) {
371 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
372 tty->cr();
373 }
374 }
375 if (objects != nullptr || vk != nullptr) {
376 if (exec_mode == Deoptimization::Unpack_none) {
377 assert(thread->thread_state() == _thread_in_vm, "assumption");
378 JavaThread* THREAD = thread; // For exception macros.
379 // Clear pending OOM if reallocation fails and return true indicating allocation failure
380 if (vk != nullptr) {
381 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
382 }
383 if (objects != nullptr) {
384 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
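// JVMCI debug info enumerates fields in the order returned by
// HotSpotResolvedObjectTypeImpl.getInstanceFields(true), which includes
// injected ("internal") fields, so only skip them for non-JVMCI code.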
385 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
386 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
387 }
388 deoptimized_objects = true;
389 } else {
390 JavaThread* current = thread; // For JRT_BLOCK
391 JRT_BLOCK
392 if (vk != nullptr) {
393 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
394 }
395 if (objects != nullptr) {
396 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
397 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
398 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
399 }
400 JRT_END
401 }
402 if (TraceDeoptimization) {
403 print_objects(deoptee_thread, objects, realloc_failures);
404 }
405 }
406 if (save_oop_result || vk != nullptr) {
407 // Restore result.
408 assert(return_oops.length() == 1, "no inline type");
409 deoptee.set_saved_oop_result(&map, return_oops.pop()());
410 }
411 return realloc_failures;
412 }
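// A minimal sketch of how this helper is driven (surrounding variable names
// are hypothetical; only the signature above is authoritative):
//
//   bool deoptimized_objects = false;
//   bool realloc_failures = rematerialize_objects(
//       thread, Deoptimization::Unpack_deopt, cm, deoptee, map, chunk,
//       deoptimized_objects);
//
// The result then feeds the realloc_failures parameter of
// restore_eliminated_locks() below.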
413
414 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
415 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
416 JavaThread* deoptee_thread = chunk->at(0)->thread();
417 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
418 assert(thread == Thread::current(), "should be");
419 HandleMark hm(thread);
420 #ifndef PRODUCT
421 bool first = true;
422 #endif // !PRODUCT
423 for (int i = 0; i < chunk->length(); i++) {
424 compiledVFrame* cvf = chunk->at(i);
425 assert(cvf->scope() != nullptr, "expect only compiled java frames");
426 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
427 if (monitors->is_nonempty()) {
428 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
429 exec_mode, realloc_failures);
724 // its caller's stack by. If the caller is a compiled frame then
725 // we pretend that the callee has no parameters so that the
726 // extension counts for the full amount of locals and not just
727 // locals-parms. This is because without a c2i adapter the parm
728 // area as created by the compiled frame will not be usable by
729 // the interpreter. (Depending on the calling convention there
730 // may not even be enough space).
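// Example: with callee_parameters == 2 and callee_locals == 5, an
// interpreted sender already provides the 2 parameter slots, so the frame
// is extended by last_frame_adjust(2, 5); a compiled (or method handle)
// sender gets last_frame_adjust(0, 5) to cover all 5 locals.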
731
732 // QQQ I'd rather see this pushed down into last_frame_adjust
733 // and have it take the sender (aka caller).
734
735 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
736 caller_adjustment = last_frame_adjust(0, callee_locals);
737 } else if (callee_locals > callee_parameters) {
738 // The caller frame may need extending to accommodate
739 // non-parameter locals of the first unpacked interpreted frame.
740 // Compute that adjustment.
741 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
742 }
743
744 // If the sender is deoptimized we must retrieve the address of the handler
745 // since the frame will "magically" show the original pc before the deopt
746 // and we'd undo the deopt.
747
748 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
749 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
750 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
751 }
752
753 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
754
755 #if INCLUDE_JVMCI
756 if (exceptionObject() != nullptr) {
757 current->set_exception_oop(exceptionObject());
758 exec_mode = Unpack_exception;
759 }
760 #endif
761
762 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
763 assert(current->has_pending_exception(), "should have thrown OOME");
764 current->set_exception_oop(current->pending_exception());
1224 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int(), cache_init_error);
1225 default:;
1226 }
1227 }
1228 return nullptr;
1229 }
1230 #endif // INCLUDE_JVMCI
1231
1232 #if COMPILER2_OR_JVMCI
1233 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1234 Handle pending_exception(THREAD, thread->pending_exception());
1235 const char* exception_file = thread->exception_file();
1236 int exception_line = thread->exception_line();
1237 thread->clear_pending_exception();
1238
1239 bool failures = false;
1240
1241 for (int i = 0; i < objects->length(); i++) {
1242 assert(objects->at(i)->is_object(), "invalid debug information");
1243 ObjectValue* sv = (ObjectValue*) objects->at(i);
1244 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1245
1246 // Check if the object may be null and has an additional is_init input that needs
1247 // to be checked before using the field values. Skip re-allocation if it is null.
1248 if (sv->maybe_null()) {
1249 assert(k->is_inline_klass(), "must be an inline klass");
1250 intptr_t init_value = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_int();
1251 jint is_init = (jint)*((jint*)&init_value);
1252 if (is_init == 0) {
1253 continue;
1254 }
1255 }
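// (A null inline type keeps sv->value() == nullptr; reassign_fields()
// later skips such entries via ObjectValue::maybe_null().)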
1256
1257 oop obj = nullptr;
1258 bool cache_init_error = false;
1259 if (k->is_instance_klass()) {
1260 #if INCLUDE_JVMCI
1261 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1262 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1263 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1264 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1265 if (obj != nullptr) {
1266 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1267 abv->set_cached(true);
1268 } else if (cache_init_error) {
1269 // Results in an OOME which is valid (as opposed to a class initialization error)
1270 // and is fine for the rare case of a cache initialization failing.
1271 failures = true;
1272 }
1273 }
1274 #endif // INCLUDE_JVMCI
1275
1276 InstanceKlass* ik = InstanceKlass::cast(k);
1277 if (obj == nullptr && !cache_init_error) {
1278 #ifdef COMPILER2
1279 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1280 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1281 } else {
1282 obj = ik->allocate_instance(THREAD);
1283 }
1284 #else
1285 obj = ik->allocate_instance(THREAD);
1286 #endif // COMPILER2
1287 }
1288 } else if (k->is_flatArray_klass()) {
1289 FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1290 // Inline type array must be zeroed because not all memory is reassigned
1291 obj = ak->allocate(sv->field_size(), THREAD);
1292 } else if (k->is_typeArray_klass()) {
1293 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1294 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1295 int len = sv->field_size() / type2size[ak->element_type()];
1296 obj = ak->allocate(len, THREAD);
1297 } else if (k->is_objArray_klass()) {
1298 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1299 obj = ak->allocate(sv->field_size(), THREAD);
1300 }
1301
1302 if (obj == nullptr) {
1303 failures = true;
1304 }
1305
1306 assert(sv->value().is_null(), "redundant reallocation");
1307 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1308 CLEAR_PENDING_EXCEPTION;
1309 sv->set_value(obj);
1310 }
1311
1312 if (failures) {
1313 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1314 } else if (pending_exception.not_null()) {
1315 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1316 }
1317
1318 return failures;
1319 }
1320
1321 // We're deoptimizing at the return of a call; inline type fields are
1322 // in registers. When we go back to the interpreter, it will expect a
1323 // reference to an inline type instance. Allocate and initialize it from
1324 // the register values here.
1325 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1326 oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1327 if (new_vt == nullptr) {
1328 CLEAR_PENDING_EXCEPTION;
1329 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1330 }
1331 return_oops.clear();
1332 return_oops.push(Handle(THREAD, new_vt));
1333 return false;
1334 }
1335
1336 #if INCLUDE_JVMCI
1337 /**
1338 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1339 * we need to somehow be able to recover the actual kind to be able to write the correct
1340 * amount of bytes.
1341 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1342 * the entries at index i + 1 to i + n - 1 are 'markers'.
1343 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1344 * expected form of the array would be:
1345 *
1346 * {b0, b1, b2, b3, INT, marker, b6, b7}
1347 *
1348 * Thus, in order to get back the size of the entry, we simply count the
1349 * number of marker entries that follow it; the size is that count plus one.
1350 *
1351 * @param virtualArray the virtualized byte array
1352 * @param i index of the virtual entry we are recovering
1353 * @return The number of bytes the entry spans
1354 */
1355 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1488 default:
1489 ShouldNotReachHere();
1490 }
1491 index++;
1492 }
1493 }
1494
1495 // restore fields of an eliminated object array
1496 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1497 for (int i = 0; i < sv->field_size(); i++) {
1498 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1499 assert(value->type() == T_OBJECT, "object element expected");
1500 obj->obj_at_put(i, value->get_obj()());
1501 }
1502 }
1503
1504 class ReassignedField {
1505 public:
1506 int _offset;
1507 BasicType _type;
1508 InstanceKlass* _klass;
1509 public:
1510 ReassignedField() {
1511 _offset = 0;
1512 _type = T_ILLEGAL;
1513 _klass = nullptr;
1514 }
1515 };
1516
1517 int compare(ReassignedField* left, ReassignedField* right) {
1518 return left->_offset - right->_offset;
1519 }
1520
1521 // Restore fields of an eliminated instance object using the same field order
1522 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1523 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1524 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1525 InstanceKlass* ik = klass;
1526 while (ik != nullptr) {
1527 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1528 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1529 ReassignedField field;
1530 field._offset = fs.offset();
1531 field._type = Signature::basic_type(fs.signature());
1532 if (fs.signature()->is_Q_signature()) {
1533 if (fs.is_inlined()) {
1534 // Resolve klass of flattened inline type field
1535 field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1536 } else {
1537 field._type = T_OBJECT;
1538 }
1539 }
1540 fields->append(field);
1541 }
1542 }
1543 ik = ik->superklass();
1544 }
1545 fields->sort(compare);
1546 for (int i = 0; i < fields->length(); i++) {
1547 BasicType type = fields->at(i)._type;
1548 int offset = base_offset + fields->at(i)._offset;
1549 // Check for a flattened inline type field before accessing the ScopeValue because it might not have any fields
1550 if (type == T_PRIMITIVE_OBJECT) {
1551 // Recursively re-assign flattened inline type fields
1552 InstanceKlass* vk = fields->at(i)._klass;
1553 assert(vk != nullptr, "must be resolved");
1554 offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1555 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1556 continue; // Continue because we don't need to increment svIndex
1557 }
1558 intptr_t val;
1559 ScopeValue* scope_field = sv->field_at(svIndex);
1560 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1561 switch (type) {
1562 case T_OBJECT:
1563 case T_ARRAY:
1564 assert(value->type() == T_OBJECT, "Agreement.");
1565 obj->obj_field_put(offset, value->get_obj()());
1566 break;
1567
1568 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1569 case T_INT: case T_FLOAT: { // 4 bytes.
1570 assert(value->type() == T_INT, "Agreement.");
1571 bool big_value = false;
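// Two adjacent T_INT fields may actually hold one 8-byte value (a long or
// double split across two 32-bit slots); detect that case from the location
// type or from a following 64-bit constant so it can be written as one unit.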
1572 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1573 if (scope_field->is_location()) {
1574 Location::Type type = ((LocationValue*) scope_field)->location().type();
1575 if (type == Location::dbl || type == Location::lng) {
1576 big_value = true;
1577 }
1578 }
1579 if (scope_field->is_constant_int()) {
1580 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1581 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1582 big_value = true;
1583 }
1623 case T_BYTE:
1624 assert(value->type() == T_INT, "Agreement.");
1625 val = value->get_int();
1626 obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1627 break;
1628
1629 case T_BOOLEAN:
1630 assert(value->type() == T_INT, "Agreement.");
1631 val = value->get_int();
1632 obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1633 break;
1634
1635 default:
1636 ShouldNotReachHere();
1637 }
1638 svIndex++;
1639 }
1640 return svIndex;
1641 }
1642
1643 // restore fields of an eliminated inline type array
1644 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1645 InlineKlass* vk = vak->element_klass();
1646 assert(vk->flatten_array(), "should only be used for flattened inline type arrays");
1647 // Adjust offset to omit oop header
1648 int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
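// Example: with a 16-byte element size, element i's fields start at
// base_offset + 16*i; first_field_offset() is subtracted because
// reassign_fields_by_klass() adds per-field offsets that already include
// the space before an instance's first field.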
1649 // Initialize all elements of the flattened inline type array
1650 for (int i = 0; i < sv->field_size(); i++) {
1651 ScopeValue* val = sv->field_at(i);
1652 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1653 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1654 }
1655 }
1656
1657 // restore fields of all eliminated objects and arrays
1658 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1659 for (int i = 0; i < objects->length(); i++) {
1660 ObjectValue* sv = (ObjectValue*) objects->at(i);
1661 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1662 Handle obj = sv->value();
1663 assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1664 #ifndef PRODUCT
1665 if (PrintDeoptimizationDetails) {
1666 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1667 }
1668 #endif // !PRODUCT
1669
1670 if (obj.is_null()) {
1671 continue;
1672 }
1673
1674 #if INCLUDE_JVMCI
1675 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1676 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1677 continue;
1678 }
1679 #endif // INCLUDE_JVMCI
1680 #ifdef COMPILER2
1681 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1682 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1683 ScopeValue* payload = sv->field_at(0);
1684 if (payload->is_location() &&
1685 payload->as_LocationValue()->location().type() == Location::vector) {
1686 #ifndef PRODUCT
1687 if (PrintDeoptimizationDetails) {
1688 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1689 if (Verbose) {
1690 Handle obj = sv->value();
1691 k->oop_print_on(obj(), tty);
1692 }
1693 }
1694 #endif // !PRODUCT
1695 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1696 }
1697 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1698 // which could be restored after vector object allocation.
1699 }
1700 #endif // COMPILER2
1701 if (k->is_instance_klass()) {
1702 InstanceKlass* ik = InstanceKlass::cast(k);
1703 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1704 } else if (k->is_flatArray_klass()) {
1705 FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1706 reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1707 } else if (k->is_typeArray_klass()) {
1708 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1709 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1710 } else if (k->is_objArray_klass()) {
1711 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1712 }
1713 }
1714 }
1715
1716
1717 // relock objects for which synchronization was eliminated
1718 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1719 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1720 bool relocked_objects = false;
1721 for (int i = 0; i < monitors->length(); i++) {
1722 MonitorInfo* mon_info = monitors->at(i);
1723 if (mon_info->eliminated()) {
1724 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1725 relocked_objects = true;
1726 if (!mon_info->owner_is_scalar_replaced()) {
1846 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'", (uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1847 cm->log_identity(xtty);
1848 xtty->end_head();
1849 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1850 xtty->begin_elem("jvms bci='%d'", sd->bci());
1851 xtty->method(sd->method());
1852 xtty->end_elem();
1853 if (sd->is_top()) break;
1854 }
1855 xtty->tail("deoptimized");
1856 }
1857
1858 Continuation::notify_deopt(thread, fr.sp());
1859
1860 // Patch the compiled method so that when execution returns to it we will
1861 // deopt the execution state and return to the interpreter.
1862 fr.deoptimize(thread);
1863 }
1864
1865 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1866 // Deoptimize only if the frame comes from compiled code.
1867 // Do not deoptimize the frame which is already patched
1868 // during the execution of the loops below.
1869 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1870 return;
1871 }
1872 ResourceMark rm;
1873 deoptimize_single_frame(thread, fr, reason);
1874 }
1875
1876 #if INCLUDE_JVMCI
1877 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1878 // there is no exception handler for this pc => deoptimize
1879 cm->make_not_entrant();
1880
1881 // Use Deoptimization::deoptimize for all of its side-effects:
1882 // gathering traps statistics, logging...
1883 // it also patches the return pc but we do not care about that
1884 // since we return a continuation to the deopt_blob below.
1885 JavaThread* thread = JavaThread::current();
1886 RegisterMap reg_map(thread,