< prev index next >

src/hotspot/share/runtime/deoptimization.cpp

Print this page

  26 #include "jvm.h"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"


  46 #include "oops/method.hpp"
  47 #include "oops/objArrayKlass.hpp"
  48 #include "oops/objArrayOop.inline.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/fieldStreams.inline.hpp"

  51 #include "oops/typeArrayOop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "prims/jvmtiDeferredUpdates.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/vectorSupport.hpp"
  57 #include "prims/methodHandles.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/deoptimization.hpp"
  60 #include "runtime/escapeBarrier.hpp"
  61 #include "runtime/fieldDescriptor.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/jniHandles.inline.hpp"
  67 #include "runtime/keepStackGCProcessed.hpp"
  68 #include "runtime/objectMonitor.inline.hpp"
  69 #include "runtime/osThread.hpp"
  70 #include "runtime/safepointVerifiers.hpp"

 176 
 177   return fetch_unroll_info_helper(current, exec_mode);
 178 JRT_END
 179 
 180 #if COMPILER2_OR_JVMCI
 181 // print information about reallocated objects
 182 static void print_objects(JavaThread* deoptee_thread,
 183                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 184   ResourceMark rm;
 185   stringStream st;  // change to logStream with logging
 186   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 187   fieldDescriptor fd;
 188 
 189   for (int i = 0; i < objects->length(); i++) {
 190     ObjectValue* sv = (ObjectValue*) objects->at(i);
 191     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 192     Handle obj = sv->value();
 193 
 194     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 195     k->print_value_on(&st);
 196     assert(obj.not_null() || realloc_failures, "reallocation was missed");
 197     if (obj.is_null()) {
 198       st.print(" allocation failed");




 199     } else {
 200       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 201     }
 202     st.cr();
 203 
 204     if (Verbose && !obj.is_null()) {
 205       k->oop_print_on(obj(), &st);
 206     }
 207   }
 208   tty->print_raw(st.as_string());
 209 }
 210 
// Rematerialize (reallocate on the Java heap) every object that was
// scalar-replaced by the optimizing compiler in the frames described by
// 'chunk', then restore their field values from the deoptee frame.
// Returns true if at least one reallocation failed (e.g. due to OOM).
// 'deoptimized_objects' is set to true when objects were processed in
// Unpack_none mode (deoptimization of another thread's frame).
static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
                                  bool& deoptimized_objects) {
  bool realloc_failures = false;
  assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");

  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
         "a frame can only be deoptimized by the owner thread");

  // Scalar-replaced objects recorded in the innermost scope's debug info.
  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  Handle return_value;
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_value = Handle(thread, result);
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
      tty->cr();
    }
  }
  if (objects != NULL) {
    if (exec_mode == Deoptimization::Unpack_none) {
      // Called from outside the regular deopt blob (e.g. by an escape
      // barrier); exceptions must be cleared here rather than propagated.
      assert(thread->thread_state() == _thread_in_vm, "assumption");
      JavaThread* THREAD = thread; // For exception macros.
      // Clear pending OOM if reallocation fails and return true indicating allocation failure
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
      deoptimized_objects = true;
    } else {
      // Regular deoptimization path: enter/leave the VM via JRT_BLOCK so a
      // pending exception is handled by the normal runtime machinery.
      JavaThread* current = thread; // For JRT_BLOCK
      JRT_BLOCK
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
      JRT_END
    }
    // JVMCI debug info describes internal (injected) fields as well, so only
    // skip them for code not compiled by JVMCI.
    bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
    Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
    if (TraceDeoptimization) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
  if (save_oop_result) {
    // Restore result.
    deoptee.set_saved_oop_result(&map, return_value());
  }
  return realloc_failures;
}
 271 
 272 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 273                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 274   JavaThread* deoptee_thread = chunk->at(0)->thread();
 275   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 276   assert(thread == Thread::current(), "should be");
 277   HandleMark hm(thread);
 278 #ifndef PRODUCT
 279   bool first = true;
 280 #endif // !PRODUCT
 281   for (int i = 0; i < chunk->length(); i++) {
 282     compiledVFrame* cvf = chunk->at(i);
 283     assert (cvf->scope() != NULL,"expect only compiled java frames");
 284     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 285     if (monitors->is_nonempty()) {
 286       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 287                                                      exec_mode, realloc_failures);

 574   // its caller's stack by. If the caller is a compiled frame then
 575   // we pretend that the callee has no parameters so that the
 576   // extension counts for the full amount of locals and not just
 577   // locals-parms. This is because without a c2i adapter the parm
 578   // area as created by the compiled frame will not be usable by
 579   // the interpreter. (Depending on the calling convention there
 580   // may not even be enough space).
 581 
 582   // QQQ I'd rather see this pushed down into last_frame_adjust
 583   // and have it take the sender (aka caller).
 584 
 585   if (deopt_sender.is_compiled_caller() || caller_was_method_handle) {
 586     caller_adjustment = last_frame_adjust(0, callee_locals);
 587   } else if (callee_locals > callee_parameters) {
 588     // The caller frame may need extending to accommodate
 589     // non-parameter locals of the first unpacked interpreted frame.
 590     // Compute that adjustment.
 591     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 592   }
 593 
 594   // If the sender is deoptimized we must retrieve the address of the handler
 595   // since the frame will "magically" show the original pc before the deopt
 596   // and we'd undo the deopt.
 597 
 598   frame_pcs[0] = deopt_sender.raw_pc();
 599 
 600   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 601 
 602 #if INCLUDE_JVMCI
 603   if (exceptionObject() != NULL) {
 604     current->set_exception_oop(exceptionObject());
 605     exec_mode = Unpack_exception;
 606   }
 607 #endif
 608 
 609   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 610     assert(current->has_pending_exception(), "should have thrown OOME");
 611     current->set_exception_oop(current->pending_exception());
 612     current->clear_pending_exception();
 613     exec_mode = Unpack_exception;
 614   }

1049        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1050        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1051        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1052        default:;
1053      }
1054    }
1055    return NULL;
1056 }
1057 
// Allocate heap storage for every scalar-replaced ('eliminated') object
// described in 'objects' and store the new oop back into its ObjectValue.
// Any exception already pending on entry is stashed away first and
// re-installed before returning, so the allocations below can proceed.
// Returns true if at least one allocation failed; in that case a retained
// OutOfMemoryError is thrown instead of restoring the original exception.
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  // Save and clear the incoming pending exception (with its location) so it
  // does not interfere with allocation; restored at the end.
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    oop obj = NULL;

    if (k->is_instance_klass()) {
      if (sv->is_auto_box()) {
        // Try to reuse a cached autobox object (e.g. Integer cache entry)
        // instead of allocating a fresh instance.
        AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
        obj = get_cached_box(abv, fr, reg_map, THREAD);
        if (obj != NULL) {
          // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
          abv->set_cached(true);
        }
      }

      InstanceKlass* ik = InstanceKlass::cast(k);
      if (obj == NULL) {
#ifdef COMPILER2
        if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
          // Vector objects get their payload restored as part of allocation.
          obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
        } else {
          obj = ik->allocate_instance(THREAD);
        }
#else
        obj = ik->allocate_instance(THREAD);
#endif // COMPILER2
      }
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      // field_size() is measured in stack slots (see type2size); divide by the
      // element's slot size to recover the array length.
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    // Swallow the per-object allocation exception here; a single OOME covering
    // all failures is thrown after the loop.
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    // Throw the pre-allocated OOME reserved for failed rematerialization.
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    // Re-install the exception that was pending on entry.
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}
1123 















1124 #if INCLUDE_JVMCI
1125 /**
1126  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1127  * we need to somehow be able to recover the actual kind to be able to write the correct
1128  * amount of bytes.
1129  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1130  * the entries at index n + 1 to n + i are 'markers'.
1131  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1132  * expected form of the array would be:
1133  *
1134  * {b0, b1, b2, b3, INT, marker, b6, b7}
1135  *
1136  * Thus, in order to get back the size of the entry, we simply need to count the number
1137  * of marked entries
1138  *
1139  * @param virtualArray the virtualized byte array
1140  * @param i index of the virtual entry we are recovering
1141  * @return The number of bytes the entry spans
1142  */
1143 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1276       default:
1277         ShouldNotReachHere();
1278     }
1279     index++;
1280   }
1281 }
1282 
1283 // restore fields of an eliminated object array
1284 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1285   for (int i = 0; i < sv->field_size(); i++) {
1286     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1287     assert(value->type() == T_OBJECT, "object element expected");
1288     obj->obj_at_put(i, value->get_obj()());
1289   }
1290 }
1291 
1292 class ReassignedField {
1293 public:
1294   int _offset;
1295   BasicType _type;

1296 public:
1297   ReassignedField() {
1298     _offset = 0;
1299     _type = T_ILLEGAL;

1300   }
1301 };
1302 
1303 int compare(ReassignedField* left, ReassignedField* right) {
1304   return left->_offset - right->_offset;
1305 }
1306 
1307 // Restore fields of an eliminated instance object using the same field order
1308 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1309 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1310   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1311   InstanceKlass* ik = klass;
1312   while (ik != NULL) {
1313     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1314       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1315         ReassignedField field;
1316         field._offset = fs.offset();
1317         field._type = Signature::basic_type(fs.signature());








1318         fields->append(field);
1319       }
1320     }
1321     ik = ik->superklass();
1322   }
1323   fields->sort(compare);
1324   for (int i = 0; i < fields->length(); i++) {











1325     intptr_t val;
1326     ScopeValue* scope_field = sv->field_at(svIndex);
1327     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1328     int offset = fields->at(i)._offset;
1329     BasicType type = fields->at(i)._type;
1330     switch (type) {
1331       case T_OBJECT: case T_ARRAY:

1332         assert(value->type() == T_OBJECT, "Agreement.");
1333         obj->obj_field_put(offset, value->get_obj()());
1334         break;
1335 
1336       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1337       case T_INT: case T_FLOAT: { // 4 bytes.
1338         assert(value->type() == T_INT, "Agreement.");
1339         bool big_value = false;
1340         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1341           if (scope_field->is_location()) {
1342             Location::Type type = ((LocationValue*) scope_field)->location().type();
1343             if (type == Location::dbl || type == Location::lng) {
1344               big_value = true;
1345             }
1346           }
1347           if (scope_field->is_constant_int()) {
1348             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1349             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1350               big_value = true;
1351             }

1391       case T_BYTE:
1392         assert(value->type() == T_INT, "Agreement.");
1393         val = value->get_int();
1394         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1395         break;
1396 
1397       case T_BOOLEAN:
1398         assert(value->type() == T_INT, "Agreement.");
1399         val = value->get_int();
1400         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1401         break;
1402 
1403       default:
1404         ShouldNotReachHere();
1405     }
1406     svIndex++;
1407   }
1408   return svIndex;
1409 }
1410 














// restore fields of all eliminated objects and arrays
// Walks every scalar-replaced object and copies its field/element values from
// the deoptee frame's debug info into the freshly reallocated heap object.
// Objects whose reallocation failed (null handle) and autoboxes taken from a
// cache are skipped.
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
#endif // !PRODUCT

    // Reallocation failed for this object; nothing to fill in.
    if (obj.is_null()) {
      continue;
    }

    // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
    if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
      continue;
    }
#ifdef COMPILER2
    if (EnableVectorSupport && VectorSupport::is_vector(k)) {
      assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
      ScopeValue* payload = sv->field_at(0);
      if (payload->is_location() &&
          payload->as_LocationValue()->location().type() == Location::vector) {
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("skip field reassignment for this vector - it should be assigned already");
          if (Verbose) {
            Handle obj = sv->value();
            k->oop_print_on(obj(), tty);
          }
        }
#endif // !PRODUCT
        continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
      }
      // Else fall-through to do assignment for scalar-replaced boxed vector representation
      // which could be restored after vector object allocation.
    }
#endif /* COMPILER2 */
    // Dispatch on the object's kind: instance, primitive array, or oop array.
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}
1464 
1465 
1466 // relock objects for which synchronization was eliminated
1467 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1468                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1469   bool relocked_objects = false;
1470   for (int i = 0; i < monitors->length(); i++) {
1471     MonitorInfo* mon_info = monitors->at(i);
1472     if (mon_info->eliminated()) {
1473       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1474       relocked_objects = true;
1475       if (!mon_info->owner_is_scalar_replaced()) {

1593 
1594     ttyLocker ttyl;
1595     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1596     cm->log_identity(xtty);
1597     xtty->end_head();
1598     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1599       xtty->begin_elem("jvms bci='%d'", sd->bci());
1600       xtty->method(sd->method());
1601       xtty->end_elem();
1602       if (sd->is_top())  break;
1603     }
1604     xtty->tail("deoptimized");
1605   }
1606 
1607   // Patch the compiled method so that when execution returns to it we will
1608   // deopt the execution state and return to the interpreter.
1609   fr.deoptimize(thread);
1610 }
1611 
1612 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1613   // Deoptimize only if the frame comes from compile code.
1614   // Do not deoptimize the frame which is already patched
1615   // during the execution of the loops below.
1616   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1617     return;
1618   }
1619   ResourceMark rm;
1620   DeoptimizationMarker dm;
1621   deoptimize_single_frame(thread, fr, reason);
1622 }
1623 
1624 #if INCLUDE_JVMCI
1625 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1626   // there is no exception handler for this pc => deoptimize
1627   cm->make_not_entrant();
1628 
1629   // Use Deoptimization::deoptimize for all of its side-effects:
1630   // gathering traps statistics, logging...
1631   // it also patches the return pc but we do not care about that
1632   // since we return a continuation to the deopt_blob below.
1633   JavaThread* thread = JavaThread::current();

  26 #include "jvm.h"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"
  46 #include "oops/flatArrayKlass.hpp"
  47 #include "oops/flatArrayOop.hpp"
  48 #include "oops/method.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/objArrayOop.inline.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/inlineKlass.inline.hpp"
  54 #include "oops/typeArrayOop.inline.hpp"
  55 #include "oops/verifyOopClosure.hpp"
  56 #include "prims/jvmtiDeferredUpdates.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "prims/jvmtiThreadState.hpp"
  59 #include "prims/vectorSupport.hpp"
  60 #include "prims/methodHandles.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/deoptimization.hpp"
  63 #include "runtime/escapeBarrier.hpp"
  64 #include "runtime/fieldDescriptor.hpp"
  65 #include "runtime/fieldDescriptor.inline.hpp"
  66 #include "runtime/frame.inline.hpp"
  67 #include "runtime/handles.inline.hpp"
  68 #include "runtime/interfaceSupport.inline.hpp"
  69 #include "runtime/jniHandles.inline.hpp"
  70 #include "runtime/keepStackGCProcessed.hpp"
  71 #include "runtime/objectMonitor.inline.hpp"
  72 #include "runtime/osThread.hpp"
  73 #include "runtime/safepointVerifiers.hpp"

 179 
 180   return fetch_unroll_info_helper(current, exec_mode);
 181 JRT_END
 182 
 183 #if COMPILER2_OR_JVMCI
 184 // print information about reallocated objects
 185 static void print_objects(JavaThread* deoptee_thread,
 186                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 187   ResourceMark rm;
 188   stringStream st;  // change to logStream with logging
 189   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 190   fieldDescriptor fd;
 191 
 192   for (int i = 0; i < objects->length(); i++) {
 193     ObjectValue* sv = (ObjectValue*) objects->at(i);
 194     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 195     Handle obj = sv->value();
 196 
 197     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 198     k->print_value_on(&st);
 199     assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
 200     if (obj.is_null()) {
 201       if (k->is_inline_klass()) {
 202         st.print(" is null");
 203       } else {
 204         st.print(" allocation failed");
 205       }
 206     } else {
 207       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 208     }
 209     st.cr();
 210 
 211     if (Verbose && !obj.is_null()) {
 212       k->oop_print_on(obj(), &st);
 213     }
 214   }
 215   tty->print_raw(st.as_string());
 216 }
 217 
 218 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 219                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 220                                   bool& deoptimized_objects) {
 221   bool realloc_failures = false;
 222   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 223 
 224   JavaThread* deoptee_thread = chunk->at(0)->thread();
 225   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 226          "a frame can only be deoptimized by the owner thread");
 227 
 228   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 229 
 230   // The flag return_oop() indicates call sites which return oop
 231   // in compiled code. Such sites include java method calls,
 232   // runtime calls (for example, used to allocate new objects/arrays
 233   // on slow code path) and any other calls generated in compiled code.
 234   // It is not guaranteed that we can get such information here only
 235   // by analyzing bytecode in deoptimized frames. This is why this flag
 236   // is set during method compilation (see Compile::Process_OopMap_Node()).
 237   // If the previous frame was popped or if we are dispatching an exception,
 238   // we don't have an oop result.
 239   ScopeDesc* scope = chunk->at(0)->scope();
 240   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 241   // In case of the return of multiple values, we must take care
 242   // of all oop return values.
 243   GrowableArray<Handle> return_oops;
 244   InlineKlass* vk = NULL;
 245   if (save_oop_result && scope->return_scalarized()) {
 246     vk = InlineKlass::returned_inline_klass(map);
 247     if (vk != NULL) {
 248       vk->save_oop_fields(map, return_oops);
 249       save_oop_result = false;
 250     }
 251   }
 252   if (save_oop_result) {
 253     // Reallocation may trigger GC. If deoptimization happened on return from
 254     // call which returns oop we need to save it since it is not in oopmap.
 255     oop result = deoptee.saved_oop_result(&map);
 256     assert(oopDesc::is_oop_or_null(result), "must be oop");
 257     return_oops.push(Handle(thread, result));
 258     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 259     if (TraceDeoptimization) {
 260       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 261       tty->cr();
 262     }
 263   }
 264   if (objects != NULL || vk != NULL) {
 265     if (exec_mode == Deoptimization::Unpack_none) {
 266       assert(thread->thread_state() == _thread_in_vm, "assumption");
 267       JavaThread* THREAD = thread; // For exception macros.
 268       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 269       if (vk != NULL) {
 270         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 271       }
 272       if (objects != NULL) {
 273         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 274         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 275         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
 276       }
 277       deoptimized_objects = true;
 278     } else {
 279       JavaThread* current = thread; // For JRT_BLOCK
 280       JRT_BLOCK
 281       if (vk != NULL) {
 282         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 283       }
 284       if (objects != NULL) {
 285         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 286         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 287         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 288       }
 289       JRT_END
 290     }


 291     if (TraceDeoptimization) {
 292       print_objects(deoptee_thread, objects, realloc_failures);
 293     }
 294   }
 295   if (save_oop_result || vk != NULL) {
 296     // Restore result.
 297     assert(return_oops.length() == 1, "no inline type");
 298     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 299   }
 300   return realloc_failures;
 301 }
 302 
 303 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 304                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 305   JavaThread* deoptee_thread = chunk->at(0)->thread();
 306   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 307   assert(thread == Thread::current(), "should be");
 308   HandleMark hm(thread);
 309 #ifndef PRODUCT
 310   bool first = true;
 311 #endif // !PRODUCT
 312   for (int i = 0; i < chunk->length(); i++) {
 313     compiledVFrame* cvf = chunk->at(i);
 314     assert (cvf->scope() != NULL,"expect only compiled java frames");
 315     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 316     if (monitors->is_nonempty()) {
 317       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 318                                                      exec_mode, realloc_failures);

 605   // its caller's stack by. If the caller is a compiled frame then
 606   // we pretend that the callee has no parameters so that the
 607   // extension counts for the full amount of locals and not just
 608   // locals-parms. This is because without a c2i adapter the parm
 609   // area as created by the compiled frame will not be usable by
 610   // the interpreter. (Depending on the calling convention there
 611   // may not even be enough space).
 612 
 613   // QQQ I'd rather see this pushed down into last_frame_adjust
 614   // and have it take the sender (aka caller).
 615 
 616   if (deopt_sender.is_compiled_caller() || caller_was_method_handle) {
 617     caller_adjustment = last_frame_adjust(0, callee_locals);
 618   } else if (callee_locals > callee_parameters) {
 619     // The caller frame may need extending to accommodate
 620     // non-parameter locals of the first unpacked interpreted frame.
 621     // Compute that adjustment.
 622     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 623   }
 624 
 625   // If the sender is deoptimized we must retrieve the address of the handler
 626   // since the frame will "magically" show the original pc before the deopt
 627   // and we'd undo the deopt.
 628 
 629   frame_pcs[0] = deopt_sender.raw_pc();
 630 
 631   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 632 
 633 #if INCLUDE_JVMCI
 634   if (exceptionObject() != NULL) {
 635     current->set_exception_oop(exceptionObject());
 636     exec_mode = Unpack_exception;
 637   }
 638 #endif
 639 
 640   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 641     assert(current->has_pending_exception(), "should have thrown OOME");
 642     current->set_exception_oop(current->pending_exception());
 643     current->clear_pending_exception();
 644     exec_mode = Unpack_exception;
 645   }

1080        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1081        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1082        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1083        default:;
1084      }
1085    }
1086    return NULL;
1087 }
1088 
// Eagerly reallocate every object that the compiler scalar-replaced in the
// frame described by 'objects'. Each ObjectValue gets its value() set to the
// freshly allocated (not yet field-initialized) oop; field reassignment is
// done later by reassign_fields(). Returns true if at least one allocation
// failed, in which case the preallocated OOME is pending on return.
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  // Save and clear any exception that is already pending: the allocations
  // below may throw themselves, and the original exception must be restored
  // afterwards (unless we replace it with an OOME on failure).
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    // Resolve the klass recorded in the debug info for this eliminated object.
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

    // Check if the object may be null and has an additional is_init input that needs
    // to be checked before using the field values. Skip re-allocation if it is null.
    if (sv->maybe_null()) {
      assert(k->is_inline_klass(), "must be an inline klass");
      intptr_t init_value = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_int();
      // Reinterpret the low 32 bits of the stack slot as the is_init flag.
      jint is_init = (jint)*((jint*)&init_value);
      if (is_init == 0) {
        continue;
      }
    }

    oop obj = NULL;
    if (k->is_instance_klass()) {
      if (sv->is_auto_box()) {
        // Try to reuse the canonical cached box (e.g. Integer cache) instead
        // of allocating a fresh instance.
        AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
        obj = get_cached_box(abv, fr, reg_map, THREAD);
        if (obj != NULL) {
          // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
          abv->set_cached(true);
        }
      }

      InstanceKlass* ik = InstanceKlass::cast(k);
      if (obj == NULL) {
#ifdef COMPILER2
        if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
          // Vector objects get their payload restored during allocation.
          obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
        } else {
          obj = ik->allocate_instance(THREAD);
        }
#else
        obj = ik->allocate_instance(THREAD);
#endif // COMPILER2
      }
    } else if (k->is_flatArray_klass()) {
      FlatArrayKlass* ak = FlatArrayKlass::cast(k);
      // Inline type array must be zeroed because not all memory is reassigned
      obj = ak->allocate(sv->field_size(), THREAD);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      // field_size() counts stack slots; divide by the per-element slot size
      // to recover the array length.
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    // Swallow any allocation exception here; a failed allocation is reported
    // collectively via the OOME thrown below.
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    // Use the preallocated OOME so that reporting the failure cannot itself
    // fail for lack of memory.
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    // Re-install the exception that was pending on entry.
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}
1168 
1169 // We're deoptimizing at the return of a call, inline type fields are
1170 // in registers. When we go back to the interpreter, it will expect a
1171 // reference to an inline type instance. Allocate and initialize it from
1172 // the register values here.
1173 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1174   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1175   if (new_vt == NULL) {
1176     CLEAR_PENDING_EXCEPTION;
1177     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1178   }
1179   return_oops.clear();
1180   return_oops.push(Handle(THREAD, new_vt));
1181   return false;
1182 }
1183 
1184 #if INCLUDE_JVMCI
1185 /**
1186  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1187  * we need to somehow be able to recover the actual kind to be able to write the correct
1188  * amount of bytes.
1189  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1190  * the entries at index n + 1 to n + i are 'markers'.
1191  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1192  * expected form of the array would be:
1193  *
1194  * {b0, b1, b2, b3, INT, marker, b6, b7}
1195  *
1196  * Thus, in order to get back the size of the entry, we simply need to count the number
1197  * of marked entries
1198  *
1199  * @param virtualArray the virtualized byte array
1200  * @param i index of the virtual entry we are recovering
1201  * @return The number of bytes the entry spans
1202  */
1203 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1336       default:
1337         ShouldNotReachHere();
1338     }
1339     index++;
1340   }
1341 }
1342 
1343 // restore fields of an eliminated object array
1344 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1345   for (int i = 0; i < sv->field_size(); i++) {
1346     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1347     assert(value->type() == T_OBJECT, "object element expected");
1348     obj->obj_at_put(i, value->get_obj()());
1349   }
1350 }
1351 
1352 class ReassignedField {
1353 public:
1354   int _offset;
1355   BasicType _type;
1356   InstanceKlass* _klass;
1357 public:
1358   ReassignedField() {
1359     _offset = 0;
1360     _type = T_ILLEGAL;
1361     _klass = NULL;
1362   }
1363 };
1364 
1365 int compare(ReassignedField* left, ReassignedField* right) {
1366   return left->_offset - right->_offset;
1367 }
1368 
1369 // Restore fields of an eliminated instance object using the same field order
1370 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1371 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1372   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1373   InstanceKlass* ik = klass;
1374   while (ik != NULL) {
1375     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1376       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1377         ReassignedField field;
1378         field._offset = fs.offset();
1379         field._type = Signature::basic_type(fs.signature());
1380         if (fs.signature()->is_Q_signature()) {
1381           if (fs.is_inlined()) {
1382             // Resolve klass of flattened inline type field
1383             field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1384           } else {
1385             field._type = T_OBJECT;
1386           }
1387         }
1388         fields->append(field);
1389       }
1390     }
1391     ik = ik->superklass();
1392   }
1393   fields->sort(compare);
1394   for (int i = 0; i < fields->length(); i++) {
1395     BasicType type = fields->at(i)._type;
1396     int offset = base_offset + fields->at(i)._offset;
1397     // Check for flattened inline type field before accessing the ScopeValue because it might not have any fields
1398     if (type == T_PRIMITIVE_OBJECT) {
1399       // Recursively re-assign flattened inline type fields
1400       InstanceKlass* vk = fields->at(i)._klass;
1401       assert(vk != NULL, "must be resolved");
1402       offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1403       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1404       continue; // Continue because we don't need to increment svIndex
1405     }
1406     intptr_t val;
1407     ScopeValue* scope_field = sv->field_at(svIndex);
1408     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);


1409     switch (type) {
1410       case T_OBJECT:
1411       case T_ARRAY:
1412         assert(value->type() == T_OBJECT, "Agreement.");
1413         obj->obj_field_put(offset, value->get_obj()());
1414         break;
1415 
1416       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1417       case T_INT: case T_FLOAT: { // 4 bytes.
1418         assert(value->type() == T_INT, "Agreement.");
1419         bool big_value = false;
1420         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1421           if (scope_field->is_location()) {
1422             Location::Type type = ((LocationValue*) scope_field)->location().type();
1423             if (type == Location::dbl || type == Location::lng) {
1424               big_value = true;
1425             }
1426           }
1427           if (scope_field->is_constant_int()) {
1428             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1429             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1430               big_value = true;
1431             }

1471       case T_BYTE:
1472         assert(value->type() == T_INT, "Agreement.");
1473         val = value->get_int();
1474         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1475         break;
1476 
1477       case T_BOOLEAN:
1478         assert(value->type() == T_INT, "Agreement.");
1479         val = value->get_int();
1480         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1481         break;
1482 
1483       default:
1484         ShouldNotReachHere();
1485     }
1486     svIndex++;
1487   }
1488   return svIndex;
1489 }
1490 
1491 // restore fields of an eliminated inline type array
1492 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1493   InlineKlass* vk = vak->element_klass();
1494   assert(vk->flatten_array(), "should only be used for flattened inline type arrays");
1495   // Adjust offset to omit oop header
1496   int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1497   // Initialize all elements of the flattened inline type array
1498   for (int i = 0; i < sv->field_size(); i++) {
1499     ScopeValue* val = sv->field_at(i);
1500     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1501     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1502   }
1503 }
1504 
// restore fields of all eliminated objects and arrays
// Walks every reallocated object from realloc_objects() and copies the
// scalar-replaced field/element values recorded in the debug info back into
// the new object. Objects that were not allocated (realloc failure or null
// inline type) or that need no reassignment are skipped.
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
#endif // !PRODUCT

    // No object was allocated (realloc failure or is_init == 0); nothing to fill in.
    if (obj.is_null()) {
      continue;
    }

    // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
    if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
      continue;
    }
#ifdef COMPILER2
    if (EnableVectorSupport && VectorSupport::is_vector(k)) {
      assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
      ScopeValue* payload = sv->field_at(0);
      if (payload->is_location() &&
          payload->as_LocationValue()->location().type() == Location::vector) {
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("skip field reassignment for this vector - it should be assigned already");
          if (Verbose) {
            Handle obj = sv->value();
            k->oop_print_on(obj(), tty);
          }
        }
#endif // !PRODUCT
        continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
      }
      // Else fall-through to do assignment for scalar-replaced boxed vector representation
      // which could be restored after vector object allocation.
    }
#endif // COMPILER2
    // Dispatch on the kind of the reallocated object.
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
    } else if (k->is_flatArray_klass()) {
      FlatArrayKlass* vak = FlatArrayKlass::cast(k);
      reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}
1561 
1562 
1563 // relock objects for which synchronization was eliminated
1564 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1565                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1566   bool relocked_objects = false;
1567   for (int i = 0; i < monitors->length(); i++) {
1568     MonitorInfo* mon_info = monitors->at(i);
1569     if (mon_info->eliminated()) {
1570       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1571       relocked_objects = true;
1572       if (!mon_info->owner_is_scalar_replaced()) {

1690 
1691     ttyLocker ttyl;
1692     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1693     cm->log_identity(xtty);
1694     xtty->end_head();
1695     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1696       xtty->begin_elem("jvms bci='%d'", sd->bci());
1697       xtty->method(sd->method());
1698       xtty->end_elem();
1699       if (sd->is_top())  break;
1700     }
1701     xtty->tail("deoptimized");
1702   }
1703 
1704   // Patch the compiled method so that when execution returns to it we will
1705   // deopt the execution state and return to the interpreter.
1706   fr.deoptimize(thread);
1707 }
1708 
1709 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1710   // Deoptimize only if the frame comes from compiled code.
1711   // Do not deoptimize the frame which is already patched
1712   // during the execution of the loops below.
1713   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1714     return;
1715   }
1716   ResourceMark rm;
1717   DeoptimizationMarker dm;
1718   deoptimize_single_frame(thread, fr, reason);
1719 }
1720 
1721 #if INCLUDE_JVMCI
1722 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1723   // there is no exception handler for this pc => deoptimize
1724   cm->make_not_entrant();
1725 
1726   // Use Deoptimization::deoptimize for all of its side-effects:
1727   // gathering traps statistics, logging...
1728   // it also patches the return pc but we do not care about that
1729   // since we return a continuation to the deopt_blob below.
1730   JavaThread* thread = JavaThread::current();
< prev index next >