src/hotspot/share/runtime/deoptimization.cpp

  31 #include "code/debugInfoRec.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "jvm.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logLevel.hpp"
  44 #include "logging/logMessage.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/constantPool.hpp"
  51 #include "oops/fieldStreams.inline.hpp"
  52 #include "oops/method.hpp"
  53 #include "oops/objArrayKlass.hpp"
  54 #include "oops/objArrayOop.inline.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "oops/typeArrayOop.inline.hpp"
  57 #include "oops/verifyOopClosure.hpp"
  58 #include "prims/jvmtiDeferredUpdates.hpp"
  59 #include "prims/jvmtiExport.hpp"
  60 #include "prims/jvmtiThreadState.hpp"
  61 #include "prims/methodHandles.hpp"
  62 #include "prims/vectorSupport.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/continuation.hpp"
  65 #include "runtime/continuationEntry.inline.hpp"
  66 #include "runtime/deoptimization.hpp"
  67 #include "runtime/escapeBarrier.hpp"
  68 #include "runtime/fieldDescriptor.hpp"
  69 #include "runtime/fieldDescriptor.inline.hpp"
  70 #include "runtime/frame.inline.hpp"
  71 #include "runtime/handles.inline.hpp"
  72 #include "runtime/interfaceSupport.inline.hpp"
  73 #include "runtime/javaThread.hpp"
  74 #include "runtime/jniHandles.inline.hpp"
  75 #include "runtime/keepStackGCProcessed.hpp"

 172 
 173   return fetch_unroll_info_helper(current, exec_mode);
 174 JRT_END
 175 
 176 #if COMPILER2_OR_JVMCI
 177 // print information about reallocated objects
 178 static void print_objects(JavaThread* deoptee_thread,
 179                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 180   ResourceMark rm;
 181   stringStream st;  // change to logStream with logging
 182   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 183   fieldDescriptor fd;
 184 
 185   for (int i = 0; i < objects->length(); i++) {
 186     ObjectValue* sv = (ObjectValue*) objects->at(i);
 187     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 188     Handle obj = sv->value();
 189 
 190     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 191     k->print_value_on(&st);
 192     assert(obj.not_null() || realloc_failures, "reallocation was missed");
 193     if (obj.is_null()) {
 194       st.print(" allocation failed");
 195     } else {
 196       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 197     }
 198     st.cr();
 199 
 200     if (Verbose && !obj.is_null()) {
 201       k->oop_print_on(obj(), &st);
 202     }
 203   }
 204   tty->print_raw(st.freeze());
 205 }
 206 
 207 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 208                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 209                                   bool& deoptimized_objects) {
 210   bool realloc_failures = false;
 211   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 212 
 213   JavaThread* deoptee_thread = chunk->at(0)->thread();
 214   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 215          "a frame can only be deoptimized by the owner thread");
 216 
 217   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 218 
 219   // The flag return_oop() indicates call sites which return oop
 220   // in compiled code. Such sites include java method calls,
 221   // runtime calls (for example, used to allocate new objects/arrays
 222   // on slow code path) and any other calls generated in compiled code.
 223   // It is not guaranteed that we can get such information here only
 224   // by analyzing bytecode in deoptimized frames. This is why this flag
 225   // is set during method compilation (see Compile::Process_OopMap_Node()).
 226   // If the previous frame was popped or if we are dispatching an exception,
 227   // we don't have an oop result.
 228   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 229   Handle return_value;
 230   if (save_oop_result) {
 231     // Reallocation may trigger GC. If deoptimization happened on return from
 232     // call which returns oop we need to save it since it is not in oopmap.
 233     oop result = deoptee.saved_oop_result(&map);
 234     assert(oopDesc::is_oop_or_null(result), "must be oop");
 235     return_value = Handle(thread, result);
 236     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 237     if (TraceDeoptimization) {
 238       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 239       tty->cr();
 240     }
 241   }
 242   if (objects != NULL) {
 243     if (exec_mode == Deoptimization::Unpack_none) {
 244       assert(thread->thread_state() == _thread_in_vm, "assumption");
 245       JavaThread* THREAD = thread; // For exception macros.
 246       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 247       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 248       deoptimized_objects = true;
 249     } else {
 250       JavaThread* current = thread; // For JRT_BLOCK
 251       JRT_BLOCK
 252       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 253       JRT_END
 254     }
 255     bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 256     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 257     if (TraceDeoptimization) {
 258       print_objects(deoptee_thread, objects, realloc_failures);
 259     }
 260   }
 261   if (save_oop_result) {
 262     // Restore result.
 263     deoptee.set_saved_oop_result(&map, return_value());
 264   }
 265   return realloc_failures;
 266 }
 267 
 268 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 269                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 270   JavaThread* deoptee_thread = chunk->at(0)->thread();
 271   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 272   assert(thread == Thread::current(), "should be");
 273   HandleMark hm(thread);
 274 #ifndef PRODUCT
 275   bool first = true;
 276 #endif // !PRODUCT
 277   for (int i = 0; i < chunk->length(); i++) {
 278     compiledVFrame* cvf = chunk->at(i);
 279     assert (cvf->scope() != NULL,"expect only compiled java frames");
 280     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 281     if (monitors->is_nonempty()) {
 282       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 283                                                      exec_mode, realloc_failures);

 578   // its caller's stack by. If the caller is a compiled frame then
 579   // we pretend that the callee has no parameters so that the
 580   // extension counts for the full amount of locals and not just
 581   // locals-parms. This is because without a c2i adapter the parm
 582   // area as created by the compiled frame will not be usable by
 583   // the interpreter. (Depending on the calling convention there
 584   // may not even be enough space).
 585 
 586   // QQQ I'd rather see this pushed down into last_frame_adjust
 587   // and have it take the sender (aka caller).
 588 
 589   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 590     caller_adjustment = last_frame_adjust(0, callee_locals);
 591   } else if (callee_locals > callee_parameters) {
 592     // The caller frame may need extending to accommodate
 593     // non-parameter locals of the first unpacked interpreted frame.
 594     // Compute that adjustment.
 595     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 596   }
 597 
 598   // If the sender is deoptimized the we must retrieve the address of the handler
 599   // since the frame will "magically" show the original pc before the deopt
 600   // and we'd undo the deopt.
 601 
 602   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 603   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 604     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 605   }
 606 
 607   assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
 608 
 609 #if INCLUDE_JVMCI
 610   if (exceptionObject() != NULL) {
 611     current->set_exception_oop(exceptionObject());
 612     exec_mode = Unpack_exception;
 613   }
 614 #endif
 615 
 616   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 617     assert(current->has_pending_exception(), "should have thrown OOME");
 618     current->set_exception_oop(current->pending_exception());

1063        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1064        default:;
1065      }
1066    }
1067    return NULL;
1068 }
1069 #endif // INCLUDE_JVMCI
1070 
1071 #if COMPILER2_OR_JVMCI
1072 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1073   Handle pending_exception(THREAD, thread->pending_exception());
1074   const char* exception_file = thread->exception_file();
1075   int exception_line = thread->exception_line();
1076   thread->clear_pending_exception();
1077 
1078   bool failures = false;
1079 
1080   for (int i = 0; i < objects->length(); i++) {
1081     assert(objects->at(i)->is_object(), "invalid debug information");
1082     ObjectValue* sv = (ObjectValue*) objects->at(i);
1083 
1084     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1085     oop obj = NULL;
1086 
1087     if (k->is_instance_klass()) {
1088 #if INCLUDE_JVMCI
1089       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1090       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1091         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1092         obj = get_cached_box(abv, fr, reg_map, THREAD);
1093         if (obj != NULL) {
1094           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1095           abv->set_cached(true);
1096         }
1097       }
1098 #endif // INCLUDE_JVMCI
1099 
1100       InstanceKlass* ik = InstanceKlass::cast(k);
1101       if (obj == NULL) {
1102 #ifdef COMPILER2
1103         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1104           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1105         } else {
1106           obj = ik->allocate_instance(THREAD);
1107         }
1108 #else
1109         obj = ik->allocate_instance(THREAD);
1110 #endif // COMPILER2
1111       }
1112     } else if (k->is_typeArray_klass()) {
1113       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1114       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1115       int len = sv->field_size() / type2size[ak->element_type()];
1116       obj = ak->allocate(len, THREAD);
1117     } else if (k->is_objArray_klass()) {
1118       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1119       obj = ak->allocate(sv->field_size(), THREAD);
1120     }
1121 
1122     if (obj == NULL) {
1123       failures = true;
1124     }
1125 
1126     assert(sv->value().is_null(), "redundant reallocation");
1127     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1128     CLEAR_PENDING_EXCEPTION;
1129     sv->set_value(obj);
1130   }
1131 
1132   if (failures) {
1133     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1134   } else if (pending_exception.not_null()) {
1135     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1136   }
1137 
1138   return failures;
1139 }
1140 
1141 #if INCLUDE_JVMCI
1142 /**
1143  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1144  * we need to somehow be able to recover the actual kind to be able to write the correct
 1145  * number of bytes.
1146  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1147  * the entries at index i + 1 to i + n - 1 are 'markers'.
1148  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1149  * expected form of the array would be:
1150  *
1151  * {b0, b1, b2, b3, INT, marker, b6, b7}
1152  *
 1153  * Thus, in order to get back the size of the entry, we simply need to count the
 1154  * marker entries that follow it (the entry spans one byte plus that many markers).
1155  *
1156  * @param virtualArray the virtualized byte array
1157  * @param i index of the virtual entry we are recovering
1158  * @return The number of bytes the entry spans
1159  */
1160 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
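(The body of count_number_of_bytes_for_entry is elided from this hunk. As a
sketch only, assuming ScopeValue::is_marker() is the test for the 'marker'
entries described above, the counting would look like:

    int index = i;
    while (++index < virtualArray->field_size() &&
           virtualArray->field_at(index)->is_marker()) {
      // consume the markers padding out the entry that starts at index i
    }
    return index - i;  // e.g. 2 for the short at index 4 in the example

This is an illustration, not the code under review.)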

1293       default:
1294         ShouldNotReachHere();
1295     }
1296     index++;
1297   }
1298 }
1299 
1300 // restore fields of an eliminated object array
1301 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1302   for (int i = 0; i < sv->field_size(); i++) {
1303     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1304     assert(value->type() == T_OBJECT, "object element expected");
1305     obj->obj_at_put(i, value->get_obj()());
1306   }
1307 }
1308 
1309 class ReassignedField {
1310 public:
1311   int _offset;
1312   BasicType _type;
1313 public:
1314   ReassignedField() {
1315     _offset = 0;
1316     _type = T_ILLEGAL;
1317   }
1318 };
1319 
1320 int compare(ReassignedField* left, ReassignedField* right) {
1321   return left->_offset - right->_offset;
1322 }
1323 
1324 // Restore fields of an eliminated instance object using the same field order
1325 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1326 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1327   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1328   InstanceKlass* ik = klass;
1329   while (ik != NULL) {
1330     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1331       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1332         ReassignedField field;
1333         field._offset = fs.offset();
1334         field._type = Signature::basic_type(fs.signature());
1335         fields->append(field);
1336       }
1337     }
1338     ik = ik->superklass();
1339   }
1340   fields->sort(compare);
1341   for (int i = 0; i < fields->length(); i++) {
1342     intptr_t val;
1343     ScopeValue* scope_field = sv->field_at(svIndex);
1344     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1345     int offset = fields->at(i)._offset;
1346     BasicType type = fields->at(i)._type;
1347     switch (type) {
1348       case T_OBJECT: case T_ARRAY:
1349         assert(value->type() == T_OBJECT, "Agreement.");
1350         obj->obj_field_put(offset, value->get_obj()());
1351         break;
1352 
1353       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1354       case T_INT: case T_FLOAT: { // 4 bytes.
1355         assert(value->type() == T_INT, "Agreement.");
1356         bool big_value = false;
1357         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1358           if (scope_field->is_location()) {
1359             Location::Type type = ((LocationValue*) scope_field)->location().type();
1360             if (type == Location::dbl || type == Location::lng) {
1361               big_value = true;
1362             }
1363           }
1364           if (scope_field->is_constant_int()) {
1365             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1366             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1367               big_value = true;
1368             }

1408       case T_BYTE:
1409         assert(value->type() == T_INT, "Agreement.");
1410         val = value->get_int();
1411         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1412         break;
1413 
1414       case T_BOOLEAN:
1415         assert(value->type() == T_INT, "Agreement.");
1416         val = value->get_int();
1417         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1418         break;
1419 
1420       default:
1421         ShouldNotReachHere();
1422     }
1423     svIndex++;
1424   }
1425   return svIndex;
1426 }
1427 
1428 // restore fields of all eliminated objects and arrays
1429 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1430   for (int i = 0; i < objects->length(); i++) {
1431     ObjectValue* sv = (ObjectValue*) objects->at(i);
1432     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1433     Handle obj = sv->value();
1434     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1435 #ifndef PRODUCT
1436     if (PrintDeoptimizationDetails) {
1437       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1438     }
1439 #endif // !PRODUCT
1440 
1441     if (obj.is_null()) {
1442       continue;
1443     }
1444 
1445 #if INCLUDE_JVMCI
1446     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1447     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1448       continue;
1449     }
1450 #endif // INCLUDE_JVMCI
1451 #ifdef COMPILER2
1452     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1453       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1454       ScopeValue* payload = sv->field_at(0);
1455       if (payload->is_location() &&
1456           payload->as_LocationValue()->location().type() == Location::vector) {
1457 #ifndef PRODUCT
1458         if (PrintDeoptimizationDetails) {
1459           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1460           if (Verbose) {
1461             Handle obj = sv->value();
1462             k->oop_print_on(obj(), tty);
1463           }
1464         }
1465 #endif // !PRODUCT
1466         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1467       }
1468       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1469       // which could be restored after vector object allocation.
1470     }
 1471 #endif // COMPILER2
1472     if (k->is_instance_klass()) {
1473       InstanceKlass* ik = InstanceKlass::cast(k);
1474       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1475     } else if (k->is_typeArray_klass()) {
1476       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1477       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1478     } else if (k->is_objArray_klass()) {
1479       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1480     }
1481   }
1482 }
1483 
1484 
1485 // relock objects for which synchronization was eliminated
1486 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1487                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1488   bool relocked_objects = false;
1489   for (int i = 0; i < monitors->length(); i++) {
1490     MonitorInfo* mon_info = monitors->at(i);
1491     if (mon_info->eliminated()) {
1492       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1493       relocked_objects = true;
1494       if (!mon_info->owner_is_scalar_replaced()) {

1614     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1615     cm->log_identity(xtty);
1616     xtty->end_head();
1617     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1618       xtty->begin_elem("jvms bci='%d'", sd->bci());
1619       xtty->method(sd->method());
1620       xtty->end_elem();
1621       if (sd->is_top())  break;
1622     }
1623     xtty->tail("deoptimized");
1624   }
1625 
1626   Continuation::notify_deopt(thread, fr.sp());
1627 
1628   // Patch the compiled method so that when execution returns to it we will
1629   // deopt the execution state and return to the interpreter.
1630   fr.deoptimize(thread);
1631 }
1632 
1633 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1634   // Deoptimize only if the frame comes from compile code.
1635   // Do not deoptimize the frame which is already patched
1636   // during the execution of the loops below.
1637   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1638     return;
1639   }
1640   ResourceMark rm;
1641   deoptimize_single_frame(thread, fr, reason);
1642 }
1643 
1644 #if INCLUDE_JVMCI
1645 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1646   // there is no exception handler for this pc => deoptimize
1647   cm->make_not_entrant();
1648 
1649   // Use Deoptimization::deoptimize for all of its side-effects:
1650   // gathering traps statistics, logging...
1651   // it also patches the return pc but we do not care about that
1652   // since we return a continuation to the deopt_blob below.
1653   JavaThread* thread = JavaThread::current();
1654   RegisterMap reg_map(thread,

  31 #include "code/debugInfoRec.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "jvm.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logLevel.hpp"
  44 #include "logging/logMessage.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/constantPool.hpp"
  51 #include "oops/flatArrayKlass.hpp"
  52 #include "oops/flatArrayOop.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/inlineKlass.inline.hpp"
  59 #include "oops/typeArrayOop.inline.hpp"
  60 #include "oops/verifyOopClosure.hpp"
  61 #include "prims/jvmtiDeferredUpdates.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "prims/jvmtiThreadState.hpp"
  64 #include "prims/methodHandles.hpp"
  65 #include "prims/vectorSupport.hpp"
  66 #include "runtime/atomic.hpp"
  67 #include "runtime/continuation.hpp"
  68 #include "runtime/continuationEntry.inline.hpp"
  69 #include "runtime/deoptimization.hpp"
  70 #include "runtime/escapeBarrier.hpp"
  71 #include "runtime/fieldDescriptor.hpp"
  72 #include "runtime/fieldDescriptor.inline.hpp"
  73 #include "runtime/frame.inline.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/interfaceSupport.inline.hpp"
  76 #include "runtime/javaThread.hpp"
  77 #include "runtime/jniHandles.inline.hpp"
  78 #include "runtime/keepStackGCProcessed.hpp"

 175 
 176   return fetch_unroll_info_helper(current, exec_mode);
 177 JRT_END
 178 
 179 #if COMPILER2_OR_JVMCI
 180 // print information about reallocated objects
 181 static void print_objects(JavaThread* deoptee_thread,
 182                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 183   ResourceMark rm;
 184   stringStream st;  // change to logStream with logging
 185   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 186   fieldDescriptor fd;
 187 
 188   for (int i = 0; i < objects->length(); i++) {
 189     ObjectValue* sv = (ObjectValue*) objects->at(i);
 190     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 191     Handle obj = sv->value();
 192 
 193     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 194     k->print_value_on(&st);
 195     assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
 196     if (obj.is_null()) {
 197       if (k->is_inline_klass()) {
 198         st.print(" is null");
 199       } else {
 200         st.print(" allocation failed");
 201       }
 202     } else {
 203       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 204     }
 205     st.cr();
 206 
 207     if (Verbose && !obj.is_null()) {
 208       k->oop_print_on(obj(), &st);
 209     }
 210   }
 211   tty->print_raw(st.freeze());
 212 }
 213 
 214 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 215                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 216                                   bool& deoptimized_objects) {
 217   bool realloc_failures = false;
 218   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 219 
 220   JavaThread* deoptee_thread = chunk->at(0)->thread();
 221   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 222          "a frame can only be deoptimized by the owner thread");
 223 
 224   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 225 
 226   // The flag return_oop() indicates call sites which return oop
 227   // in compiled code. Such sites include java method calls,
 228   // runtime calls (for example, used to allocate new objects/arrays
 229   // on slow code path) and any other calls generated in compiled code.
 230   // It is not guaranteed that we can get such information here only
 231   // by analyzing bytecode in deoptimized frames. This is why this flag
 232   // is set during method compilation (see Compile::Process_OopMap_Node()).
 233   // If the previous frame was popped or if we are dispatching an exception,
 234   // we don't have an oop result.
 235   ScopeDesc* scope = chunk->at(0)->scope();
 236   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 237   // In case of the return of multiple values, we must take care
 238   // of all oop return values.
 239   GrowableArray<Handle> return_oops;
 240   InlineKlass* vk = NULL;
 241   if (save_oop_result && scope->return_scalarized()) {
 242     vk = InlineKlass::returned_inline_klass(map);
 243     if (vk != NULL) {
 244       vk->save_oop_fields(map, return_oops);
 245       save_oop_result = false;
 246     }
 247   }
 248   if (save_oop_result) {
 249     // Reallocation may trigger GC. If deoptimization happened on return from
 250     // call which returns oop we need to save it since it is not in oopmap.
 251     oop result = deoptee.saved_oop_result(&map);
 252     assert(oopDesc::is_oop_or_null(result), "must be oop");
 253     return_oops.push(Handle(thread, result));
 254     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 255     if (TraceDeoptimization) {
 256       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 257       tty->cr();
 258     }
 259   }
 260   if (objects != NULL || vk != NULL) {
 261     if (exec_mode == Deoptimization::Unpack_none) {
 262       assert(thread->thread_state() == _thread_in_vm, "assumption");
 263       JavaThread* THREAD = thread; // For exception macros.
 264       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 265       if (vk != NULL) {
 266         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 267       }
 268       if (objects != NULL) {
 269         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 270         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 271         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
 272       }
 273       deoptimized_objects = true;
 274     } else {
 275       JavaThread* current = thread; // For JRT_BLOCK
 276       JRT_BLOCK
 277       if (vk != NULL) {
 278         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 279       }
 280       if (objects != NULL) {
 281         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 282         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 283         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 284       }
 285       JRT_END
 286     }
 287     if (TraceDeoptimization) {
 288       print_objects(deoptee_thread, objects, realloc_failures);
 289     }
 290   }
 291   if (save_oop_result || vk != NULL) {
 292     // Restore result.
 293     assert(return_oops.length() == 1, "no inline type");
 294     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 295   }
 296   return realloc_failures;
 297 }
 298 
 299 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 300                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 301   JavaThread* deoptee_thread = chunk->at(0)->thread();
 302   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 303   assert(thread == Thread::current(), "should be");
 304   HandleMark hm(thread);
 305 #ifndef PRODUCT
 306   bool first = true;
 307 #endif // !PRODUCT
 308   for (int i = 0; i < chunk->length(); i++) {
 309     compiledVFrame* cvf = chunk->at(i);
 310     assert (cvf->scope() != NULL,"expect only compiled java frames");
 311     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 312     if (monitors->is_nonempty()) {
 313       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 314                                                      exec_mode, realloc_failures);

 609   // its caller's stack by. If the caller is a compiled frame then
 610   // we pretend that the callee has no parameters so that the
 611   // extension counts for the full amount of locals and not just
 612   // locals-parms. This is because without a c2i adapter the parm
 613   // area as created by the compiled frame will not be usable by
 614   // the interpreter. (Depending on the calling convention there
 615   // may not even be enough space).
 616 
 617   // QQQ I'd rather see this pushed down into last_frame_adjust
 618   // and have it take the sender (aka caller).
 619 
 620   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 621     caller_adjustment = last_frame_adjust(0, callee_locals);
 622   } else if (callee_locals > callee_parameters) {
 623     // The caller frame may need extending to accommodate
 624     // non-parameter locals of the first unpacked interpreted frame.
 625     // Compute that adjustment.
 626     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 627   }
 628 
 629   // If the sender is deoptimized we must retrieve the address of the handler
 630   // since the frame will "magically" show the original pc before the deopt
 631   // and we'd undo the deopt.
 632 
 633   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 634   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 635     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 636   }
 637 
 638   assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
 639 
 640 #if INCLUDE_JVMCI
 641   if (exceptionObject() != NULL) {
 642     current->set_exception_oop(exceptionObject());
 643     exec_mode = Unpack_exception;
 644   }
 645 #endif
 646 
 647   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 648     assert(current->has_pending_exception(), "should have thrown OOME");
 649     current->set_exception_oop(current->pending_exception());

1094        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1095        default:;
1096      }
1097    }
1098    return NULL;
1099 }
1100 #endif // INCLUDE_JVMCI
1101 
1102 #if COMPILER2_OR_JVMCI
1103 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1104   Handle pending_exception(THREAD, thread->pending_exception());
1105   const char* exception_file = thread->exception_file();
1106   int exception_line = thread->exception_line();
1107   thread->clear_pending_exception();
1108 
1109   bool failures = false;
1110 
1111   for (int i = 0; i < objects->length(); i++) {
1112     assert(objects->at(i)->is_object(), "invalid debug information");
1113     ObjectValue* sv = (ObjectValue*) objects->at(i);
1114     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1115 
1116     // Check if the object may be null and has an additional is_init input that needs
1117     // to be checked before using the field values. Skip re-allocation if it is null.
1118     if (sv->maybe_null()) {
1119       assert(k->is_inline_klass(), "must be an inline klass");
1120       intptr_t init_value = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_int();
1121       jint is_init = (jint)*((jint*)&init_value);
1122       if (is_init == 0) {
1123         continue;
1124       }
1125     }
1126 
1127     oop obj = NULL;
1128     if (k->is_instance_klass()) {
1129 #if INCLUDE_JVMCI
1130       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1131       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1132         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1133         obj = get_cached_box(abv, fr, reg_map, THREAD);
1134         if (obj != NULL) {
1135           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1136           abv->set_cached(true);
1137         }
1138       }
1139 #endif // INCLUDE_JVMCI
1140 
1141       InstanceKlass* ik = InstanceKlass::cast(k);
1142       if (obj == NULL) {
1143 #ifdef COMPILER2
1144         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1145           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1146         } else {
1147           obj = ik->allocate_instance(THREAD);
1148         }
1149 #else
1150         obj = ik->allocate_instance(THREAD);
1151 #endif // COMPILER2
1152       }
1153     } else if (k->is_flatArray_klass()) {
1154       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1155       // Inline type array must be zeroed because not all memory is reassigned
1156       obj = ak->allocate(sv->field_size(), THREAD);
1157     } else if (k->is_typeArray_klass()) {
1158       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1159       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1160       int len = sv->field_size() / type2size[ak->element_type()];
1161       obj = ak->allocate(len, THREAD);
1162     } else if (k->is_objArray_klass()) {
1163       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1164       obj = ak->allocate(sv->field_size(), THREAD);
1165     }
1166 
1167     if (obj == NULL) {
1168       failures = true;
1169     }
1170 
1171     assert(sv->value().is_null(), "redundant reallocation");
1172     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1173     CLEAR_PENDING_EXCEPTION;
1174     sv->set_value(obj);
1175   }
1176 
1177   if (failures) {
1178     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1179   } else if (pending_exception.not_null()) {
1180     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1181   }
1182 
1183   return failures;
1184 }
1185 
 1186 // We're deoptimizing at the return of a call; inline type fields are
1187 // in registers. When we go back to the interpreter, it will expect a
1188 // reference to an inline type instance. Allocate and initialize it from
1189 // the register values here.
1190 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1191   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1192   if (new_vt == NULL) {
1193     CLEAR_PENDING_EXCEPTION;
1194     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1195   }
1196   return_oops.clear();
1197   return_oops.push(Handle(THREAD, new_vt));
1198   return false;
1199 }
1200 
1201 #if INCLUDE_JVMCI
1202 /**
1203  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1204  * we need to somehow be able to recover the actual kind to be able to write the correct
 1205  * number of bytes.
1206  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1207  * the entries at index i + 1 to i + n - 1 are 'markers'.
1208  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1209  * expected form of the array would be:
1210  *
1211  * {b0, b1, b2, b3, INT, marker, b6, b7}
1212  *
 1213  * Thus, in order to get back the size of the entry, we simply need to count the
 1214  * marker entries that follow it (the entry spans one byte plus that many markers).
1215  *
1216  * @param virtualArray the virtualized byte array
1217  * @param i index of the virtual entry we are recovering
1218  * @return The number of bytes the entry spans
1219  */
1220 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1353       default:
1354         ShouldNotReachHere();
1355     }
1356     index++;
1357   }
1358 }
1359 
1360 // restore fields of an eliminated object array
1361 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1362   for (int i = 0; i < sv->field_size(); i++) {
1363     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1364     assert(value->type() == T_OBJECT, "object element expected");
1365     obj->obj_at_put(i, value->get_obj()());
1366   }
1367 }
1368 
1369 class ReassignedField {
1370 public:
1371   int _offset;
1372   BasicType _type;
1373   InstanceKlass* _klass;
1374 public:
1375   ReassignedField() {
1376     _offset = 0;
1377     _type = T_ILLEGAL;
1378     _klass = NULL;
1379   }
1380 };
1381 
1382 int compare(ReassignedField* left, ReassignedField* right) {
1383   return left->_offset - right->_offset;
1384 }
1385 
1386 // Restore fields of an eliminated instance object using the same field order
1387 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1388 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1389   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1390   InstanceKlass* ik = klass;
1391   while (ik != NULL) {
1392     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1393       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1394         ReassignedField field;
1395         field._offset = fs.offset();
1396         field._type = Signature::basic_type(fs.signature());
1397         if (fs.signature()->is_Q_signature()) {
1398           if (fs.is_inlined()) {
1399             // Resolve klass of flattened inline type field
1400             field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1401           } else {
1402             field._type = T_OBJECT;
1403           }
1404         }
1405         fields->append(field);
1406       }
1407     }
1408     ik = ik->superklass();
1409   }
1410   fields->sort(compare);
1411   for (int i = 0; i < fields->length(); i++) {
1412     BasicType type = fields->at(i)._type;
1413     int offset = base_offset + fields->at(i)._offset;
1414     // Check for flattened inline type field before accessing the ScopeValue because it might not have any fields
1415     if (type == T_PRIMITIVE_OBJECT) {
1416       // Recursively re-assign flattened inline type fields
1417       InstanceKlass* vk = fields->at(i)._klass;
1418       assert(vk != NULL, "must be resolved");
1419       offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1420       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1421       continue; // Continue because we don't need to increment svIndex
1422     }
1423     intptr_t val;
1424     ScopeValue* scope_field = sv->field_at(svIndex);
1425     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1426     switch (type) {
1427       case T_OBJECT:
1428       case T_ARRAY:
1429         assert(value->type() == T_OBJECT, "Agreement.");
1430         obj->obj_field_put(offset, value->get_obj()());
1431         break;
1432 
1433       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1434       case T_INT: case T_FLOAT: { // 4 bytes.
1435         assert(value->type() == T_INT, "Agreement.");
1436         bool big_value = false;
1437         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1438           if (scope_field->is_location()) {
1439             Location::Type type = ((LocationValue*) scope_field)->location().type();
1440             if (type == Location::dbl || type == Location::lng) {
1441               big_value = true;
1442             }
1443           }
1444           if (scope_field->is_constant_int()) {
1445             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1446             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1447               big_value = true;
1448             }

1488       case T_BYTE:
1489         assert(value->type() == T_INT, "Agreement.");
1490         val = value->get_int();
1491         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1492         break;
1493 
1494       case T_BOOLEAN:
1495         assert(value->type() == T_INT, "Agreement.");
1496         val = value->get_int();
1497         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1498         break;
1499 
1500       default:
1501         ShouldNotReachHere();
1502     }
1503     svIndex++;
1504   }
1505   return svIndex;
1506 }
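(A worked example of the recursive offset adjustment above, with layout
numbers assumed for illustration: a flattened inline type field at holder
offset 24, whose InlineKlass has first_field_offset() == 12, yields
base_offset = 24 - 12 = 12 for the recursive call; a nested field at
instance offset 16 is then written at 12 + 16 = 28, i.e. 4 bytes into the
flattened payload, as intended.)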
1507 
1508 // restore fields of an eliminated inline type array
1509 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1510   InlineKlass* vk = vak->element_klass();
1511   assert(vk->flatten_array(), "should only be used for flattened inline type arrays");
1512   // Adjust offset to omit oop header
1513   int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1514   // Initialize all elements of the flattened inline type array
1515   for (int i = 0; i < sv->field_size(); i++) {
1516     ScopeValue* val = sv->field_at(i);
1517     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1518     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1519   }
1520 }
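(A worked example of the element offset math above, with layout values
assumed for illustration: given an array base offset of 16 bytes,
first_field_offset() == 12, and 8-byte elements (log2 element size == 3),
element i == 2 gets offset = (16 - 12) + (2 << 3) = 20; the call into
reassign_fields_by_klass then adds each field's instance offset (>= 12),
so the element's first field lands at 20 + 12 = 32, exactly base 16 plus
2 * 8.)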
1521 
1522 // restore fields of all eliminated objects and arrays
1523 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1524   for (int i = 0; i < objects->length(); i++) {
1525     ObjectValue* sv = (ObjectValue*) objects->at(i);
1526     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1527     Handle obj = sv->value();
1528     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1529 #ifndef PRODUCT
1530     if (PrintDeoptimizationDetails) {
1531       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1532     }
1533 #endif // !PRODUCT
1534 
1535     if (obj.is_null()) {
1536       continue;
1537     }
1538 
1539 #if INCLUDE_JVMCI
1540     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1541     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1542       continue;
1543     }
1544 #endif // INCLUDE_JVMCI
1545 #ifdef COMPILER2
1546     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1547       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1548       ScopeValue* payload = sv->field_at(0);
1549       if (payload->is_location() &&
1550           payload->as_LocationValue()->location().type() == Location::vector) {
1551 #ifndef PRODUCT
1552         if (PrintDeoptimizationDetails) {
1553           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1554           if (Verbose) {
1555             Handle obj = sv->value();
1556             k->oop_print_on(obj(), tty);
1557           }
1558         }
1559 #endif // !PRODUCT
1560         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1561       }
1562       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1563       // which could be restored after vector object allocation.
1564     }
 1565 #endif // COMPILER2
1566     if (k->is_instance_klass()) {
1567       InstanceKlass* ik = InstanceKlass::cast(k);
1568       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1569     } else if (k->is_flatArray_klass()) {
1570       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1571       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1572     } else if (k->is_typeArray_klass()) {
1573       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1574       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1575     } else if (k->is_objArray_klass()) {
1576       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1577     }
1578   }
1579 }
1580 
1581 
1582 // relock objects for which synchronization was eliminated
1583 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1584                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1585   bool relocked_objects = false;
1586   for (int i = 0; i < monitors->length(); i++) {
1587     MonitorInfo* mon_info = monitors->at(i);
1588     if (mon_info->eliminated()) {
1589       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1590       relocked_objects = true;
1591       if (!mon_info->owner_is_scalar_replaced()) {

1711     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1712     cm->log_identity(xtty);
1713     xtty->end_head();
1714     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1715       xtty->begin_elem("jvms bci='%d'", sd->bci());
1716       xtty->method(sd->method());
1717       xtty->end_elem();
1718       if (sd->is_top())  break;
1719     }
1720     xtty->tail("deoptimized");
1721   }
1722 
1723   Continuation::notify_deopt(thread, fr.sp());
1724 
1725   // Patch the compiled method so that when execution returns to it we will
1726   // deopt the execution state and return to the interpreter.
1727   fr.deoptimize(thread);
1728 }
1729 
1730 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1731   // Deoptimize only if the frame comes from compiled code.
1732   // Do not deoptimize the frame which is already patched
1733   // during the execution of the loops below.
1734   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1735     return;
1736   }
1737   ResourceMark rm;
1738   deoptimize_single_frame(thread, fr, reason);
1739 }
1740 
1741 #if INCLUDE_JVMCI
1742 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1743   // there is no exception handler for this pc => deoptimize
1744   cm->make_not_entrant();
1745 
1746   // Use Deoptimization::deoptimize for all of its side-effects:
1747   // gathering traps statistics, logging...
1748   // it also patches the return pc but we do not care about that
1749   // since we return a continuation to the deopt_blob below.
1750   JavaThread* thread = JavaThread::current();
1751   RegisterMap reg_map(thread,