src/hotspot/share/runtime/deoptimization.cpp (old version, before the change)


  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "interpreter/bytecode.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logLevel.hpp"
  44 #include "logging/logMessage.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/constantPool.hpp"
  51 #include "oops/fieldStreams.inline.hpp"
  52 #include "oops/method.hpp"
  53 #include "oops/objArrayKlass.hpp"
  54 #include "oops/objArrayOop.inline.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "oops/typeArrayOop.inline.hpp"
  57 #include "oops/verifyOopClosure.hpp"
  58 #include "prims/jvmtiDeferredUpdates.hpp"
  59 #include "prims/jvmtiExport.hpp"
  60 #include "prims/jvmtiThreadState.hpp"
  61 #include "prims/methodHandles.hpp"
  62 #include "prims/vectorSupport.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/continuation.hpp"
  65 #include "runtime/continuationEntry.inline.hpp"
  66 #include "runtime/deoptimization.hpp"
  67 #include "runtime/escapeBarrier.hpp"
  68 #include "runtime/fieldDescriptor.hpp"
  69 #include "runtime/fieldDescriptor.inline.hpp"
  70 #include "runtime/frame.inline.hpp"
  71 #include "runtime/handles.inline.hpp"
  72 #include "runtime/interfaceSupport.inline.hpp"
  73 #include "runtime/javaThread.hpp"
  74 #include "runtime/jniHandles.inline.hpp"
  75 #include "runtime/keepStackGCProcessed.hpp"

 185 
 186   return fetch_unroll_info_helper(current, exec_mode);
 187 JRT_END
 188 
 189 #if COMPILER2_OR_JVMCI
 190 // print information about reallocated objects
 191 static void print_objects(JavaThread* deoptee_thread,
 192                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 193   ResourceMark rm;
 194   stringStream st;  // change to logStream with logging
 195   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 196   fieldDescriptor fd;
 197 
 198   for (int i = 0; i < objects->length(); i++) {
 199     ObjectValue* sv = (ObjectValue*) objects->at(i);
 200     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 201     Handle obj = sv->value();
 202 
 203     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 204     k->print_value_on(&st);
 205     assert(obj.not_null() || realloc_failures, "reallocation was missed");
 206     if (obj.is_null()) {
 207       st.print(" allocation failed");
 208     } else {
 209       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 210     }
 211     st.cr();
 212 
 213     if (Verbose && !obj.is_null()) {
 214       k->oop_print_on(obj(), &st);
 215     }
 216   }
 217   tty->print_raw(st.freeze());
 218 }
 219 
 220 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 221                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 222                                   bool& deoptimized_objects) {
 223   bool realloc_failures = false;
 224   assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
 225 
 226   JavaThread* deoptee_thread = chunk->at(0)->thread();
 227   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 228          "a frame can only be deoptimized by the owner thread");
 229 
 230   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 231 
 232   // The flag return_oop() indicates call sites which return oop
 233   // in compiled code. Such sites include java method calls,
 234   // runtime calls (for example, used to allocate new objects/arrays
 235   // on slow code path) and any other calls generated in compiled code.
 236   // It is not guaranteed that we can get such information here only
 237   // by analyzing bytecode in deoptimized frames. This is why this flag
 238   // is set during method compilation (see Compile::Process_OopMap_Node()).
 239   // If the previous frame was popped or if we are dispatching an exception,
 240   // we don't have an oop result.
 241   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 242   Handle return_value;
 243   if (save_oop_result) {
 244     // Reallocation may trigger GC. If deoptimization happened on return from
 245     // call which returns oop we need to save it since it is not in oopmap.
 246     oop result = deoptee.saved_oop_result(&map);
 247     assert(oopDesc::is_oop_or_null(result), "must be oop");
 248     return_value = Handle(thread, result);
 249     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 250     if (TraceDeoptimization) {
 251       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 252       tty->cr();
 253     }
 254   }
 255   if (objects != NULL) {
 256     if (exec_mode == Deoptimization::Unpack_none) {
 257       assert(thread->thread_state() == _thread_in_vm, "assumption");
 258       JavaThread* THREAD = thread; // For exception macros.
 259       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 260       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 261       deoptimized_objects = true;
 262     } else {
 263       JavaThread* current = thread; // For JRT_BLOCK
 264       JRT_BLOCK
 265       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 266       JRT_END
 267     }
 268     bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 269     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 270     if (TraceDeoptimization) {
 271       print_objects(deoptee_thread, objects, realloc_failures);
 272     }
 273   }
 274   if (save_oop_result) {
 275     // Restore result.
 276     deoptee.set_saved_oop_result(&map, return_value());
 277   }
 278   return realloc_failures;
 279 }
 280 
 281 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 282                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 283   JavaThread* deoptee_thread = chunk->at(0)->thread();
 284   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 285   assert(thread == Thread::current(), "should be");
 286   HandleMark hm(thread);
 287 #ifndef PRODUCT
 288   bool first = true;
 289 #endif // !PRODUCT
 290   for (int i = 0; i < chunk->length(); i++) {
 291     compiledVFrame* cvf = chunk->at(i);
 292     assert(cvf->scope() != NULL, "expect only compiled java frames");
 293     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 294     if (monitors->is_nonempty()) {
 295       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 296                                                      exec_mode, realloc_failures);

 594   // its caller's stack by. If the caller is a compiled frame then
 595   // we pretend that the callee has no parameters so that the
 596   // extension counts for the full amount of locals and not just
 597   // locals-parms. This is because without a c2i adapter the parm
 598   // area as created by the compiled frame will not be usable by
 599   // the interpreter. (Depending on the calling convention there
 600   // may not even be enough space).
 601 
 602   // QQQ I'd rather see this pushed down into last_frame_adjust
 603   // and have it take the sender (aka caller).
 604 
 605   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 606     caller_adjustment = last_frame_adjust(0, callee_locals);
 607   } else if (callee_locals > callee_parameters) {
 608     // The caller frame may need extending to accommodate
 609     // non-parameter locals of the first unpacked interpreted frame.
 610     // Compute that adjustment.
 611     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 612   }
 613 
 614   // If the sender is deoptimized then we must retrieve the address of the handler
 615   // since the frame will "magically" show the original pc before the deopt
 616   // and we'd undo the deopt.
 617 
 618   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 619   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 620     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 621   }
 622 
 623   assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
 624 
 625 #if INCLUDE_JVMCI
 626   if (exceptionObject() != NULL) {
 627     current->set_exception_oop(exceptionObject());
 628     exec_mode = Unpack_exception;
 629   }
 630 #endif
 631 
 632   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 633     assert(current->has_pending_exception(), "should have thrown OOME");
 634     current->set_exception_oop(current->pending_exception());

1079        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1080        default:;
1081      }
1082    }
1083    return NULL;
1084 }
1085 #endif // INCLUDE_JVMCI
1086 
1087 #if COMPILER2_OR_JVMCI
1088 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1089   Handle pending_exception(THREAD, thread->pending_exception());
1090   const char* exception_file = thread->exception_file();
1091   int exception_line = thread->exception_line();
1092   thread->clear_pending_exception();
1093 
1094   bool failures = false;
1095 
1096   for (int i = 0; i < objects->length(); i++) {
1097     assert(objects->at(i)->is_object(), "invalid debug information");
1098     ObjectValue* sv = (ObjectValue*) objects->at(i);
1099 
1100     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1101     oop obj = NULL;
1102 
1103     if (k->is_instance_klass()) {
1104 #if INCLUDE_JVMCI
1105       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1106       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1107         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1108         obj = get_cached_box(abv, fr, reg_map, THREAD);
1109         if (obj != NULL) {
1110           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1111           abv->set_cached(true);
1112         }
1113       }
1114 #endif // INCLUDE_JVMCI
1115 
1116       InstanceKlass* ik = InstanceKlass::cast(k);
1117       if (obj == NULL) {
1118 #ifdef COMPILER2
1119         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1120           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1121         } else {
1122           obj = ik->allocate_instance(THREAD);
1123         }
1124 #else
1125         obj = ik->allocate_instance(THREAD);
1126 #endif // COMPILER2
1127       }
1128     } else if (k->is_typeArray_klass()) {
1129       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1130       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1131       int len = sv->field_size() / type2size[ak->element_type()];
1132       obj = ak->allocate(len, THREAD);
1133     } else if (k->is_objArray_klass()) {
1134       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1135       obj = ak->allocate(sv->field_size(), THREAD);
1136     }
1137 
1138     if (obj == NULL) {
1139       failures = true;
1140     }
1141 
1142     assert(sv->value().is_null(), "redundant reallocation");
1143     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1144     CLEAR_PENDING_EXCEPTION;
1145     sv->set_value(obj);
1146   }
1147 
1148   if (failures) {
1149     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1150   } else if (pending_exception.not_null()) {
1151     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1152   }
1153 
1154   return failures;
1155 }
1156 
1157 #if INCLUDE_JVMCI
1158 /**
1159  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1160  * we need to somehow be able to recover the actual kind to be able to write the correct
1161  * amount of bytes.
1162  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1163  * the entries at index i + 1 to i + n - 1 are 'markers'.
1164  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1165  * expected form of the array would be:
1166  *
1167  * {b0, b1, b2, b3, INT, marker, b6, b7}
1168  *
1169  * Thus, in order to get back the size of the entry, we simply need to count the number
1170  * of marked entries
 1171  * of marked entries.
1172  * @param virtualArray the virtualized byte array
1173  * @param i index of the virtual entry we are recovering
1174  * @return The number of bytes the entry spans
1175  */
1176 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1309       default:
1310         ShouldNotReachHere();
1311     }
1312     index++;
1313   }
1314 }
1315 
1316 // restore fields of an eliminated object array
1317 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1318   for (int i = 0; i < sv->field_size(); i++) {
1319     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1320     assert(value->type() == T_OBJECT, "object element expected");
1321     obj->obj_at_put(i, value->get_obj()());
1322   }
1323 }
1324 
1325 class ReassignedField {
1326 public:
1327   int _offset;
1328   BasicType _type;
1329 public:
1330   ReassignedField() {
1331     _offset = 0;
1332     _type = T_ILLEGAL;
1333   }
1334 };
1335 
1336 int compare(ReassignedField* left, ReassignedField* right) {
1337   return left->_offset - right->_offset;
1338 }
1339 
1340 // Restore fields of an eliminated instance object using the same field order
1341 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1342 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1343   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1344   InstanceKlass* ik = klass;
1345   while (ik != NULL) {
1346     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1347       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1348         ReassignedField field;
1349         field._offset = fs.offset();
1350         field._type = Signature::basic_type(fs.signature());
1351         fields->append(field);
1352       }
1353     }
1354     ik = ik->superklass();
1355   }
1356   fields->sort(compare);
1357   for (int i = 0; i < fields->length(); i++) {
1358     intptr_t val;
1359     ScopeValue* scope_field = sv->field_at(svIndex);
1360     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1361     int offset = fields->at(i)._offset;
1362     BasicType type = fields->at(i)._type;
1363     switch (type) {
1364       case T_OBJECT: case T_ARRAY:
1365         assert(value->type() == T_OBJECT, "Agreement.");
1366         obj->obj_field_put(offset, value->get_obj()());
1367         break;
1368 
1369       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1370       case T_INT: case T_FLOAT: { // 4 bytes.
1371         assert(value->type() == T_INT, "Agreement.");
1372         bool big_value = false;
1373         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1374           if (scope_field->is_location()) {
1375             Location::Type type = ((LocationValue*) scope_field)->location().type();
1376             if (type == Location::dbl || type == Location::lng) {
1377               big_value = true;
1378             }
1379           }
1380           if (scope_field->is_constant_int()) {
1381             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1382             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1383               big_value = true;
1384             }

1424       case T_BYTE:
1425         assert(value->type() == T_INT, "Agreement.");
1426         val = value->get_int();
1427         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1428         break;
1429 
1430       case T_BOOLEAN:
1431         assert(value->type() == T_INT, "Agreement.");
1432         val = value->get_int();
1433         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1434         break;
1435 
1436       default:
1437         ShouldNotReachHere();
1438     }
1439     svIndex++;
1440   }
1441   return svIndex;
1442 }
1443 
1444 // restore fields of all eliminated objects and arrays
1445 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1446   for (int i = 0; i < objects->length(); i++) {
1447     ObjectValue* sv = (ObjectValue*) objects->at(i);
1448     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1449     Handle obj = sv->value();
1450     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1451 #ifndef PRODUCT
1452     if (PrintDeoptimizationDetails) {
1453       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1454     }
1455 #endif // !PRODUCT
1456 
1457     if (obj.is_null()) {
1458       continue;
1459     }
1460 
1461 #if INCLUDE_JVMCI
1462     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1463     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1464       continue;
1465     }
1466 #endif // INCLUDE_JVMCI
1467 #ifdef COMPILER2
1468     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1469       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1470       ScopeValue* payload = sv->field_at(0);
1471       if (payload->is_location() &&
1472           payload->as_LocationValue()->location().type() == Location::vector) {
1473 #ifndef PRODUCT
1474         if (PrintDeoptimizationDetails) {
1475           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1476           if (Verbose) {
1477             Handle obj = sv->value();
1478             k->oop_print_on(obj(), tty);
1479           }
1480         }
1481 #endif // !PRODUCT
1482         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1483       }
1484       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1485       // which could be restored after vector object allocation.
1486     }
 1487 #endif // COMPILER2
1488     if (k->is_instance_klass()) {
1489       InstanceKlass* ik = InstanceKlass::cast(k);
1490       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1491     } else if (k->is_typeArray_klass()) {
1492       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1493       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1494     } else if (k->is_objArray_klass()) {
1495       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1496     }
1497   }
1498 }
1499 
1500 
1501 // relock objects for which synchronization was eliminated
1502 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1503                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1504   bool relocked_objects = false;
1505   for (int i = 0; i < monitors->length(); i++) {
1506     MonitorInfo* mon_info = monitors->at(i);
1507     if (mon_info->eliminated()) {
1508       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1509       relocked_objects = true;
1510       if (!mon_info->owner_is_scalar_replaced()) {

1630     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1631     cm->log_identity(xtty);
1632     xtty->end_head();
1633     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1634       xtty->begin_elem("jvms bci='%d'", sd->bci());
1635       xtty->method(sd->method());
1636       xtty->end_elem();
1637       if (sd->is_top())  break;
1638     }
1639     xtty->tail("deoptimized");
1640   }
1641 
1642   Continuation::notify_deopt(thread, fr.sp());
1643 
1644   // Patch the compiled method so that when execution returns to it we will
1645   // deopt the execution state and return to the interpreter.
1646   fr.deoptimize(thread);
1647 }
1648 
1649 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
 1650   // Deoptimize only if the frame comes from compiled code.
1651   // Do not deoptimize the frame which is already patched
1652   // during the execution of the loops below.
1653   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1654     return;
1655   }
1656   ResourceMark rm;
1657   DeoptimizationMarker dm;
1658   deoptimize_single_frame(thread, fr, reason);
1659 }
1660 
1661 #if INCLUDE_JVMCI
1662 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1663   // there is no exception handler for this pc => deoptimize
1664   cm->make_not_entrant();
1665 
1666   // Use Deoptimization::deoptimize for all of its side-effects:
1667   // gathering traps statistics, logging...
 1668   // It also patches the return pc, but we do not care about that
1669   // since we return a continuation to the deopt_blob below.
1670   JavaThread* thread = JavaThread::current();

src/hotspot/share/runtime/deoptimization.cpp (new version, after the change)

  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "interpreter/bytecode.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logLevel.hpp"
  44 #include "logging/logMessage.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/constantPool.hpp"
  51 #include "oops/flatArrayKlass.hpp"
  52 #include "oops/flatArrayOop.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/inlineKlass.inline.hpp"
  59 #include "oops/typeArrayOop.inline.hpp"
  60 #include "oops/verifyOopClosure.hpp"
  61 #include "prims/jvmtiDeferredUpdates.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "prims/jvmtiThreadState.hpp"
  64 #include "prims/methodHandles.hpp"
  65 #include "prims/vectorSupport.hpp"
  66 #include "runtime/atomic.hpp"
  67 #include "runtime/continuation.hpp"
  68 #include "runtime/continuationEntry.inline.hpp"
  69 #include "runtime/deoptimization.hpp"
  70 #include "runtime/escapeBarrier.hpp"
  71 #include "runtime/fieldDescriptor.hpp"
  72 #include "runtime/fieldDescriptor.inline.hpp"
  73 #include "runtime/frame.inline.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/interfaceSupport.inline.hpp"
  76 #include "runtime/javaThread.hpp"
  77 #include "runtime/jniHandles.inline.hpp"
  78 #include "runtime/keepStackGCProcessed.hpp"

 188 
 189   return fetch_unroll_info_helper(current, exec_mode);
 190 JRT_END
 191 
 192 #if COMPILER2_OR_JVMCI
 193 // print information about reallocated objects
 194 static void print_objects(JavaThread* deoptee_thread,
 195                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 196   ResourceMark rm;
 197   stringStream st;  // change to logStream with logging
 198   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 199   fieldDescriptor fd;
 200 
 201   for (int i = 0; i < objects->length(); i++) {
 202     ObjectValue* sv = (ObjectValue*) objects->at(i);
 203     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 204     Handle obj = sv->value();
 205 
 206     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 207     k->print_value_on(&st);
 208     assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
 209     if (obj.is_null()) {
 210       if (k->is_inline_klass()) {
 211         st.print(" is null");
 212       } else {
 213         st.print(" allocation failed");
 214       }
 215     } else {
 216       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 217     }
 218     st.cr();
 219 
 220     if (Verbose && !obj.is_null()) {
 221       k->oop_print_on(obj(), &st);
 222     }
 223   }
 224   tty->print_raw(st.freeze());
 225 }
 226 
 227 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 228                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 229                                   bool& deoptimized_objects) {
 230   bool realloc_failures = false;
 231   assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
 232 
 233   JavaThread* deoptee_thread = chunk->at(0)->thread();
 234   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 235          "a frame can only be deoptimized by the owner thread");
 236 
 237   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 238 
 239   // The flag return_oop() indicates call sites which return oop
 240   // in compiled code. Such sites include java method calls,
 241   // runtime calls (for example, used to allocate new objects/arrays
 242   // on slow code path) and any other calls generated in compiled code.
 243   // It is not guaranteed that we can get such information here only
 244   // by analyzing bytecode in deoptimized frames. This is why this flag
 245   // is set during method compilation (see Compile::Process_OopMap_Node()).
 246   // If the previous frame was popped or if we are dispatching an exception,
 247   // we don't have an oop result.
 248   ScopeDesc* scope = chunk->at(0)->scope();
 249   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 250   // In case of the return of multiple values, we must take care
 251   // of all oop return values.
 252   GrowableArray<Handle> return_oops;
 253   InlineKlass* vk = NULL;
 254   if (save_oop_result && scope->return_scalarized()) {
 255     vk = InlineKlass::returned_inline_klass(map);
 256     if (vk != NULL) {
 257       vk->save_oop_fields(map, return_oops);
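      // The oop fields pushed above already carry the scalarized return value,
      // so there is no separate single-oop result left to save below.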
 258       save_oop_result = false;
 259     }
 260   }
 261   if (save_oop_result) {
 262     // Reallocation may trigger GC. If deoptimization happened on return from
 263     // call which returns oop we need to save it since it is not in oopmap.
 264     oop result = deoptee.saved_oop_result(&map);
 265     assert(oopDesc::is_oop_or_null(result), "must be oop");
 266     return_oops.push(Handle(thread, result));
 267     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 268     if (TraceDeoptimization) {
 269       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 270       tty->cr();
 271     }
 272   }
 273   if (objects != NULL || vk != NULL) {
 274     if (exec_mode == Deoptimization::Unpack_none) {
 275       assert(thread->thread_state() == _thread_in_vm, "assumption");
 276       JavaThread* THREAD = thread; // For exception macros.
 277       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 278       if (vk != NULL) {
 279         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 280       }
 281       if (objects != NULL) {
 282         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 283         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 284         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
 285       }
 286       deoptimized_objects = true;
 287     } else {
 288       JavaThread* current = thread; // For JRT_BLOCK
 289       JRT_BLOCK
 290       if (vk != NULL) {
 291         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 292       }
 293       if (objects != NULL) {
 294         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 295         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 296         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 297       }
 298       JRT_END
 299     }
 300     if (TraceDeoptimization) {
 301       print_objects(deoptee_thread, objects, realloc_failures);
 302     }
 303   }
 304   if (save_oop_result || vk != NULL) {
 305     // Restore result.
 306     assert(return_oops.length() == 1, "no inline type");
 307     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 308   }
 309   return realloc_failures;
 310 }
 311 
 312 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 313                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 314   JavaThread* deoptee_thread = chunk->at(0)->thread();
 315   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 316   assert(thread == Thread::current(), "should be");
 317   HandleMark hm(thread);
 318 #ifndef PRODUCT
 319   bool first = true;
 320 #endif // !PRODUCT
 321   for (int i = 0; i < chunk->length(); i++) {
 322     compiledVFrame* cvf = chunk->at(i);
 323     assert(cvf->scope() != NULL, "expect only compiled java frames");
 324     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 325     if (monitors->is_nonempty()) {
 326       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 327                                                      exec_mode, realloc_failures);

 625   // its caller's stack by. If the caller is a compiled frame then
 626   // we pretend that the callee has no parameters so that the
 627   // extension counts for the full amount of locals and not just
 628   // locals-parms. This is because without a c2i adapter the parm
 629   // area as created by the compiled frame will not be usable by
 630   // the interpreter. (Depending on the calling convention there
 631   // may not even be enough space).
 632 
 633   // QQQ I'd rather see this pushed down into last_frame_adjust
 634   // and have it take the sender (aka caller).
 635 
 636   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 637     caller_adjustment = last_frame_adjust(0, callee_locals);
 638   } else if (callee_locals > callee_parameters) {
 639     // The caller frame may need extending to accommodate
 640     // non-parameter locals of the first unpacked interpreted frame.
 641     // Compute that adjustment.
 642     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 643   }
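  // For example (hypothetical numbers): with callee_parameters == 2 and
  // callee_locals == 5, the caller is extended to cover the 3 non-parameter
  // local slots, scaled however last_frame_adjust() maps slots to stack words.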
 644 
 645   // If the sender is deoptimized we must retrieve the address of the handler
 646   // since the frame will "magically" show the original pc before the deopt
 647   // and we'd undo the deopt.
 648 
 649   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 650   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 651     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 652   }
 653 
 654   assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
 655 
 656 #if INCLUDE_JVMCI
 657   if (exceptionObject() != NULL) {
 658     current->set_exception_oop(exceptionObject());
 659     exec_mode = Unpack_exception;
 660   }
 661 #endif
 662 
 663   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 664     assert(current->has_pending_exception(), "should have thrown OOME");
 665     current->set_exception_oop(current->pending_exception());

1110        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1111        default:;
1112      }
1113    }
1114    return NULL;
1115 }
1116 #endif // INCLUDE_JVMCI
1117 
1118 #if COMPILER2_OR_JVMCI
1119 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1120   Handle pending_exception(THREAD, thread->pending_exception());
1121   const char* exception_file = thread->exception_file();
1122   int exception_line = thread->exception_line();
1123   thread->clear_pending_exception();
1124 
1125   bool failures = false;
1126 
1127   for (int i = 0; i < objects->length(); i++) {
1128     assert(objects->at(i)->is_object(), "invalid debug information");
1129     ObjectValue* sv = (ObjectValue*) objects->at(i);
1130     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1131 
1132     // Check if the object may be null and has an additional is_init input that needs
1133     // to be checked before using the field values. Skip re-allocation if it is null.
1134     if (sv->maybe_null()) {
1135       assert(k->is_inline_klass(), "must be an inline klass");
1136       intptr_t init_value = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_int();
1137       jint is_init = (jint)*((jint*)&init_value);
1138       if (is_init == 0) {
1139         continue;
1140       }
1141     }
1142 
1143     oop obj = NULL;
1144     if (k->is_instance_klass()) {
1145 #if INCLUDE_JVMCI
1146       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1147       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1148         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1149         obj = get_cached_box(abv, fr, reg_map, THREAD);
1150         if (obj != NULL) {
1151           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1152           abv->set_cached(true);
1153         }
1154       }
1155 #endif // INCLUDE_JVMCI
1156 
1157       InstanceKlass* ik = InstanceKlass::cast(k);
1158       if (obj == NULL) {
1159 #ifdef COMPILER2
1160         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1161           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1162         } else {
1163           obj = ik->allocate_instance(THREAD);
1164         }
1165 #else
1166         obj = ik->allocate_instance(THREAD);
1167 #endif // COMPILER2
1168       }
1169     } else if (k->is_flatArray_klass()) {
1170       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1171       // Inline type array must be zeroed because not all memory is reassigned
1172       obj = ak->allocate(sv->field_size(), THREAD);
1173     } else if (k->is_typeArray_klass()) {
1174       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1175       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1176       int len = sv->field_size() / type2size[ak->element_type()];
1177       obj = ak->allocate(len, THREAD);
1178     } else if (k->is_objArray_klass()) {
1179       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1180       obj = ak->allocate(sv->field_size(), THREAD);
1181     }
1182 
1183     if (obj == NULL) {
1184       failures = true;
1185     }
1186 
1187     assert(sv->value().is_null(), "redundant reallocation");
1188     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1189     CLEAR_PENDING_EXCEPTION;
1190     sv->set_value(obj);
1191   }
1192 
1193   if (failures) {
1194     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1195   } else if (pending_exception.not_null()) {
1196     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1197   }
1198 
1199   return failures;
1200 }
1201 
 1202 // We're deoptimizing at the return of a call and the inline type fields are
1203 // in registers. When we go back to the interpreter, it will expect a
1204 // reference to an inline type instance. Allocate and initialize it from
1205 // the register values here.
1206 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1207   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1208   if (new_vt == NULL) {
1209     CLEAR_PENDING_EXCEPTION;
1210     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1211   }
1212   return_oops.clear();
1213   return_oops.push(Handle(THREAD, new_vt));
1214   return false;
1215 }
1216 
1217 #if INCLUDE_JVMCI
1218 /**
1219  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1220  * we need to somehow be able to recover the actual kind to be able to write the correct
1221  * amount of bytes.
1222  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1223  * the entries at index i + 1 to i + n - 1 are 'markers'.
1224  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1225  * expected form of the array would be:
1226  *
1227  * {b0, b1, b2, b3, INT, marker, b6, b7}
1228  *
1229  * Thus, in order to get back the size of the entry, we simply need to count the number
1230  * of marked entries
 1231  * of marked entries.
1232  * @param virtualArray the virtualized byte array
1233  * @param i index of the virtual entry we are recovering
1234  * @return The number of bytes the entry spans
1235  */
1236 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
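  // (Body elided in this hunk.) A minimal sketch of the counting idea described
  // above, assuming the trailing entries answer ScopeValue::is_marker() as the
  // MarkerValue debug-info type does, might look like:
  //
  //   int index = i;
  //   while (++index < virtualArray->field_size() &&
  //          virtualArray->field_at(index)->is_marker()) {}
  //   return index - i; // the value entry plus its trailing markers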

1369       default:
1370         ShouldNotReachHere();
1371     }
1372     index++;
1373   }
1374 }
1375 
1376 // restore fields of an eliminated object array
1377 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1378   for (int i = 0; i < sv->field_size(); i++) {
1379     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1380     assert(value->type() == T_OBJECT, "object element expected");
1381     obj->obj_at_put(i, value->get_obj()());
1382   }
1383 }
1384 
1385 class ReassignedField {
1386 public:
1387   int _offset;
1388   BasicType _type;
1389   InstanceKlass* _klass;
1390 public:
1391   ReassignedField() {
1392     _offset = 0;
1393     _type = T_ILLEGAL;
1394     _klass = NULL;
1395   }
1396 };
1397 
1398 int compare(ReassignedField* left, ReassignedField* right) {
1399   return left->_offset - right->_offset;
1400 }
1401 
1402 // Restore fields of an eliminated instance object using the same field order
1403 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1404 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1405   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1406   InstanceKlass* ik = klass;
1407   while (ik != NULL) {
1408     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1409       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1410         ReassignedField field;
1411         field._offset = fs.offset();
1412         field._type = Signature::basic_type(fs.signature());
1413         if (fs.signature()->is_Q_signature()) {
1414           if (fs.is_inlined()) {
1415             // Resolve klass of flattened inline type field
1416             field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1417           } else {
1418             field._type = T_OBJECT;
1419           }
1420         }
1421         fields->append(field);
1422       }
1423     }
1424     ik = ik->superklass();
1425   }
1426   fields->sort(compare);
1427   for (int i = 0; i < fields->length(); i++) {
1428     BasicType type = fields->at(i)._type;
1429     int offset = base_offset + fields->at(i)._offset;
1430     // Check for flattened inline type field before accessing the ScopeValue because it might not have any fields
1431     if (type == T_PRIMITIVE_OBJECT) {
1432       // Recursively re-assign flattened inline type fields
1433       InstanceKlass* vk = fields->at(i)._klass;
1434       assert(vk != NULL, "must be resolved");
1435       offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1436       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1437       continue; // Continue because we don't need to increment svIndex
1438     }
1439     intptr_t val;
1440     ScopeValue* scope_field = sv->field_at(svIndex);
1441     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1442     switch (type) {
1443       case T_OBJECT:
1444       case T_ARRAY:
1445         assert(value->type() == T_OBJECT, "Agreement.");
1446         obj->obj_field_put(offset, value->get_obj()());
1447         break;
1448 
1449       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1450       case T_INT: case T_FLOAT: { // 4 bytes.
1451         assert(value->type() == T_INT, "Agreement.");
1452         bool big_value = false;
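        // The compiler may have merged two adjacent 32-bit fields into one
        // 64-bit value; the location/constant checks below detect that case.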
1453         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1454           if (scope_field->is_location()) {
1455             Location::Type type = ((LocationValue*) scope_field)->location().type();
1456             if (type == Location::dbl || type == Location::lng) {
1457               big_value = true;
1458             }
1459           }
1460           if (scope_field->is_constant_int()) {
1461             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1462             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1463               big_value = true;
1464             }

1504       case T_BYTE:
1505         assert(value->type() == T_INT, "Agreement.");
1506         val = value->get_int();
1507         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1508         break;
1509 
1510       case T_BOOLEAN:
1511         assert(value->type() == T_INT, "Agreement.");
1512         val = value->get_int();
1513         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1514         break;
1515 
1516       default:
1517         ShouldNotReachHere();
1518     }
1519     svIndex++;
1520   }
1521   return svIndex;
1522 }
1523 
1524 // restore fields of an eliminated inline type array
1525 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1526   InlineKlass* vk = vak->element_klass();
1527   assert(vk->flatten_array(), "should only be used for flattened inline type arrays");
1528   // Adjust offset to omit oop header
1529   int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
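  // Field offsets computed by reassign_fields_by_klass() assume a standalone heap
  // instance (header included), so re-bias them: start from the array payload base
  // and subtract first_field_offset(), where an instance's fields begin.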
1530   // Initialize all elements of the flattened inline type array
1531   for (int i = 0; i < sv->field_size(); i++) {
1532     ScopeValue* val = sv->field_at(i);
1533     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1534     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1535   }
1536 }
1537 
1538 // restore fields of all eliminated objects and arrays
1539 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1540   for (int i = 0; i < objects->length(); i++) {
1541     ObjectValue* sv = (ObjectValue*) objects->at(i);
1542     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1543     Handle obj = sv->value();
1544     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1545 #ifndef PRODUCT
1546     if (PrintDeoptimizationDetails) {
1547       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1548     }
1549 #endif // !PRODUCT
1550 
1551     if (obj.is_null()) {
1552       continue;
1553     }
1554 
1555 #if INCLUDE_JVMCI
1556     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1557     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1558       continue;
1559     }
1560 #endif // INCLUDE_JVMCI
1561 #ifdef COMPILER2
1562     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1563       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1564       ScopeValue* payload = sv->field_at(0);
1565       if (payload->is_location() &&
1566           payload->as_LocationValue()->location().type() == Location::vector) {
1567 #ifndef PRODUCT
1568         if (PrintDeoptimizationDetails) {
1569           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1570           if (Verbose) {
1571             Handle obj = sv->value();
1572             k->oop_print_on(obj(), tty);
1573           }
1574         }
1575 #endif // !PRODUCT
1576         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1577       }
1578       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1579       // which could be restored after vector object allocation.
1580     }
 1581 #endif // COMPILER2
1582     if (k->is_instance_klass()) {
1583       InstanceKlass* ik = InstanceKlass::cast(k);
1584       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1585     } else if (k->is_flatArray_klass()) {
1586       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1587       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1588     } else if (k->is_typeArray_klass()) {
1589       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1590       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1591     } else if (k->is_objArray_klass()) {
1592       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1593     }
1594   }
1595 }
1596 
1597 
1598 // relock objects for which synchronization was eliminated
1599 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1600                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1601   bool relocked_objects = false;
1602   for (int i = 0; i < monitors->length(); i++) {
1603     MonitorInfo* mon_info = monitors->at(i);
1604     if (mon_info->eliminated()) {
1605       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1606       relocked_objects = true;
1607       if (!mon_info->owner_is_scalar_replaced()) {

1727     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1728     cm->log_identity(xtty);
1729     xtty->end_head();
1730     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1731       xtty->begin_elem("jvms bci='%d'", sd->bci());
1732       xtty->method(sd->method());
1733       xtty->end_elem();
1734       if (sd->is_top())  break;
1735     }
1736     xtty->tail("deoptimized");
1737   }
1738 
1739   Continuation::notify_deopt(thread, fr.sp());
1740 
1741   // Patch the compiled method so that when execution returns to it we will
1742   // deopt the execution state and return to the interpreter.
1743   fr.deoptimize(thread);
1744 }
1745 
1746 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1747   // Deoptimize only if the frame comes from compiled code.
1748   // Do not deoptimize the frame which is already patched
1749   // during the execution of the loops below.
1750   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1751     return;
1752   }
1753   ResourceMark rm;
1754   DeoptimizationMarker dm;
1755   deoptimize_single_frame(thread, fr, reason);
1756 }
1757 
1758 #if INCLUDE_JVMCI
1759 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1760   // there is no exception handler for this pc => deoptimize
1761   cm->make_not_entrant();
1762 
1763   // Use Deoptimization::deoptimize for all of its side-effects:
1764   // gathering traps statistics, logging...
 1765   // It also patches the return pc, but we do not care about that
1766   // since we return a continuation to the deopt_blob below.
1767   JavaThread* thread = JavaThread::current();