< prev index next >

src/hotspot/share/runtime/deoptimization.cpp

Print this page

  26 #include "jvm.h"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"


  46 #include "oops/method.hpp"
  47 #include "oops/objArrayKlass.hpp"
  48 #include "oops/objArrayOop.inline.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/fieldStreams.inline.hpp"

  51 #include "oops/typeArrayOop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "prims/jvmtiDeferredUpdates.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/vectorSupport.hpp"
  57 #include "prims/methodHandles.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/deoptimization.hpp"
  60 #include "runtime/escapeBarrier.hpp"
  61 #include "runtime/fieldDescriptor.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/jniHandles.inline.hpp"
  67 #include "runtime/keepStackGCProcessed.hpp"
  68 #include "runtime/objectMonitor.inline.hpp"
  69 #include "runtime/osThread.hpp"
  70 #include "runtime/safepointVerifiers.hpp"

 180   return fetch_unroll_info_helper(current, exec_mode);
 181 JRT_END
 182 
 183 #if COMPILER2_OR_JVMCI
 184 #ifndef PRODUCT
 185 // print information about reallocated objects
 186 static void print_objects(JavaThread* deoptee_thread,
 187                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 188   ResourceMark rm;
 189   stringStream st;  // change to logStream with logging
 190   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 191   fieldDescriptor fd;
 192 
 193   for (int i = 0; i < objects->length(); i++) {
 194     ObjectValue* sv = (ObjectValue*) objects->at(i);
 195     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 196     Handle obj = sv->value();
 197 
 198     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 199     k->print_value_on(&st);
 200     assert(obj.not_null() || realloc_failures, "reallocation was missed");
 201     if (obj.is_null()) {
 202       st.print(" allocation failed");




 203     } else {
 204       st.print(" allocated (%d bytes)", obj->size() * HeapWordSize);
 205     }
 206     st.cr();
 207 
 208     if (Verbose && !obj.is_null()) {
 209       k->oop_print_on(obj(), &st);
 210     }
 211   }
 212   tty->print_raw(st.as_string());
 213 }
 214 #endif
 215 
// Reallocate the scalar-replaced objects described by the innermost scope of
// the deoptimized frame chunk. Returns true if any reallocation failed (the
// caller then unwinds with an OOME). Because the allocations may trigger a GC,
// an oop being returned by the deoptee call site is saved in a Handle first
// and restored into the frame afterwards.
 216 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 217                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 218                                   bool& deoptimized_objects) {
 219   bool realloc_failures = false;
 220   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 221 
 222   JavaThread* deoptee_thread = chunk->at(0)->thread();
 223   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 224          "a frame can only be deoptimized by the owner thread");
 225 
 226   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 227 
 228   // The flag return_oop() indicates call sites which return oop
 229   // in compiled code. Such sites include java method calls,
 230   // runtime calls (for example, used to allocate new objects/arrays
 231   // on slow code path) and any other calls generated in compiled code.
 232   // It is not guaranteed that we can get such information here only
 233   // by analyzing bytecode in deoptimized frames. This is why this flag
 234   // is set during method compilation (see Compile::Process_OopMap_Node()).
 235   // If the previous frame was popped or if we are dispatching an exception,
 236   // we don't have an oop result.
 237   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 238   Handle return_value;











 239   if (save_oop_result) {
 240     // Reallocation may trigger GC. If deoptimization happened on return from
 241     // call which returns oop we need to save it since it is not in oopmap.
 242     oop result = deoptee.saved_oop_result(&map);
 243     assert(oopDesc::is_oop_or_null(result), "must be oop");
 244     return_value = Handle(thread, result);
 245     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 246     if (TraceDeoptimization) {
 247       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 248     }
 249   }
 250   if (objects != NULL) {
     // Unpack_none: we are already in VM state (asserted below), so no
     // state transition is needed and a pending OOM is cleared here.
 251     if (exec_mode == Deoptimization::Unpack_none) {
 252       assert(thread->thread_state() == _thread_in_vm, "assumption");
 253       JavaThread* THREAD = thread; // For exception macros.
 254       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 255       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));







 256       deoptimized_objects = true;
 257     } else {
       // Other exec modes arrive from the deopt blob: wrap the allocating
       // call in a JRT_BLOCK to perform the thread-state transition.
 258       JavaThread* current = thread; // For JRT_BLOCK
 259       JRT_BLOCK
 260       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);







 261       JRT_END
 262     }
     // JVMCI-compiled code describes internal fields too, so they must not be skipped.
 263     bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 264     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 265 #ifndef PRODUCT
 266     if (TraceDeoptimization) {
 267       print_objects(deoptee_thread, objects, realloc_failures);
 268     }
 269 #endif
 270   }
 271   if (save_oop_result) {
 272     // Restore result.
 273     deoptee.set_saved_oop_result(&map, return_value());

 274   }
 275   return realloc_failures;
 276 }
 277 
 278 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 279                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 280   JavaThread* deoptee_thread = chunk->at(0)->thread();
 281   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 282   assert(thread == Thread::current(), "should be");
 283   HandleMark hm(thread);
 284 #ifndef PRODUCT
 285   bool first = true;
 286 #endif
 287   for (int i = 0; i < chunk->length(); i++) {
 288     compiledVFrame* cvf = chunk->at(i);
 289     assert (cvf->scope() != NULL,"expect only compiled java frames");
 290     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 291     if (monitors->is_nonempty()) {
 292       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 293                                                      exec_mode, realloc_failures);

 577   // its caller's stack by. If the caller is a compiled frame then
 578   // we pretend that the callee has no parameters so that the
 579   // extension counts for the full amount of locals and not just
 580   // locals-parms. This is because without a c2i adapter the parm
 581   // area as created by the compiled frame will not be usable by
 582   // the interpreter. (Depending on the calling convention there
 583   // may not even be enough space).
 584 
 585   // QQQ I'd rather see this pushed down into last_frame_adjust
 586   // and have it take the sender (aka caller).
 587 
 588   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 589     caller_adjustment = last_frame_adjust(0, callee_locals);
 590   } else if (callee_locals > callee_parameters) {
 591     // The caller frame may need extending to accommodate
 592     // non-parameter locals of the first unpacked interpreted frame.
 593     // Compute that adjustment.
 594     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 595   }
 596 
 597   // If the sender is deoptimized we must retrieve the address of the handler
 598   // since the frame will "magically" show the original pc before the deopt
 599   // and we'd undo the deopt.
 600 
 601   frame_pcs[0] = deopt_sender.raw_pc();
 602 
 603   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 604 
 605 #if INCLUDE_JVMCI
 606   if (exceptionObject() != NULL) {
 607     current->set_exception_oop(exceptionObject());
 608     exec_mode = Unpack_exception;
 609   }
 610 #endif
 611 
 612   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 613     assert(current->has_pending_exception(), "should have thrown OOME");
 614     current->set_exception_oop(current->pending_exception());
 615     current->clear_pending_exception();
 616     exec_mode = Unpack_exception;
 617   }

1042        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1043        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1044        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1045        default:;
1046      }
1047    }
1048    return NULL;
1049 }
1050 
// Allocate heap objects for every scalar-replaced object described in
// 'objects' and record each oop in its ObjectValue. Any exception already
// pending on entry is stashed and re-installed on exit so the allocations
// here cannot clobber it. Per-object allocation failures are tolerated (the
// pending OOM is cleared and the loop continues) so that as many objects as
// possible are materialized; if any allocation failed, a single pre-allocated
// OOME is thrown at the end and true is returned.
1051 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  // Save and clear any exception that was pending before we start allocating.
1052   Handle pending_exception(THREAD, thread->pending_exception());
1053   const char* exception_file = thread->exception_file();
1054   int exception_line = thread->exception_line();
1055   thread->clear_pending_exception();
1056 
1057   bool failures = false;
1058 
1059   for (int i = 0; i < objects->length(); i++) {
1060     assert(objects->at(i)->is_object(), "invalid debug information");
1061     ObjectValue* sv = (ObjectValue*) objects->at(i);
1062 
1063     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1064     oop obj = NULL;
1065 












1066     if (k->is_instance_klass()) {
       // Autoboxed primitives may come from the Integer/Byte/... caches;
       // reuse the cached instance instead of allocating a new one.
1067       if (sv->is_auto_box()) {
1068         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1069         obj = get_cached_box(abv, fr, reg_map, THREAD);
1070         if (obj != NULL) {
1071           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1072           abv->set_cached(true);
1073         }
1074       }
1075 
1076       InstanceKlass* ik = InstanceKlass::cast(k);
1077       if (obj == NULL) {
1078 #ifdef COMPILER2
         // Vector API payloads get their value restored during allocation.
1079         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1080           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1081         } else {
1082           obj = ik->allocate_instance(THREAD);
1083         }
1084 #else
1085         obj = ik->allocate_instance(THREAD);
1086 #endif // COMPILER2
1087       }




1088     } else if (k->is_typeArray_klass()) {
1089       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
       // field_size() counts stack slots; divide by slots-per-element to get the length.
1090       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1091       int len = sv->field_size() / type2size[ak->element_type()];
1092       obj = ak->allocate(len, THREAD);
1093     } else if (k->is_objArray_klass()) {
1094       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1095       obj = ak->allocate(sv->field_size(), THREAD);
1096     }
1097 
1098     if (obj == NULL) {
1099       failures = true;
1100     }
1101 
1102     assert(sv->value().is_null(), "redundant reallocation");
1103     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
     // Swallow a per-object OOM so the remaining objects still get reallocated.
1104     CLEAR_PENDING_EXCEPTION;
1105     sv->set_value(obj);
1106   }
1107 
1108   if (failures) {
     // Use the pre-allocated OOME so throwing cannot itself fail.
1109     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1110   } else if (pending_exception.not_null()) {
     // Re-install the exception that was pending when we were called.
1111     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1112   }
1113 
1114   return failures;
1115 }
1116 















1117 #if INCLUDE_JVMCI
1118 /**
1119  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1120  * we need to somehow be able to recover the actual kind to be able to write the correct
1121  * amount of bytes.
1122  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1123  * the entries at index n + 1 to n + i are 'markers'.
1124  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1125  * expected form of the array would be:
1126  *
1127  * {b0, b1, b2, b3, INT, marker, b6, b7}
1128  *
1129  * Thus, in order to get back the size of the entry, we simply need to count the number
1130  * of marked entries
1131  *
1132  * @param virtualArray the virtualized byte array
1133  * @param i index of the virtual entry we are recovering
1134  * @return The number of bytes the entry spans
1135  */
1136 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1269       default:
1270         ShouldNotReachHere();
1271     }
1272     index++;
1273   }
1274 }
1275 
1276 // restore fields of an eliminated object array
1277 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1278   for (int i = 0; i < sv->field_size(); i++) {
1279     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1280     assert(value->type() == T_OBJECT, "object element expected");
1281     obj->obj_at_put(i, value->get_obj()());
1282   }
1283 }
1284 
1285 class ReassignedField {
1286 public:
1287   int _offset;
1288   BasicType _type;

1289 public:
1290   ReassignedField() {
1291     _offset = 0;
1292     _type = T_ILLEGAL;

1293   }
1294 };
1295 
1296 int compare(ReassignedField* left, ReassignedField* right) {
1297   return left->_offset - right->_offset;
1298 }
1299 
1300 // Restore fields of an eliminated instance object using the same field order
1301 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1302 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1303   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1304   InstanceKlass* ik = klass;
1305   while (ik != NULL) {
1306     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1307       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1308         ReassignedField field;
1309         field._offset = fs.offset();
1310         field._type = Signature::basic_type(fs.signature());








1311         fields->append(field);
1312       }
1313     }
1314     ik = ik->superklass();
1315   }
1316   fields->sort(compare);
1317   for (int i = 0; i < fields->length(); i++) {











1318     intptr_t val;
1319     ScopeValue* scope_field = sv->field_at(svIndex);
1320     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1321     int offset = fields->at(i)._offset;
1322     BasicType type = fields->at(i)._type;
1323     switch (type) {
1324       case T_OBJECT: case T_ARRAY:

1325         assert(value->type() == T_OBJECT, "Agreement.");
1326         obj->obj_field_put(offset, value->get_obj()());
1327         break;
1328 
1329       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1330       case T_INT: case T_FLOAT: { // 4 bytes.
1331         assert(value->type() == T_INT, "Agreement.");
1332         bool big_value = false;
1333         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1334           if (scope_field->is_location()) {
1335             Location::Type type = ((LocationValue*) scope_field)->location().type();
1336             if (type == Location::dbl || type == Location::lng) {
1337               big_value = true;
1338             }
1339           }
1340           if (scope_field->is_constant_int()) {
1341             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1342             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1343               big_value = true;
1344             }

1384       case T_BYTE:
1385         assert(value->type() == T_INT, "Agreement.");
1386         val = value->get_int();
1387         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1388         break;
1389 
1390       case T_BOOLEAN:
1391         assert(value->type() == T_INT, "Agreement.");
1392         val = value->get_int();
1393         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1394         break;
1395 
1396       default:
1397         ShouldNotReachHere();
1398     }
1399     svIndex++;
1400   }
1401   return svIndex;
1402 }
1403 














1404 // restore fields of all eliminated objects and arrays
// For each rematerialized object, write the field/element values recorded in
// the debug info back into the newly allocated heap object. Objects whose
// reallocation failed (null handle) and cached autobox instances are skipped.
1405 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1406   for (int i = 0; i < objects->length(); i++) {
1407     ObjectValue* sv = (ObjectValue*) objects->at(i);
1408     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1409     Handle obj = sv->value();
1410     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1411     if (PrintDeoptimizationDetails) {
1412       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1413     }
     // Nothing to fill in if the allocation failed.
1414     if (obj.is_null()) {
1415       continue;
1416     }
1417 
1418     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1419     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1420       continue;
1421     }
1422 #ifdef COMPILER2
1423     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1424       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1425       ScopeValue* payload = sv->field_at(0);
1426       if (payload->is_location() &&
1427           payload->as_LocationValue()->location().type() == Location::vector) {
1428         if (PrintDeoptimizationDetails) {
1429           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1430           if (Verbose) {
1431             Handle obj = sv->value();
1432             k->oop_print_on(obj(), tty);
1433           }
1434         }
1435         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1436       }
1437       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1438       // which could be restored after vector object allocation.
1439     }
1440 #endif
     // Dispatch on the klass kind: instance fields, primitive array
     // elements, or object array elements.
1441     if (k->is_instance_klass()) {
1442       InstanceKlass* ik = InstanceKlass::cast(k);
1443       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);



1444     } else if (k->is_typeArray_klass()) {
1445       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1446       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1447     } else if (k->is_objArray_klass()) {
1448       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1449     }
1450   }
1451 }
1452 
1453 
1454 // relock objects for which synchronization was eliminated
1455 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1456                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1457   bool relocked_objects = false;
1458   for (int i = 0; i < monitors->length(); i++) {
1459     MonitorInfo* mon_info = monitors->at(i);
1460     if (mon_info->eliminated()) {
1461       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1462       relocked_objects = true;
1463       if (!mon_info->owner_is_scalar_replaced()) {

1590 
1591     ttyLocker ttyl;
1592     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1593     cm->log_identity(xtty);
1594     xtty->end_head();
1595     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1596       xtty->begin_elem("jvms bci='%d'", sd->bci());
1597       xtty->method(sd->method());
1598       xtty->end_elem();
1599       if (sd->is_top())  break;
1600     }
1601     xtty->tail("deoptimized");
1602   }
1603 
1604   // Patch the compiled method so that when execution returns to it we will
1605   // deopt the execution state and return to the interpreter.
1606   fr.deoptimize(thread);
1607 }
1608 
1609 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1610   // Deoptimize only if the frame comes from compile code.
1611   // Do not deoptimize the frame which is already patched
1612   // during the execution of the loops below.
1613   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1614     return;
1615   }
1616   ResourceMark rm;
1617   DeoptimizationMarker dm;
1618   deoptimize_single_frame(thread, fr, reason);
1619 }
1620 
1621 #if INCLUDE_JVMCI
1622 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1623   // there is no exception handler for this pc => deoptimize
1624   cm->make_not_entrant();
1625 
1626   // Use Deoptimization::deoptimize for all of its side-effects:
1627   // gathering traps statistics, logging...
1628   // it also patches the return pc but we do not care about that
1629   // since we return a continuation to the deopt_blob below.
1630   JavaThread* thread = JavaThread::current();

  26 #include "jvm.h"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"
  46 #include "oops/flatArrayKlass.hpp"
  47 #include "oops/flatArrayOop.hpp"
  48 #include "oops/method.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/objArrayOop.inline.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/inlineKlass.inline.hpp"
  54 #include "oops/typeArrayOop.inline.hpp"
  55 #include "oops/verifyOopClosure.hpp"
  56 #include "prims/jvmtiDeferredUpdates.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "prims/jvmtiThreadState.hpp"
  59 #include "prims/vectorSupport.hpp"
  60 #include "prims/methodHandles.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/deoptimization.hpp"
  63 #include "runtime/escapeBarrier.hpp"
  64 #include "runtime/fieldDescriptor.hpp"
  65 #include "runtime/fieldDescriptor.inline.hpp"
  66 #include "runtime/frame.inline.hpp"
  67 #include "runtime/handles.inline.hpp"
  68 #include "runtime/interfaceSupport.inline.hpp"
  69 #include "runtime/jniHandles.inline.hpp"
  70 #include "runtime/keepStackGCProcessed.hpp"
  71 #include "runtime/objectMonitor.inline.hpp"
  72 #include "runtime/osThread.hpp"
  73 #include "runtime/safepointVerifiers.hpp"

 183   return fetch_unroll_info_helper(current, exec_mode);
 184 JRT_END
 185 
 186 #if COMPILER2_OR_JVMCI
 187 #ifndef PRODUCT
 188 // print information about reallocated objects
 189 static void print_objects(JavaThread* deoptee_thread,
 190                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 191   ResourceMark rm;
 192   stringStream st;  // change to logStream with logging
 193   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 194   fieldDescriptor fd;
 195 
 196   for (int i = 0; i < objects->length(); i++) {
 197     ObjectValue* sv = (ObjectValue*) objects->at(i);
 198     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 199     Handle obj = sv->value();
 200 
 201     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 202     k->print_value_on(&st);
 203     assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
 204     if (obj.is_null()) {
 205       if (k->is_inline_klass()) {
 206         st.print(" is null");
 207       } else {
 208         st.print(" allocation failed");
 209       }
 210     } else {
 211       st.print(" allocated (%d bytes)", obj->size() * HeapWordSize);
 212     }
 213     st.cr();
 214 
 215     if (Verbose && !obj.is_null()) {
 216       k->oop_print_on(obj(), &st);
 217     }
 218   }
 219   tty->print_raw(st.as_string());
 220 }
 221 #endif
 222 
// Reallocate the scalar-replaced objects described by the innermost scope of
// the deoptimized frame chunk, including scalarized inline-type return values
// (Valhalla): when the call site returns a scalarized inline type, the oop
// fields in registers are saved first and the inline object is reallocated
// from them. Returns true if any reallocation failed.
 223 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 224                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 225                                   bool& deoptimized_objects) {
 226   bool realloc_failures = false;
 227   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 228 
 229   JavaThread* deoptee_thread = chunk->at(0)->thread();
 230   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 231          "a frame can only be deoptimized by the owner thread");
 232 
 233   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 234 
 235   // The flag return_oop() indicates call sites which return oop
 236   // in compiled code. Such sites include java method calls,
 237   // runtime calls (for example, used to allocate new objects/arrays
 238   // on slow code path) and any other calls generated in compiled code.
 239   // It is not guaranteed that we can get such information here only
 240   // by analyzing bytecode in deoptimized frames. This is why this flag
 241   // is set during method compilation (see Compile::Process_OopMap_Node()).
 242   // If the previous frame was popped or if we are dispatching an exception,
 243   // we don't have an oop result.
 244   ScopeDesc* scope = chunk->at(0)->scope();
 245   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 246   // In case of the return of multiple values, we must take care
 247   // of all oop return values.
 248   GrowableArray<Handle> return_oops;
 249   InlineKlass* vk = NULL;
 250   if (save_oop_result && scope->return_scalarized()) {
 251     vk = InlineKlass::returned_inline_klass(map);
 252     if (vk != NULL) {
       // Scalarized return: save the individual oop fields instead of a
       // single result oop; the inline object is reallocated below.
 253       vk->save_oop_fields(map, return_oops);
 254       save_oop_result = false;
 255     }
 256   }
 257   if (save_oop_result) {
 258     // Reallocation may trigger GC. If deoptimization happened on return from
 259     // call which returns oop we need to save it since it is not in oopmap.
 260     oop result = deoptee.saved_oop_result(&map);
 261     assert(oopDesc::is_oop_or_null(result), "must be oop");
 262     return_oops.push(Handle(thread, result));
 263     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 264     if (TraceDeoptimization) {
 265       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 266     }
 267   }
 268   if (objects != NULL || vk != NULL) {
     // Unpack_none: already in VM state (asserted below), pending OOM is cleared.
 269     if (exec_mode == Deoptimization::Unpack_none) {
 270       assert(thread->thread_state() == _thread_in_vm, "assumption");
 271       JavaThread* THREAD = thread; // For exception macros.
 272       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 273       if (vk != NULL) {
 274         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 275       }
 276       if (objects != NULL) {
         // NOTE(review): '||' short-circuits, so realloc_objects() is skipped
         // when the inline-type reallocation already failed — presumably
         // intentional (we unwind with OOME anyway), but confirm.
 277         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 278         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 279         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
 280       }
 281       deoptimized_objects = true;
 282     } else {
       // Other exec modes arrive from the deopt blob: JRT_BLOCK performs the
       // required thread-state transition around the allocating calls.
 283       JavaThread* current = thread; // For JRT_BLOCK
 284       JRT_BLOCK
 285       if (vk != NULL) {
 286         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 287       }
 288       if (objects != NULL) {
 289         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 290         bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 291         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 292       }
 293       JRT_END
 294     }


 295 #ifndef PRODUCT
     // NOTE(review): this branch is reachable with objects == NULL when only
     // an inline-type result was reallocated (vk != NULL); print_objects()
     // iterates 'objects' unconditionally — verify it tolerates NULL.
 296     if (TraceDeoptimization) {
 297       print_objects(deoptee_thread, objects, realloc_failures);
 298     }
 299 #endif
 300   }
 301   if (save_oop_result || vk != NULL) {
 302     // Restore result.
     // NOTE(review): asserts exactly one saved oop even on the vk path,
     // where save_oop_fields() filled return_oops — confirm the reallocated
     // inline object always collapses these to a single handle.
 303     assert(return_oops.length() == 1, "no inline type");
 304     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 305   }
 306   return realloc_failures;
 307 }
 308 
 309 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 310                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 311   JavaThread* deoptee_thread = chunk->at(0)->thread();
 312   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 313   assert(thread == Thread::current(), "should be");
 314   HandleMark hm(thread);
 315 #ifndef PRODUCT
 316   bool first = true;
 317 #endif
 318   for (int i = 0; i < chunk->length(); i++) {
 319     compiledVFrame* cvf = chunk->at(i);
 320     assert (cvf->scope() != NULL,"expect only compiled java frames");
 321     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 322     if (monitors->is_nonempty()) {
 323       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 324                                                      exec_mode, realloc_failures);

 608   // its caller's stack by. If the caller is a compiled frame then
 609   // we pretend that the callee has no parameters so that the
 610   // extension counts for the full amount of locals and not just
 611   // locals-parms. This is because without a c2i adapter the parm
 612   // area as created by the compiled frame will not be usable by
 613   // the interpreter. (Depending on the calling convention there
 614   // may not even be enough space).
 615 
 616   // QQQ I'd rather see this pushed down into last_frame_adjust
 617   // and have it take the sender (aka caller).
 618 
 619   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 620     caller_adjustment = last_frame_adjust(0, callee_locals);
 621   } else if (callee_locals > callee_parameters) {
 622     // The caller frame may need extending to accommodate
 623     // non-parameter locals of the first unpacked interpreted frame.
 624     // Compute that adjustment.
 625     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 626   }
 627 
 628   // If the sender is deoptimized we must retrieve the address of the handler
 629   // since the frame will "magically" show the original pc before the deopt
 630   // and we'd undo the deopt.
 631 
 632   frame_pcs[0] = deopt_sender.raw_pc();
 633 
 634   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 635 
 636 #if INCLUDE_JVMCI
 637   if (exceptionObject() != NULL) {
 638     current->set_exception_oop(exceptionObject());
 639     exec_mode = Unpack_exception;
 640   }
 641 #endif
 642 
 643   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 644     assert(current->has_pending_exception(), "should have thrown OOME");
 645     current->set_exception_oop(current->pending_exception());
 646     current->clear_pending_exception();
 647     exec_mode = Unpack_exception;
 648   }

1073        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1074        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1075        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1076        default:;
1077      }
1078    }
1079    return NULL;
1080 }
1081 
1082 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1083   Handle pending_exception(THREAD, thread->pending_exception());
1084   const char* exception_file = thread->exception_file();
1085   int exception_line = thread->exception_line();
1086   thread->clear_pending_exception();
1087 
1088   bool failures = false;
1089 
1090   for (int i = 0; i < objects->length(); i++) {
1091     assert(objects->at(i)->is_object(), "invalid debug information");
1092     ObjectValue* sv = (ObjectValue*) objects->at(i);

1093     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

1094 
1095     // Check if the object may be null and has an additional is_init input that needs
1096     // to be checked before using the field values. Skip re-allocation if it is null.
1097     if (sv->maybe_null()) {
1098       assert(k->is_inline_klass(), "must be an inline klass");
1099       intptr_t init_value = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_int();
1100       jint is_init = (jint)*((jint*)&init_value);
1101       if (is_init == 0) {
1102         continue;
1103       }
1104     }
1105 
1106     oop obj = NULL;
1107     if (k->is_instance_klass()) {
1108       if (sv->is_auto_box()) {
1109         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1110         obj = get_cached_box(abv, fr, reg_map, THREAD);
1111         if (obj != NULL) {
1112           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1113           abv->set_cached(true);
1114         }
1115       }
1116 
1117       InstanceKlass* ik = InstanceKlass::cast(k);
1118       if (obj == NULL) {
1119 #ifdef COMPILER2
1120         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1121           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1122         } else {
1123           obj = ik->allocate_instance(THREAD);
1124         }
1125 #else
1126         obj = ik->allocate_instance(THREAD);
1127 #endif // COMPILER2
1128       }
1129     } else if (k->is_flatArray_klass()) {
1130       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1131       // Inline type array must be zeroed because not all memory is reassigned
1132       obj = ak->allocate(sv->field_size(), THREAD);
1133     } else if (k->is_typeArray_klass()) {
1134       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1135       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1136       int len = sv->field_size() / type2size[ak->element_type()];
1137       obj = ak->allocate(len, THREAD);
1138     } else if (k->is_objArray_klass()) {
1139       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1140       obj = ak->allocate(sv->field_size(), THREAD);
1141     }
1142 
1143     if (obj == NULL) {
1144       failures = true;
1145     }
1146 
1147     assert(sv->value().is_null(), "redundant reallocation");
1148     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1149     CLEAR_PENDING_EXCEPTION;
1150     sv->set_value(obj);
1151   }
1152 
1153   if (failures) {
1154     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1155   } else if (pending_exception.not_null()) {
1156     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1157   }
1158 
1159   return failures;
1160 }
1161 
1162 // We're deoptimizing at the return of a call, inline type fields are
1163 // in registers. When we go back to the interpreter, it will expect a
1164 // reference to an inline type instance. Allocate and initialize it from
1165 // the register values here.
1166 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1167   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1168   if (new_vt == NULL) {
1169     CLEAR_PENDING_EXCEPTION;
1170     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1171   }
1172   return_oops.clear();
1173   return_oops.push(Handle(THREAD, new_vt));
1174   return false;
1175 }
1176 
1177 #if INCLUDE_JVMCI
1178 /**
1179  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1180  * we need to somehow be able to recover the actual kind to be able to write the correct
1181  * amount of bytes.
1182  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1183  * the entries at index i + 1 to i + n - 1 are 'markers'.
1184  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1185  * expected form of the array would be:
1186  *
1187  * {b0, b1, b2, b3, INT, marker, b6, b7}
1188  *
1189  * Thus, in order to get back the size of the entry, we simply need to count the number
1190  * of marked entries
1191  *
1192  * @param virtualArray the virtualized byte array
1193  * @param i index of the virtual entry we are recovering
1194  * @return The number of bytes the entry spans
1195  */
1196 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1329       default:
1330         ShouldNotReachHere();
1331     }
1332     index++;
1333   }
1334 }
1335 
1336 // restore fields of an eliminated object array
1337 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1338   for (int i = 0; i < sv->field_size(); i++) {
1339     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1340     assert(value->type() == T_OBJECT, "object element expected");
1341     obj->obj_at_put(i, value->get_obj()());
1342   }
1343 }
1344 
1345 class ReassignedField {
1346 public:
1347   int _offset;
1348   BasicType _type;
1349   InstanceKlass* _klass;
1350 public:
1351   ReassignedField() {
1352     _offset = 0;
1353     _type = T_ILLEGAL;
1354     _klass = NULL;
1355   }
1356 };
1357 
1358 int compare(ReassignedField* left, ReassignedField* right) {
1359   return left->_offset - right->_offset;
1360 }
1361 
1362 // Restore fields of an eliminated instance object using the same field order
1363 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1364 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1365   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1366   InstanceKlass* ik = klass;
1367   while (ik != NULL) {
1368     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1369       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1370         ReassignedField field;
1371         field._offset = fs.offset();
1372         field._type = Signature::basic_type(fs.signature());
1373         if (fs.signature()->is_Q_signature()) {
1374           if (fs.is_inlined()) {
1375             // Resolve klass of flattened inline type field
1376             field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1377           } else {
1378             field._type = T_OBJECT;
1379           }
1380         }
1381         fields->append(field);
1382       }
1383     }
1384     ik = ik->superklass();
1385   }
1386   fields->sort(compare);
1387   for (int i = 0; i < fields->length(); i++) {
1388     BasicType type = fields->at(i)._type;
1389     int offset = base_offset + fields->at(i)._offset;
1390     // Check for flattened inline type field before accessing the ScopeValue because it might not have any fields
1391     if (type == T_INLINE_TYPE) {
1392       // Recursively re-assign flattened inline type fields
1393       InstanceKlass* vk = fields->at(i)._klass;
1394       assert(vk != NULL, "must be resolved");
1395       offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1396       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1397       continue; // Continue because we don't need to increment svIndex
1398     }
1399     intptr_t val;
1400     ScopeValue* scope_field = sv->field_at(svIndex);
1401     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);


1402     switch (type) {
1403       case T_OBJECT:
1404       case T_ARRAY:
1405         assert(value->type() == T_OBJECT, "Agreement.");
1406         obj->obj_field_put(offset, value->get_obj()());
1407         break;
1408 
1409       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1410       case T_INT: case T_FLOAT: { // 4 bytes.
1411         assert(value->type() == T_INT, "Agreement.");
1412         bool big_value = false;
1413         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1414           if (scope_field->is_location()) {
1415             Location::Type type = ((LocationValue*) scope_field)->location().type();
1416             if (type == Location::dbl || type == Location::lng) {
1417               big_value = true;
1418             }
1419           }
1420           if (scope_field->is_constant_int()) {
1421             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1422             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1423               big_value = true;
1424             }

1464       case T_BYTE:
1465         assert(value->type() == T_INT, "Agreement.");
1466         val = value->get_int();
1467         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1468         break;
1469 
1470       case T_BOOLEAN:
1471         assert(value->type() == T_INT, "Agreement.");
1472         val = value->get_int();
1473         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1474         break;
1475 
1476       default:
1477         ShouldNotReachHere();
1478     }
1479     svIndex++;
1480   }
1481   return svIndex;
1482 }
1483 
1484 // restore fields of an eliminated inline type array
1485 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1486   InlineKlass* vk = vak->element_klass();
1487   assert(vk->flatten_array(), "should only be used for flattened inline type arrays");
1488   // Adjust offset to omit oop header
1489   int base_offset = arrayOopDesc::base_offset_in_bytes(T_INLINE_TYPE) - InlineKlass::cast(vk)->first_field_offset();
1490   // Initialize all elements of the flattened inline type array
1491   for (int i = 0; i < sv->field_size(); i++) {
1492     ScopeValue* val = sv->field_at(i);
1493     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1494     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1495   }
1496 }
1497 
1498 // restore fields of all eliminated objects and arrays
1499 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1500   for (int i = 0; i < objects->length(); i++) {
1501     ObjectValue* sv = (ObjectValue*) objects->at(i);
1502     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1503     Handle obj = sv->value();
1504     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1505     if (PrintDeoptimizationDetails) {
1506       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1507     }
1508     if (obj.is_null()) {
1509       continue;
1510     }
1511 
1512     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1513     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1514       continue;
1515     }
1516 #ifdef COMPILER2
1517     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1518       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1519       ScopeValue* payload = sv->field_at(0);
1520       if (payload->is_location() &&
1521           payload->as_LocationValue()->location().type() == Location::vector) {
1522         if (PrintDeoptimizationDetails) {
1523           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1524           if (Verbose) {
1525             Handle obj = sv->value();
1526             k->oop_print_on(obj(), tty);
1527           }
1528         }
1529         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1530       }
1531       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1532       // which could be restored after vector object allocation.
1533     }
1534 #endif
1535     if (k->is_instance_klass()) {
1536       InstanceKlass* ik = InstanceKlass::cast(k);
1537       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1538     } else if (k->is_flatArray_klass()) {
1539       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1540       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1541     } else if (k->is_typeArray_klass()) {
1542       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1543       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1544     } else if (k->is_objArray_klass()) {
1545       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1546     }
1547   }
1548 }
1549 
1550 
1551 // relock objects for which synchronization was eliminated
1552 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1553                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1554   bool relocked_objects = false;
1555   for (int i = 0; i < monitors->length(); i++) {
1556     MonitorInfo* mon_info = monitors->at(i);
1557     if (mon_info->eliminated()) {
1558       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1559       relocked_objects = true;
1560       if (!mon_info->owner_is_scalar_replaced()) {

1687 
1688     ttyLocker ttyl;
1689     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1690     cm->log_identity(xtty);
1691     xtty->end_head();
1692     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1693       xtty->begin_elem("jvms bci='%d'", sd->bci());
1694       xtty->method(sd->method());
1695       xtty->end_elem();
1696       if (sd->is_top())  break;
1697     }
1698     xtty->tail("deoptimized");
1699   }
1700 
1701   // Patch the compiled method so that when execution returns to it we will
1702   // deopt the execution state and return to the interpreter.
1703   fr.deoptimize(thread);
1704 }
1705 
1706 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1707   // Deoptimize only if the frame comes from compiled code.
1708   // Do not deoptimize the frame which is already patched
1709   // during the execution of the loops below.
1710   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1711     return;
1712   }
1713   ResourceMark rm;
1714   DeoptimizationMarker dm;
1715   deoptimize_single_frame(thread, fr, reason);
1716 }
1717 
1718 #if INCLUDE_JVMCI
1719 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1720   // there is no exception handler for this pc => deoptimize
1721   cm->make_not_entrant();
1722 
1723   // Use Deoptimization::deoptimize for all of its side-effects:
1724   // gathering traps statistics, logging...
1725   // it also patches the return pc but we do not care about that
1726   // since we return a continuation to the deopt_blob below.
1727   JavaThread* thread = JavaThread::current();
< prev index next >