33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/memAllocator.hpp"
39 #include "interpreter/bytecode.hpp"
40 #include "interpreter/bytecodeStream.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/oopMapCache.hpp"
43 #include "jvm.h"
44 #include "logging/log.hpp"
45 #include "logging/logLevel.hpp"
46 #include "logging/logMessage.hpp"
47 #include "logging/logStream.hpp"
48 #include "memory/allocation.inline.hpp"
49 #include "memory/oopFactory.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/constantPool.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/method.hpp"
55 #include "oops/objArrayKlass.hpp"
56 #include "oops/objArrayOop.inline.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/typeArrayOop.inline.hpp"
59 #include "oops/verifyOopClosure.hpp"
60 #include "prims/jvmtiDeferredUpdates.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/vectorSupport.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/basicLock.inline.hpp"
67 #include "runtime/continuation.hpp"
68 #include "runtime/continuationEntry.inline.hpp"
69 #include "runtime/deoptimization.hpp"
70 #include "runtime/escapeBarrier.hpp"
71 #include "runtime/fieldDescriptor.hpp"
72 #include "runtime/fieldDescriptor.inline.hpp"
73 #include "runtime/frame.inline.hpp"
74 #include "runtime/handles.inline.hpp"
75 #include "runtime/interfaceSupport.inline.hpp"
76 #include "runtime/javaThread.hpp"
77 #include "runtime/jniHandles.inline.hpp"
329 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
330 bool& deoptimized_objects) {
331 bool realloc_failures = false;
332 assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
333
334 JavaThread* deoptee_thread = chunk->at(0)->thread();
335 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
336 "a frame can only be deoptimized by the owner thread");
337
338 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
339
340 // The flag return_oop() indicates call sites which return an oop
341 // in compiled code. Such sites include Java method calls,
342 // runtime calls (for example, used to allocate new objects/arrays
343 // on the slow code path) and any other calls generated in compiled code.
344 // This information cannot be reliably recovered here just by analyzing
345 // the bytecode of the deoptimized frames, which is why the flag
346 // is set during method compilation (see Compile::Process_OopMap_Node()).
347 // If the previous frame was popped or if we are dispatching an exception,
348 // we don't have an oop result.
349 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
350 Handle return_value;
351 if (save_oop_result) {
352 // Reallocation may trigger GC. If deoptimization happened on return from
353 // a call which returns an oop, we need to save it since it is not in the oopmap.
354 oop result = deoptee.saved_oop_result(&map);
355 assert(oopDesc::is_oop_or_null(result), "must be oop");
356 return_value = Handle(thread, result);
357 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
358 if (TraceDeoptimization) {
359 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
360 tty->cr();
361 }
362 }
363 if (objects != nullptr) {
364 if (exec_mode == Deoptimization::Unpack_none) {
365 assert(thread->thread_state() == _thread_in_vm, "assumption");
366 JavaThread* THREAD = thread; // For exception macros.
367 // Clear pending OOM if reallocation fails and return true indicating allocation failure
368 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
369 deoptimized_objects = true;
370 } else {
371 JavaThread* current = thread; // For JRT_BLOCK
372 JRT_BLOCK
373 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
374 JRT_END
375 }
376 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
377 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
378 if (TraceDeoptimization) {
379 print_objects(deoptee_thread, objects, realloc_failures);
380 }
381 }
382 if (save_oop_result) {
383 // Restore result.
384 deoptee.set_saved_oop_result(&map, return_value());
385 }
386 return realloc_failures;
387 }
388
389 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
390 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
391 JavaThread* deoptee_thread = chunk->at(0)->thread();
392 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
393 assert(thread == Thread::current(), "should be");
394 HandleMark hm(thread);
395 #ifndef PRODUCT
396 bool first = true;
397 #endif // !PRODUCT
398 // Start locking from outermost/oldest frame
399 for (int i = (chunk->length() - 1); i >= 0; i--) {
400 compiledVFrame* cvf = chunk->at(i);
401 assert(cvf->scope() != nullptr, "expect only compiled java frames");
402 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
403 if (monitors->is_nonempty()) {
404 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
700 // its caller's stack by. If the caller is a compiled frame then
701 // we pretend that the callee has no parameters so that the
702 // extension counts for the full amount of locals and not just
703 // locals-parms. This is because without a c2i adapter the parm
704 // area as created by the compiled frame will not be usable by
705 // the interpreter. (Depending on the calling convention there
706 // may not even be enough space).
707
708 // QQQ I'd rather see this pushed down into last_frame_adjust
709 // and have it take the sender (aka caller).
710
711 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
712 caller_adjustment = last_frame_adjust(0, callee_locals);
713 } else if (callee_locals > callee_parameters) {
714 // The caller frame may need extending to accommodate
715 // non-parameter locals of the first unpacked interpreted frame.
716 // Compute that adjustment.
717 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
718 }
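// For example (illustrative numbers): with callee_parameters == 2 and
// callee_locals == 5, an interpreted sender is extended by last_frame_adjust(2, 5),
// making room for the 3 non-parameter locals, while a compiled sender gets
// last_frame_adjust(0, 5) because its outgoing argument area cannot be reused
// without a c2i adapter.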
719
720 // If the sender is deoptimized we must retrieve the address of the handler
721 // since the frame will "magically" show the original pc before the deopt
722 // and we'd undo the deopt.
723
724 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
725 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
726 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
727 }
728
729 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
730
731 #if INCLUDE_JVMCI
732 if (exceptionObject() != nullptr) {
733 current->set_exception_oop(exceptionObject());
734 exec_mode = Unpack_exception;
735 }
736 #endif
737
738 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
739 assert(current->has_pending_exception(), "should have thrown OOME");
740 current->set_exception_oop(current->pending_exception());
1200 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1201 default:;
1202 }
1203 }
1204 return nullptr;
1205 }
1206 #endif // INCLUDE_JVMCI
1207
1208 #if COMPILER2_OR_JVMCI
1209 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1210 Handle pending_exception(THREAD, thread->pending_exception());
1211 const char* exception_file = thread->exception_file();
1212 int exception_line = thread->exception_line();
1213 thread->clear_pending_exception();
1214
1215 bool failures = false;
1216
1217 for (int i = 0; i < objects->length(); i++) {
1218 assert(objects->at(i)->is_object(), "invalid debug information");
1219 ObjectValue* sv = (ObjectValue*) objects->at(i);
1220
1221 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1222 oop obj = nullptr;
1223
1224 bool cache_init_error = false;
1225 if (k->is_instance_klass()) {
1226 #if INCLUDE_JVMCI
1227 nmethod* nm = fr->cb()->as_nmethod_or_null();
1228 if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1229 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1230 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1231 if (obj != nullptr) {
1232 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1233 abv->set_cached(true);
1234 } else if (cache_init_error) {
1235 // Results in an OOME which is valid (as opposed to a class initialization error)
1236 // and is fine for the rare case of a cache initialization failing.
1237 failures = true;
1238 }
1239 }
1240 #endif // INCLUDE_JVMCI
1241
1242 InstanceKlass* ik = InstanceKlass::cast(k);
1243 if (obj == nullptr && !cache_init_error) {
1244 InternalOOMEMark iom(THREAD);
1245 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1246 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1247 } else {
1248 obj = ik->allocate_instance(THREAD);
1249 }
1250 }
1251 } else if (k->is_typeArray_klass()) {
1252 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1253 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1254 int len = sv->field_size() / type2size[ak->element_type()];
1255 InternalOOMEMark iom(THREAD);
1256 obj = ak->allocate(len, THREAD);
1257 } else if (k->is_objArray_klass()) {
1258 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1259 InternalOOMEMark iom(THREAD);
1260 obj = ak->allocate(sv->field_size(), THREAD);
1261 }
1262
1263 if (obj == nullptr) {
1264 failures = true;
1265 }
1266
1267 assert(sv->value().is_null(), "redundant reallocation");
1268 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1269 CLEAR_PENDING_EXCEPTION;
1270 sv->set_value(obj);
1271 }
1272
1273 if (failures) {
1274 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1275 } else if (pending_exception.not_null()) {
1276 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1277 }
1278
1279 return failures;
1280 }
1281
1282 #if INCLUDE_JVMCI
1283 /**
1284 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1285 * we need to somehow be able to recover the actual kind to be able to write the correct
1286 * amount of bytes.
1287 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1288 * the entries at index i + 1 to i + n - 1 are 'markers'.
1289 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1290 * expected form of the array would be:
1291 *
1292 * {b0, b1, b2, b3, INT, marker, b6, b7}
1293 *
1294 * Thus, in order to get back the size of the entry, we simply need to count the number
1295 * of marker entries that follow it.
1296 *
1297 * @param virtualArray the virtualized byte array
1298 * @param i index of the virtual entry we are recovering
1299 * @return The number of bytes the entry spans
1300 */
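// Worked example for the description above (illustrative): in the array
// {b0, b1, b2, b3, INT, marker, b6, b7}, the entry at index 4 is followed by one
// marker, so it spans two bytes (a short); per the @return description, the
// function reports 2 for i == 4.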
1301 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1427 default:
1428 ShouldNotReachHere();
1429 }
1430 index++;
1431 }
1432 }
1433
1434 // restore fields of an eliminated object array
1435 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1436 for (int i = 0; i < sv->field_size(); i++) {
1437 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1438 assert(value->type() == T_OBJECT, "object element expected");
1439 obj->obj_at_put(i, value->get_obj()());
1440 }
1441 }
1442
1443 class ReassignedField {
1444 public:
1445 int _offset;
1446 BasicType _type;
1447 public:
1448 ReassignedField() {
1449 _offset = 0;
1450 _type = T_ILLEGAL;
1451 }
1452 };
1453
1454 static int compare(ReassignedField* left, ReassignedField* right) {
1455 return left->_offset - right->_offset;
1456 }
1457
1458 // Restore fields of an eliminated instance object using the same field order
1459 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1460 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1461 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1462 InstanceKlass* ik = klass;
1463 while (ik != nullptr) {
1464 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1465 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1466 ReassignedField field;
1467 field._offset = fs.offset();
1468 field._type = Signature::basic_type(fs.signature());
1469 fields->append(field);
1470 }
1471 }
1472 ik = ik->superklass();
1473 }
1474 fields->sort(compare);
1475 for (int i = 0; i < fields->length(); i++) {
1476 ScopeValue* scope_field = sv->field_at(svIndex);
1477 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1478 int offset = fields->at(i)._offset;
1479 BasicType type = fields->at(i)._type;
1480 switch (type) {
1481 case T_OBJECT: case T_ARRAY:
1482 assert(value->type() == T_OBJECT, "Agreement.");
1483 obj->obj_field_put(offset, value->get_obj()());
1484 break;
1485
1486 case T_INT: case T_FLOAT: { // 4 bytes.
1487 assert(value->type() == T_INT, "Agreement.");
1488 bool big_value = false;
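// Two adjacent 4-byte fields may be described by a single 64-bit debug-info
// value (a dbl/lng location, or a long/double constant); the checks below
// detect that case so the pair is not treated as two independent 4-byte fields.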
1489 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1490 if (scope_field->is_location()) {
1491 Location::Type type = ((LocationValue*) scope_field)->location().type();
1492 if (type == Location::dbl || type == Location::lng) {
1493 big_value = true;
1494 }
1495 }
1496 if (scope_field->is_constant_int()) {
1497 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1498 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1499 big_value = true;
1500 }
1501 }
1535 break;
1536
1537 case T_BYTE:
1538 assert(value->type() == T_INT, "Agreement.");
1539 obj->byte_field_put(offset, (jbyte)value->get_jint());
1540 break;
1541
1542 case T_BOOLEAN:
1543 assert(value->type() == T_INT, "Agreement.");
1544 obj->bool_field_put(offset, (jboolean)value->get_jint());
1545 break;
1546
1547 default:
1548 ShouldNotReachHere();
1549 }
1550 svIndex++;
1551 }
1552 return svIndex;
1553 }
1554
1555 // restore fields of all eliminated objects and arrays
1556 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1557 for (int i = 0; i < objects->length(); i++) {
1558 assert(objects->at(i)->is_object(), "invalid debug information");
1559 ObjectValue* sv = (ObjectValue*) objects->at(i);
1560 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1561 Handle obj = sv->value();
1562 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1563 #ifndef PRODUCT
1564 if (PrintDeoptimizationDetails) {
1565 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1566 }
1567 #endif // !PRODUCT
1568
1569 if (obj.is_null()) {
1570 continue;
1571 }
1572
1573 #if INCLUDE_JVMCI
1574 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1575 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1576 continue;
1577 }
1578 #endif // INCLUDE_JVMCI
1579 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1580 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1581 ScopeValue* payload = sv->field_at(0);
1582 if (payload->is_location() &&
1583 payload->as_LocationValue()->location().type() == Location::vector) {
1584 #ifndef PRODUCT
1585 if (PrintDeoptimizationDetails) {
1586 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1587 if (Verbose) {
1588 Handle obj = sv->value();
1589 k->oop_print_on(obj(), tty);
1590 }
1591 }
1592 #endif // !PRODUCT
1593 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1594 }
1595 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1596 // which could be restored after vector object allocation.
1597 }
1598 if (k->is_instance_klass()) {
1599 InstanceKlass* ik = InstanceKlass::cast(k);
1600 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1601 } else if (k->is_typeArray_klass()) {
1602 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1603 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1604 } else if (k->is_objArray_klass()) {
1605 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1606 }
1607 }
1608 // These objects may escape when we return to the interpreter after deoptimization.
1609 // We need a barrier so that stores that initialize these objects can't be reordered
1610 // with subsequent stores that make these objects accessible by other threads.
1611 OrderAccess::storestore();
1612 }
1613
1614
1615 // relock objects for which synchronization was eliminated
1616 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1617 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1618 bool relocked_objects = false;
1619 for (int i = 0; i < monitors->length(); i++) {
1620 MonitorInfo* mon_info = monitors->at(i);
1770 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1771 nm->log_identity(xtty);
1772 xtty->end_head();
1773 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1774 xtty->begin_elem("jvms bci='%d'", sd->bci());
1775 xtty->method(sd->method());
1776 xtty->end_elem();
1777 if (sd->is_top()) break;
1778 }
1779 xtty->tail("deoptimized");
1780 }
1781
1782 Continuation::notify_deopt(thread, fr.sp());
1783
1784 // Patch the compiled method so that when execution returns to it we will
1785 // deopt the execution state and return to the interpreter.
1786 fr.deoptimize(thread);
1787 }
1788
1789 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1790 // Deoptimize only if the frame comes from compiled code.
1791 // Do not deoptimize the frame which is already patched
1792 // during the execution of the loops below.
1793 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1794 return;
1795 }
1796 ResourceMark rm;
1797 deoptimize_single_frame(thread, fr, reason);
1798 }
1799
1800 #if INCLUDE_JVMCI
1801 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1802 // there is no exception handler for this pc => deoptimize
1803 nm->make_not_entrant();
1804
1805 // Use Deoptimization::deoptimize for all of its side-effects:
1806 // gathering trap statistics, logging...
1807 // it also patches the return pc but we do not care about that
1808 // since we return a continuation to the deopt_blob below.
1809 JavaThread* thread = JavaThread::current();
1810 RegisterMap reg_map(thread,
|
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "compiler/compilationPolicy.hpp"
36 #include "compiler/compilerDefinitions.inline.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/memAllocator.hpp"
39 #include "interpreter/bytecode.hpp"
40 #include "interpreter/bytecodeStream.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/oopMapCache.hpp"
43 #include "jvm.h"
44 #include "logging/log.hpp"
45 #include "logging/logLevel.hpp"
46 #include "logging/logMessage.hpp"
47 #include "logging/logStream.hpp"
48 #include "memory/allocation.inline.hpp"
49 #include "memory/oopFactory.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/constantPool.hpp"
53 #include "oops/flatArrayKlass.hpp"
54 #include "oops/flatArrayOop.hpp"
55 #include "oops/fieldStreams.inline.hpp"
56 #include "oops/method.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "oops/inlineKlass.inline.hpp"
61 #include "oops/typeArrayOop.inline.hpp"
62 #include "oops/verifyOopClosure.hpp"
63 #include "prims/jvmtiDeferredUpdates.hpp"
64 #include "prims/jvmtiExport.hpp"
65 #include "prims/jvmtiThreadState.hpp"
66 #include "prims/methodHandles.hpp"
67 #include "prims/vectorSupport.hpp"
68 #include "runtime/atomic.hpp"
69 #include "runtime/basicLock.inline.hpp"
70 #include "runtime/continuation.hpp"
71 #include "runtime/continuationEntry.inline.hpp"
72 #include "runtime/deoptimization.hpp"
73 #include "runtime/escapeBarrier.hpp"
74 #include "runtime/fieldDescriptor.hpp"
75 #include "runtime/fieldDescriptor.inline.hpp"
76 #include "runtime/frame.inline.hpp"
77 #include "runtime/handles.inline.hpp"
78 #include "runtime/interfaceSupport.inline.hpp"
79 #include "runtime/javaThread.hpp"
80 #include "runtime/jniHandles.inline.hpp"
332 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
333 bool& deoptimized_objects) {
334 bool realloc_failures = false;
335 assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
336
337 JavaThread* deoptee_thread = chunk->at(0)->thread();
338 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
339 "a frame can only be deoptimized by the owner thread");
340
341 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
342
343 // The flag return_oop() indicates call sites which return an oop
344 // in compiled code. Such sites include Java method calls,
345 // runtime calls (for example, used to allocate new objects/arrays
346 // on the slow code path) and any other calls generated in compiled code.
347 // This information cannot be reliably recovered here just by analyzing
348 // the bytecode of the deoptimized frames, which is why the flag
349 // is set during method compilation (see Compile::Process_OopMap_Node()).
350 // If the previous frame was popped or if we are dispatching an exception,
351 // we don't have an oop result.
352 ScopeDesc* scope = chunk->at(0)->scope();
353 bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
354 // In case multiple values are returned (a scalarized inline type), we must
355 // take care of all oop return values.
356 GrowableArray<Handle> return_oops;
357 InlineKlass* vk = nullptr;
358 if (save_oop_result && scope->return_scalarized()) {
359 vk = InlineKlass::returned_inline_klass(map);
360 if (vk != nullptr) {
361 vk->save_oop_fields(map, return_oops);
362 save_oop_result = false;
363 }
364 }
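// For a scalarized inline type return, the field oops were saved into return_oops
// above; the inline type instance itself is reallocated from them later via
// realloc_inline_type_result().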
365 if (save_oop_result) {
366 // Reallocation may trigger GC. If deoptimization happened on return from
367 // a call which returns an oop, we need to save it since it is not in the oopmap.
368 oop result = deoptee.saved_oop_result(&map);
369 assert(oopDesc::is_oop_or_null(result), "must be oop");
370 return_oops.push(Handle(thread, result));
371 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
372 if (TraceDeoptimization) {
373 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
374 tty->cr();
375 }
376 }
377 if (objects != nullptr || vk != nullptr) {
378 if (exec_mode == Deoptimization::Unpack_none) {
379 assert(thread->thread_state() == _thread_in_vm, "assumption");
380 JavaThread* THREAD = thread; // For exception macros.
381 // Clear pending OOM if reallocation fails and return true indicating allocation failure
382 if (vk != nullptr) {
383 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
384 }
385 if (objects != nullptr) {
386 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
387 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
388 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
389 }
390 deoptimized_objects = true;
391 } else {
392 JavaThread* current = thread; // For JRT_BLOCK
393 JRT_BLOCK
394 if (vk != nullptr) {
395 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
396 }
397 if (objects != nullptr) {
398 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
399 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
400 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
401 }
402 JRT_END
403 }
404 if (TraceDeoptimization && objects != nullptr) {
405 print_objects(deoptee_thread, objects, realloc_failures);
406 }
407 }
408 if (save_oop_result || vk != nullptr) {
409 // Restore result.
410 assert(return_oops.length() == 1, "no inline type");
411 deoptee.set_saved_oop_result(&map, return_oops.pop()());
412 }
413 return realloc_failures;
414 }
415
416 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
417 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
418 JavaThread* deoptee_thread = chunk->at(0)->thread();
419 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
420 assert(thread == Thread::current(), "should be");
421 HandleMark hm(thread);
422 #ifndef PRODUCT
423 bool first = true;
424 #endif // !PRODUCT
425 // Start locking from outermost/oldest frame
426 for (int i = (chunk->length() - 1); i >= 0; i--) {
427 compiledVFrame* cvf = chunk->at(i);
428 assert(cvf->scope() != nullptr, "expect only compiled java frames");
429 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
430 if (monitors->is_nonempty()) {
431 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
727 // its caller's stack by. If the caller is a compiled frame then
728 // we pretend that the callee has no parameters so that the
729 // extension counts for the full amount of locals and not just
730 // locals-parms. This is because without a c2i adapter the parm
731 // area as created by the compiled frame will not be usable by
732 // the interpreter. (Depending on the calling convention there
733 // may not even be enough space).
734
735 // QQQ I'd rather see this pushed down into last_frame_adjust
736 // and have it take the sender (aka caller).
737
738 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
739 caller_adjustment = last_frame_adjust(0, callee_locals);
740 } else if (callee_locals > callee_parameters) {
741 // The caller frame may need extending to accommodate
742 // non-parameter locals of the first unpacked interpreted frame.
743 // Compute that adjustment.
744 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
745 }
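// For example (illustrative numbers): with callee_parameters == 2 and
// callee_locals == 5, an interpreted sender is extended by last_frame_adjust(2, 5),
// making room for the 3 non-parameter locals, while a compiled sender gets
// last_frame_adjust(0, 5) because its outgoing argument area cannot be reused
// without a c2i adapter.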
746
747 // If the sender is deoptimized we must retrieve the address of the handler
748 // since the frame will "magically" show the original pc before the deopt
749 // and we'd undo the deopt.
750
751 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
752 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
753 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
754 }
755
756 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
757
758 #if INCLUDE_JVMCI
759 if (exceptionObject() != nullptr) {
760 current->set_exception_oop(exceptionObject());
761 exec_mode = Unpack_exception;
762 }
763 #endif
764
765 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
766 assert(current->has_pending_exception(), "should have thrown OOME");
767 current->set_exception_oop(current->pending_exception());
1227 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1228 default:;
1229 }
1230 }
1231 return nullptr;
1232 }
1233 #endif // INCLUDE_JVMCI
1234
1235 #if COMPILER2_OR_JVMCI
1236 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1237 Handle pending_exception(THREAD, thread->pending_exception());
1238 const char* exception_file = thread->exception_file();
1239 int exception_line = thread->exception_line();
1240 thread->clear_pending_exception();
1241
1242 bool failures = false;
1243
1244 for (int i = 0; i < objects->length(); i++) {
1245 assert(objects->at(i)->is_object(), "invalid debug information");
1246 ObjectValue* sv = (ObjectValue*) objects->at(i);
1247 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1248
1249 // Check if the object may be null and has an additional is_init input that needs
1250 // to be checked before using the field values. Skip re-allocation if it is null.
1251 if (sv->maybe_null()) {
1252 assert(k->is_inline_klass(), "must be an inline klass");
1253 jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1254 if (is_init == 0) {
1255 continue;
1256 }
1257 }
1258
1259 oop obj = nullptr;
1260 bool cache_init_error = false;
1261 if (k->is_instance_klass()) {
1262 #if INCLUDE_JVMCI
1263 nmethod* nm = fr->cb()->as_nmethod_or_null();
1264 if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1265 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1266 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1267 if (obj != nullptr) {
1268 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1269 abv->set_cached(true);
1270 } else if (cache_init_error) {
1271 // Results in an OOME which is valid (as opposed to a class initialization error)
1272 // and is fine for the rare case of a cache initialization failing.
1273 failures = true;
1274 }
1275 }
1276 #endif // INCLUDE_JVMCI
1277
1278 InstanceKlass* ik = InstanceKlass::cast(k);
1279 if (obj == nullptr && !cache_init_error) {
1280 InternalOOMEMark iom(THREAD);
1281 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1282 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1283 } else {
1284 obj = ik->allocate_instance(THREAD);
1285 }
1286 }
1287 } else if (k->is_flatArray_klass()) {
1288 FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1289 // Inline type array must be zeroed because not all memory is reassigned
1290 obj = ak->allocate(sv->field_size(), THREAD);
1291 } else if (k->is_typeArray_klass()) {
1292 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1293 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1294 int len = sv->field_size() / type2size[ak->element_type()];
1295 InternalOOMEMark iom(THREAD);
1296 obj = ak->allocate(len, THREAD);
1297 } else if (k->is_objArray_klass()) {
1298 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1299 InternalOOMEMark iom(THREAD);
1300 obj = ak->allocate(sv->field_size(), THREAD);
1301 }
1302
1303 if (obj == nullptr) {
1304 failures = true;
1305 }
1306
1307 assert(sv->value().is_null(), "redundant reallocation");
1308 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1309 CLEAR_PENDING_EXCEPTION;
1310 sv->set_value(obj);
1311 }
1312
1313 if (failures) {
1314 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1315 } else if (pending_exception.not_null()) {
1316 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1317 }
1318
1319 return failures;
1320 }
1321
1322 // We're deoptimizing at the return of a call; the inline type's fields are
1323 // in registers. When we go back to the interpreter, it will expect a
1324 // reference to an inline type instance. Allocate and initialize it from
1325 // the register values here.
1326 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1327 oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1328 if (new_vt == nullptr) {
1329 CLEAR_PENDING_EXCEPTION;
1330 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1331 }
1332 return_oops.clear();
1333 return_oops.push(Handle(THREAD, new_vt));
1334 return false;
1335 }
1336
1337 #if INCLUDE_JVMCI
1338 /**
1339 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1340 * we need to somehow be able to recover the actual kind to be able to write the correct
1341 * amount of bytes.
1342 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1343 * the entries at index i + 1 to i + n - 1 are 'markers'.
1344 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1345 * expected form of the array would be:
1346 *
1347 * {b0, b1, b2, b3, INT, marker, b6, b7}
1348 *
1349 * Thus, in order to get back the size of the entry, we simply need to count the number
1350 * of marker entries that follow it.
1351 *
1352 * @param virtualArray the virtualized byte array
1353 * @param i index of the virtual entry we are recovering
1354 * @return The number of bytes the entry spans
1355 */
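// Worked example for the description above (illustrative): in the array
// {b0, b1, b2, b3, INT, marker, b6, b7}, the entry at index 4 is followed by one
// marker, so it spans two bytes (a short); per the @return description, the
// function reports 2 for i == 4.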
1356 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1482 default:
1483 ShouldNotReachHere();
1484 }
1485 index++;
1486 }
1487 }
1488
1489 // restore fields of an eliminated object array
1490 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1491 for (int i = 0; i < sv->field_size(); i++) {
1492 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1493 assert(value->type() == T_OBJECT, "object element expected");
1494 obj->obj_at_put(i, value->get_obj()());
1495 }
1496 }
1497
1498 class ReassignedField {
1499 public:
1500 int _offset;
1501 BasicType _type;
1502 InstanceKlass* _klass;
1503 bool _is_flat;
1504 public:
1505 ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { }
1506 };
1507
1508 static int compare(ReassignedField* left, ReassignedField* right) {
1509 return left->_offset - right->_offset;
1510 }
1511
1512 // Restore fields of an eliminated instance object using the same field order
1513 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1514 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1515 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1516 InstanceKlass* ik = klass;
1517 while (ik != nullptr) {
1518 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1519 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1520 ReassignedField field;
1521 field._offset = fs.offset();
1522 field._type = Signature::basic_type(fs.signature());
1523 if (fs.is_null_free_inline_type()) {
1524 if (fs.is_flat()) {
1525 field._is_flat = true;
1526 // Resolve klass of flat inline type field
1527 field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1528 } else {
1529 field._type = T_OBJECT; // Can be removed once Q-descriptors have been removed.
1530 }
1531 }
1532 fields->append(field);
1533 }
1534 }
1535 ik = ik->superklass();
1536 }
1537 fields->sort(compare);
1538 for (int i = 0; i < fields->length(); i++) {
1539 BasicType type = fields->at(i)._type;
1540 int offset = base_offset + fields->at(i)._offset;
1541 // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1542 if (fields->at(i)._is_flat) {
1543 // Recursively re-assign flat inline type fields
1544 InstanceKlass* vk = fields->at(i)._klass;
1545 assert(vk != nullptr, "must be resolved");
1546 offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1547 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1548 continue; // Continue because we don't need to increment svIndex
1549 }
1550 ScopeValue* scope_field = sv->field_at(svIndex);
1551 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1552 switch (type) {
1553 case T_OBJECT:
1554 case T_ARRAY:
1555 assert(value->type() == T_OBJECT, "Agreement.");
1556 obj->obj_field_put(offset, value->get_obj()());
1557 break;
1558
1559 case T_INT: case T_FLOAT: { // 4 bytes.
1560 assert(value->type() == T_INT, "Agreement.");
1561 bool big_value = false;
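// Two adjacent 4-byte fields may be described by a single 64-bit debug-info
// value (a dbl/lng location, or a long/double constant); the checks below
// detect that case so the pair is not treated as two independent 4-byte fields.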
1562 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1563 if (scope_field->is_location()) {
1564 Location::Type type = ((LocationValue*) scope_field)->location().type();
1565 if (type == Location::dbl || type == Location::lng) {
1566 big_value = true;
1567 }
1568 }
1569 if (scope_field->is_constant_int()) {
1570 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1571 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1572 big_value = true;
1573 }
1574 }
1608 break;
1609
1610 case T_BYTE:
1611 assert(value->type() == T_INT, "Agreement.");
1612 obj->byte_field_put(offset, (jbyte)value->get_jint());
1613 break;
1614
1615 case T_BOOLEAN:
1616 assert(value->type() == T_INT, "Agreement.");
1617 obj->bool_field_put(offset, (jboolean)value->get_jint());
1618 break;
1619
1620 default:
1621 ShouldNotReachHere();
1622 }
1623 svIndex++;
1624 }
1625 return svIndex;
1626 }
1627
1628 // restore fields of an eliminated inline type array
1629 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1630 InlineKlass* vk = vak->element_klass();
1631 assert(vk->flat_array(), "should only be used for flat inline type arrays");
1632 // Adjust offset to omit oop header
1633 int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1634 // Initialize all elements of the flat inline type array
1635 for (int i = 0; i < sv->field_size(); i++) {
1636 ScopeValue* val = sv->field_at(i);
1637 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1638 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1639 }
1640 }
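// Illustrative note: with a flat element layout of 8 bytes (log2 element size 3),
// element i above starts at base_offset + (i << 3).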
1641
1642 // restore fields of all eliminated objects and arrays
1643 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1644 for (int i = 0; i < objects->length(); i++) {
1645 assert(objects->at(i)->is_object(), "invalid debug information");
1646 ObjectValue* sv = (ObjectValue*) objects->at(i);
1647 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1648 Handle obj = sv->value();
1649 assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1650 #ifndef PRODUCT
1651 if (PrintDeoptimizationDetails) {
1652 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1653 }
1654 #endif // !PRODUCT
1655
1656 if (obj.is_null()) {
1657 continue;
1658 }
1659
1660 #if INCLUDE_JVMCI
1661 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1662 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1663 continue;
1664 }
1665 #endif // INCLUDE_JVMCI
1666 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1667 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1668 ScopeValue* payload = sv->field_at(0);
1669 if (payload->is_location() &&
1670 payload->as_LocationValue()->location().type() == Location::vector) {
1671 #ifndef PRODUCT
1672 if (PrintDeoptimizationDetails) {
1673 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1674 if (Verbose) {
1675 Handle obj = sv->value();
1676 k->oop_print_on(obj(), tty);
1677 }
1678 }
1679 #endif // !PRODUCT
1680 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1681 }
1682 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1683 // which could be restored after vector object allocation.
1684 }
1685 if (k->is_instance_klass()) {
1686 InstanceKlass* ik = InstanceKlass::cast(k);
1687 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1688 } else if (k->is_flatArray_klass()) {
1689 FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1690 reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1691 } else if (k->is_typeArray_klass()) {
1692 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1693 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1694 } else if (k->is_objArray_klass()) {
1695 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1696 }
1697 }
1698 // These objects may escape when we return to the interpreter after deoptimization.
1699 // We need a barrier so that stores that initialize these objects can't be reordered
1700 // with subsequent stores that make these objects accessible by other threads.
1701 OrderAccess::storestore();
1702 }
1703
1704
1705 // relock objects for which synchronization was eliminated
1706 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1707 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1708 bool relocked_objects = false;
1709 for (int i = 0; i < monitors->length(); i++) {
1710 MonitorInfo* mon_info = monitors->at(i);
1860 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1861 nm->log_identity(xtty);
1862 xtty->end_head();
1863 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1864 xtty->begin_elem("jvms bci='%d'", sd->bci());
1865 xtty->method(sd->method());
1866 xtty->end_elem();
1867 if (sd->is_top()) break;
1868 }
1869 xtty->tail("deoptimized");
1870 }
1871
1872 Continuation::notify_deopt(thread, fr.sp());
1873
1874 // Patch the compiled method so that when execution returns to it we will
1875 // deopt the execution state and return to the interpreter.
1876 fr.deoptimize(thread);
1877 }
1878
1879 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1880 // Deoptimize only if the frame comes from compiled code.
1881 // Do not deoptimize the frame which is already patched
1882 // during the execution of the loops below.
1883 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1884 return;
1885 }
1886 ResourceMark rm;
1887 deoptimize_single_frame(thread, fr, reason);
1888 }
1889
1890 #if INCLUDE_JVMCI
1891 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1892 // there is no exception handler for this pc => deoptimize
1893 nm->make_not_entrant();
1894
1895 // Use Deoptimization::deoptimize for all of its side-effects:
1896 // gathering trap statistics, logging...
1897 // it also patches the return pc but we do not care about that
1898 // since we return a continuation to the deopt_blob below.
1899 JavaThread* thread = JavaThread::current();
1900 RegisterMap reg_map(thread,
|