/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logLevel.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiDeferredUpdates.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "prims/vectorSupport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JFR
#include "jfr/jfrEvents.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#endif
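
// A DeoptimizationScope batches nmethods that must be deoptimized and commits
// them together. A sketch of the usage implied by the code below (mark one or
// more methods, then commit via handshake or safepoint):
//
//   DeoptimizationScope deopt_scope;
//   deopt_scope.mark(cm);             // possibly many times
//   deopt_scope.deoptimize_marked();  // must run before the scope dies
//
// _committed_deopt_gen is the newest generation whose marked methods have
// already been deoptimized; _active_deopt_gen is the generation new marks are
// tagged with, so a mark is never tagged with an already-committed generation.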
"runtime/stackWatermarkSet.hpp" 88 #include "runtime/stubRoutines.hpp" 89 #include "runtime/synchronizer.hpp" 90 #include "runtime/threadSMR.hpp" 91 #include "runtime/threadWXSetters.inline.hpp" 92 #include "runtime/vframe.hpp" 93 #include "runtime/vframeArray.hpp" 94 #include "runtime/vframe_hp.hpp" 95 #include "runtime/vmOperations.hpp" 96 #include "utilities/checkedCast.hpp" 97 #include "utilities/events.hpp" 98 #include "utilities/growableArray.hpp" 99 #include "utilities/macros.hpp" 100 #include "utilities/preserveException.hpp" 101 #include "utilities/xmlstream.hpp" 102 #if INCLUDE_JFR 103 #include "jfr/jfrEvents.hpp" 104 #include "jfr/metadata/jfrSerializer.hpp" 105 #endif 106 107 uint64_t DeoptimizationScope::_committed_deopt_gen = 0; 108 uint64_t DeoptimizationScope::_active_deopt_gen = 1; 109 bool DeoptimizationScope::_committing_in_progress = false; 110 111 DeoptimizationScope::DeoptimizationScope() : _required_gen(0) { 112 DEBUG_ONLY(_deopted = false;) 113 114 MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); 115 // If there is nothing to deopt _required_gen is the same as comitted. 116 _required_gen = DeoptimizationScope::_committed_deopt_gen; 117 } 118 119 DeoptimizationScope::~DeoptimizationScope() { 120 assert(_deopted, "Deopt not executed"); 121 } 122 123 void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) { 124 ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); 125 126 // If it's already marked but we still need it to be deopted. 127 if (cm->is_marked_for_deoptimization()) { 128 dependent(cm); 129 return; 130 } 131 132 CompiledMethod::DeoptimizationStatus status = 133 inc_recompile_counts ? CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate; 134 Atomic::store(&cm->_deoptimization_status, status); 135 136 // Make sure active is not committed 137 assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be"); 138 assert(cm->_deoptimization_generation == 0, "Is already marked"); 139 140 cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen; 141 _required_gen = DeoptimizationScope::_active_deopt_gen; 142 } 143 144 void DeoptimizationScope::dependent(CompiledMethod* cm) { 145 ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); 146 147 // A method marked by someone else may have a _required_gen lower than what we marked with. 148 // Therefore only store it if it's higher than _required_gen. 149 if (_required_gen < cm->_deoptimization_generation) { 150 _required_gen = cm->_deoptimization_generation; 151 } 152 } 153 154 void DeoptimizationScope::deoptimize_marked() { 155 assert(!_deopted, "Already deopted"); 156 157 // We are not alive yet. 158 if (!Universe::is_fully_initialized()) { 159 DEBUG_ONLY(_deopted = true;) 160 return; 161 } 162 163 // Safepoints are a special case, handled here. 
  // Safepoints are a special case, handled here.
  if (SafepointSynchronize::is_at_safepoint()) {
    DeoptimizationScope::_committed_deopt_gen = DeoptimizationScope::_active_deopt_gen;
    DeoptimizationScope::_active_deopt_gen++;
    Deoptimization::deoptimize_all_marked();
    DEBUG_ONLY(_deopted = true;)
    return;
  }

  uint64_t committing = 0;
  bool wait = false;
  while (true) {
    {
      ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

      // First we check if we or someone else already deopted the gen we want.
      if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
        DEBUG_ONLY(_deopted = true;)
        return;
      }
      if (!_committing_in_progress) {
        // The version we are about to commit.
        committing = DeoptimizationScope::_active_deopt_gen;
        // Make sure new marks use a higher gen.
        DeoptimizationScope::_active_deopt_gen++;
        _committing_in_progress = true;
        wait = false;
      } else {
        // Another thread is handshaking and committing a gen.
        wait = true;
      }
    }
    if (wait) {
      // Wait and let the concurrent handshake be performed.
      ThreadBlockInVM tbivm(JavaThread::current());
      os::naked_yield();
    } else {
      // Performs the handshake.
      Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
      DEBUG_ONLY(_deopted = true;)
      {
        ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

        // Make sure that committed doesn't go backwards.
        // Should only happen if we did a deopt during a safepoint above.
        if (DeoptimizationScope::_committed_deopt_gen < committing) {
          DeoptimizationScope::_committed_deopt_gen = committing;
        }
        _committing_in_progress = false;

        assert(DeoptimizationScope::_committed_deopt_gen >= _required_gen, "Must be");

        return;
      }
    }
  }
}

Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
                                         int caller_adjustment,
                                         int caller_actual_parameters,
                                         int number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type,
                                         int exec_mode) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment = caller_adjustment;
  _caller_actual_parameters = caller_actual_parameters;
  _number_of_frames = number_of_frames;
  _frame_sizes = frame_sizes;
  _frame_pcs = frame_pcs;
  _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type = return_type;
  _initial_info = 0;
  // PD (x86 only)
  _counter_temp = 0;
  _unpack_kind = exec_mode;
  _sender_sp_temp = 0;

  _total_frame_sizes = size_of_frames();
  assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
}
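
// The frame_sizes and frame_pcs arrays are C-heap allocations made in
// fetch_unroll_info_helper() and handed over to the UnrollBlock, which owns
// them from then on; they are released here together with the register
// scratch block.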
Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}

int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  intptr_t result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return checked_cast<int>(result);
}

void Deoptimization::UnrollBlock::print() {
  ResourceMark rm;
  stringStream st;
  st.print_cr("UnrollBlock");
  st.print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  st.print(" frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    st.print(INTX_FORMAT " ", frame_sizes()[index]);
  }
  st.cr();
  tty->print_raw(st.freeze());
}

// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
// The actual reallocation of previously eliminated objects occurs in realloc_objects,
// which is called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  current->inc_in_deopt_handler();

  if (exec_mode == Unpack_exception) {
    // When we get here, a callee has thrown an exception into a deoptimized
    // frame. That throw might have deferred stack watermark checking until
    // after unwinding. So we deal with such deferred requests here.
    StackWatermarkSet::after_unwind(current);
  }

  return fetch_unroll_info_helper(current, exec_mode);
JRT_END

#if COMPILER2_OR_JVMCI
// print information about reallocated objects
static void print_objects(JavaThread* deoptee_thread,
                          GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
  ResourceMark rm;
  stringStream st; // change to logStream with logging
  st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();

    st.print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
    k->print_value_on(&st);
    assert(obj.not_null() || k->is_inline_klass() || realloc_failures, "reallocation was missed");
    if (obj.is_null()) {
      if (k->is_inline_klass()) {
        st.print(" is null");
      } else {
        st.print(" allocation failed");
      }
    } else {
      st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
    }
    st.cr();

    if (Verbose && !obj.is_null()) {
      k->oop_print_on(obj(), &st);
    }
  }
  tty->print_raw(st.freeze());
}
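
// Rematerialize the scalar-replaced objects of the (possibly inlined) frames
// covered by 'chunk'. Returns true if any reallocation failed; also saves and
// later restores an oop (or scalarized inline type) result that the deoptee
// is about to return, since reallocation may trigger a GC.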
static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
                                  bool& deoptimized_objects) {
  bool realloc_failures = false;
  assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");

  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
         "a frame can only be deoptimized by the owner thread");

  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  ScopeDesc* scope = chunk->at(0)->scope();
  bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  // In case of the return of multiple values, we must take care
  // of all oop return values.
  GrowableArray<Handle> return_oops;
  InlineKlass* vk = nullptr;
  if (save_oop_result && scope->return_scalarized()) {
    vk = InlineKlass::returned_inline_klass(map);
    if (vk != nullptr) {
      vk->save_oop_fields(map, return_oops);
      save_oop_result = false;
    }
  }
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_oops.push(Handle(thread, result));
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
      tty->cr();
    }
  }
  if (objects != nullptr || vk != nullptr) {
    if (exec_mode == Deoptimization::Unpack_none) {
      assert(thread->thread_state() == _thread_in_vm, "assumption");
      JavaThread* THREAD = thread; // For exception macros.
      // Clear pending OOM if reallocation fails and return true indicating allocation failure
      if (vk != nullptr) {
        realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
      }
      if (objects != nullptr) {
        realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
        bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
        Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
      }
      deoptimized_objects = true;
    } else {
      JavaThread* current = thread; // For JRT_BLOCK
      JRT_BLOCK
      if (vk != nullptr) {
        realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
      }
      if (objects != nullptr) {
        realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
        bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
        Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
      }
      JRT_END
    }
    if (TraceDeoptimization) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
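  // Reallocation above may have triggered a GC, so re-install the (possibly
  // moved) oop result into the frame from the Handle that kept it alive.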
  if (save_oop_result || vk != nullptr) {
    // Restore result.
    assert(return_oops.length() == 1, "no inline type");
    deoptee.set_saved_oop_result(&map, return_oops.pop()());
  }
  return realloc_failures;
}

static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
                                     frame& deoptee, int exec_mode, bool& deoptimized_objects) {
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
  assert(thread == Thread::current(), "should be");
  HandleMark hm(thread);
#ifndef PRODUCT
  bool first = true;
#endif // !PRODUCT
  for (int i = 0; i < chunk->length(); i++) {
    compiledVFrame* cvf = chunk->at(i);
    assert(cvf->scope() != nullptr, "expect only compiled java frames");
    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
    if (monitors->is_nonempty()) {
      bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
                                                     exec_mode, realloc_failures);
      deoptimized_objects = deoptimized_objects || relocked;
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        ResourceMark rm;
        stringStream st;
        for (int j = 0; j < monitors->length(); j++) {
          MonitorInfo* mi = monitors->at(j);
          if (mi->eliminated()) {
            if (first) {
              first = false;
              st.print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
            }
            if (exec_mode == Deoptimization::Unpack_none) {
              ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor();
              if (monitor != nullptr && monitor->object() == mi->owner()) {
                st.print_cr(" object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner()));
                continue;
              }
            }
            if (mi->owner_is_scalar_replaced()) {
              Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
              st.print_cr(" failed reallocation for klass %s", k->external_name());
            } else {
              st.print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
            }
          }
        }
        tty->print_raw(st.freeze());
      }
#endif // !PRODUCT
    }
  }
}

// Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
// The given vframes cover one physical frame.
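// Called for JVMTI object escapes (cf. the EscapeBarrier checks above) with
// exec_mode Unpack_none: the physical frame stays on the stack; only its
// eliminated objects and locks are restored.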
bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
                                                 bool& realloc_failures) {
  frame deoptee = chunk->at(0)->fr();
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  RegisterMap map(chunk->at(0)->register_map());
  bool deoptimized_objects = false;

  bool const jvmci_enabled = JVMCI_ONLY(UseJVMCICompiler) NOT_JVMCI(false);

  // Reallocate the non-escaping objects and restore their fields.
  if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
                                      || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
    realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects);
  }

  // MonitorInfo structures used in eliminate_locks are not GC safe.
  NoSafepointVerifier no_safepoint;

  // Now relock objects if synchronization on them was eliminated.
  if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
    restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
  }
  return deoptimized_objects;
}
#endif // COMPILER2_OR_JVMCI

// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
  // When we get here we are about to unwind the deoptee frame. In order to
  // catch not yet safe to use frames, the following stack watermark barrier
  // poll will make such frames safe to use.
  StackWatermarkSet::before_unwind(current);

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(current);
  assert(current->deopt_mark() == nullptr, "Pending deopt!");
  current->set_deopt_mark(dmark);

  frame stub_frame = current->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(current,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  RegisterMap dummy_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  current->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    current->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame. This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, current);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

  bool realloc_failures = false;

#if COMPILER2_OR_JVMCI
  bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);

  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
                                       || EliminateAutoBox || EnableVectorAggressiveReboxing )) {
    bool unused;
    realloc_failures = rematerialize_objects(current, exec_mode, cm, deoptee, map, chunk, unused);
  }
#endif // COMPILER2_OR_JVMCI
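
  // If any reallocation above failed, realloc_failures is now true and an
  // OutOfMemoryError is pending in the deoptee; the affected frames are
  // popped via pop_frames_failed_reallocs() once the vframeArray exists.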
  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  // Locks may be rebiased in a safepoint.
  NoSafepointVerifier no_safepoint;

#if COMPILER2_OR_JVMCI
  if ((jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) ))
      && !EscapeBarrier::objs_are_deoptimized(current, deoptee.id())) {
    bool unused;
    restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
  }
#endif // COMPILER2_OR_JVMCI

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
#endif // !PRODUCT

    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != nullptr && expressions->length() > 0, "must have exception to throw");
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != nullptr, "exception oop can not be null");
  }

  vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    // This destroys all ScopedValue bindings.
    current->clear_scopedValueBindings();
    pop_frames_failed_reallocs(current, array);
  }
#endif
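
  // Stash the vframeArray in the thread: unpack_frames() runs later, after
  // the stub has built the skeletal frames, and retrieves it from here.
  // cleanup_deopt_info() then keeps it as vframe_array_last so the VerifyStack
  // pass can still inspect it, freeing it only on a subsequent deoptimization.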
  assert(current->vframe_array_head() == nullptr, "Pending deopt!");
  current->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray

  JvmtiDeferredUpdates::delete_updates_for_frame(current, array->original().id());

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != nullptr && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() ||
         cb->is_uncommon_trap_stub() ||
         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
         "unexpected code blob: %s", cb->name());
#endif

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && current->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(current->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different from the number of arguments in the callee when method
  // handles are used. If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method(current, deopt_sender.interpreter_frame_method());
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller). Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // so things look a little strange in this loop.
  //
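  // Note that callee_parameters/callee_locals trail the loop by one frame:
  // while sizing frame 'index' they still describe the next-younger frame,
  // i.e. the callee whose parameter area overlaps this frame's stack.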
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }
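
  // The interpreter expects the result of a pending call (if any) in its own
  // convention, so the unpack code needs the call site's return type computed
  // below; this matters in particular for float/double results.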
  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    methodHandle method(current, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
  if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
    ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
  }

  assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");

#if INCLUDE_JVMCI
  if (exceptionObject() != nullptr) {
    current->set_exception_oop(exceptionObject());
    exec_mode = Unpack_exception;
  }
#endif

  if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(current->has_pending_exception(), "should have thrown OOME");
    current->set_exception_oop(current->pending_exception());
    current->clear_pending_exception();
    exec_mode = Unpack_exception;
  }

#if INCLUDE_JVMCI
  if (current->frames_to_pop_failed_realloc() > 0) {
    current->set_pending_monitorenter(false);
  }
#endif

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == nullptr) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(nullptr);
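
  // Keep the just-unpacked array as vframe_array_last: the VerifyStack pass
  // in unpack_frames() still reads it. Only the array left over from the
  // previous deoptimization can be freed at this point.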
  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != nullptr) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(nullptr);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(nullptr);
  thread->set_deopt_compiled_method(nullptr);


  if (JvmtiExport::can_pop_frame()) {
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}

// Moved from cpu directories because none of the cpus has callee save values.
// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When we deoptimize a situation where a
  // compiled caller calls a compiled callee, the caller will have registers it
  // expects to survive the call to the callee. If we deoptimize the callee the
  // only way we can restore these registers is to have the oldest interpreter
  // frame that we create restore these values. That is what this routine
  // will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}
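
// Debug-only helper for the VerifyStack code in unpack_frames() below: the
// "compare against the next bytecode's oop map" heuristic is only valid for
// bytecodes that can actually fall through to their successor.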
#ifndef PRODUCT
static bool falls_through(Bytecodes::Code bc) {
  switch (bc) {
    // List may be incomplete. Here we really only care about bytecodes where compiled code
    // can deoptimize.
    case Bytecodes::_goto:
    case Bytecodes::_goto_w:
    case Bytecodes::_athrow:
      return false;
    default:
      return true;
  }
}
#endif

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
  assert(thread == JavaThread::current(), "pre-condition");

  // We are already active in the special DeoptResourceMark; any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // JRT_LEAF methods don't normally allocate handles and there is a
  // NoHandleMark to enforce that. It is actually safe to use Handles
  // in a JRT_LEAF method, and sometimes desirable, but to do so we
  // must use ResetNoHandleMark to bypass the NoHandleMark, and
  // then use a HandleMark to ensure any Handles we do create are
  // cleaned up in this scope.
  ResetNoHandleMark rnhm;
  HandleMark hm(thread);

  frame stub_frame = thread->last_frame();

  Continuation::notify_deopt(thread, stub_frame.sp());

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();
  UnrollBlock* info = array->unroll_block();

  // We set the last_Java frame. But the stack isn't really parsable here. So we
  // clear it to make sure JFR understands not to try and walk stacks from events
  // in here.
  intptr_t* sp = thread->frame_anchor()->last_Java_sp();
  thread->frame_anchor()->set_last_Java_sp(nullptr);

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  thread->frame_anchor()->set_last_Java_sp(sp);

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;
    // Clear pending exception to not break verification code (restored afterwards)
    PreserveExceptionMark pm(thread);

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread,
                   RegisterMap::UpdateMap::skip,
                   RegisterMap::ProcessFrames::include,
                   RegisterMap::WalkContinuation::skip);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh, iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      Bytecodes::Code next_code = Bytecodes::_shouldnotreachhere;
      if (Bytecodes::is_invoke(cur_code)) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        cur_invoke_parameter_size = invoke.size_of_parameters();
        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
          callee_size_of_parameters++;
        }
      }
      if (str.bci() < max_bci) {
        next_code = str.next();
        if (next_code >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          if (!Bytecodes::is_invoke(cur_code) && falls_through(cur_code)) {
            // Get expression stack size for the next bytecode
            InterpreterOopMap next_mask;
            OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
            next_mask_expression_stack_size = next_mask.expression_stack_size();
            if (Bytecodes::is_invoke(next_code)) {
              Bytecode_invoke invoke(mh, str.bci());
              next_mask_expression_stack_size += invoke.size_of_parameters();
            }
            // Need to subtract off the size of the result type of
            // the bytecode because this is not described in the
            // debug info but returned to the interpreter in the TOS
            // caching register
            BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
            if (bytecode_result_type != T_ILLEGAL) {
              top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
            }
            assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive");
            try_next_mask = true;
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        {
          // Print out some information that will help us debug the problem
          tty->print_cr("Wrong number of expression stack elements during deoptimization");
          tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
          tty->print_cr(" Current code %s", Bytecodes::name(cur_code));
          if (try_next_mask) {
            tty->print_cr(" Next code %s", Bytecodes::name(next_code));
          }
          tty->print_cr(" Fabricated interpreter frame had %d expression stack elements",
                        iframe->interpreter_frame_expression_stack_size());
          tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
          tty->print_cr(" try_next_mask = %d", try_next_mask);
          tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
          tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters);
          tty->print_cr(" callee_max_locals = %d", callee_max_locals);
          tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
          tty->print_cr(" exec_mode = %d", exec_mode);
          tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
          tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
          tty->print_cr(" Interpreted frames:");
          for (int k = 0; k < cur_array->frames(); k++) {
            vframeArrayElement* el = cur_array->element(k);
            tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
          }
          cur_array->print_on_2(tty);
        }
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif // !PRODUCT

  return bt;
JRT_END

class DeoptimizeMarkedClosure : public HandshakeClosure {
 public:
  DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {}
  void do_thread(Thread* thread) {
    JavaThread* jt = JavaThread::cast(thread);
    jt->deoptimize_marked_methods();
  }
};

void Deoptimization::deoptimize_all_marked() {
  ResourceMark rm;

  // Make the dependent methods not entrant
  CodeCache::make_marked_nmethods_deoptimized();

  DeoptimizeMarkedClosure deopt;
  if (SafepointSynchronize::is_at_safepoint()) {
    Threads::java_threads_do(&deopt);
  } else {
    Handshake::execute(&deopt);
  }
}

Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;
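
// Support for rematerializing boxes that JVMCI-compiled code scalar-replaced.
// Rather than always allocating a fresh box, get_cached_box() (below) looks
// the primitive value up in the corresponding java.lang cache class so that
// reference identity matches what Integer.valueOf() and friends would have
// produced had the code not been compiled.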
#if INCLUDE_JVMCI
template<typename CacheType>
class BoxCacheBase : public CHeapObj<mtCompiler> {
protected:
  static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) {
    ResourceMark rm(thread);
    char* klass_name_str = klass_name->as_C_string();
    InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle(), Handle());
    guarantee(ik != nullptr, "%s must be loaded", klass_name_str);
    if (!ik->is_in_error_state()) {
      guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
      CacheType::compute_offsets(ik);
    }
    return ik;
  }
};

template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache : public BoxCacheBase<CacheType> {
  PrimitiveType _low;
  PrimitiveType _high;
  jobject _cache;
protected:
  static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
  BoxCache(Thread* thread) {
    InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol());
    if (ik->is_in_error_state()) {
      _low = 1;
      _high = 0;
      _cache = nullptr;
    } else {
      objArrayOop cache = CacheType::cache(ik);
      assert(cache->length() > 0, "Empty cache");
      _low = BoxType::value(cache->obj_at(0));
      _high = checked_cast<PrimitiveType>(_low + cache->length() - 1);
      _cache = JNIHandles::make_global(Handle(thread, cache));
    }
  }
  ~BoxCache() {
    JNIHandles::destroy_global(_cache);
  }
public:
  static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
    if (_singleton == nullptr) {
      BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
      if (!Atomic::replace_if_null(&_singleton, s)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup(PrimitiveType value) {
    if (_low <= value && value <= _high) {
      int offset = checked_cast<int>(value - _low);
      return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
    }
    return nullptr;
  }
  oop lookup_raw(intptr_t raw_value, bool& cache_init_error) {
    if (_cache == nullptr) {
      cache_init_error = true;
      return nullptr;
    }
    // Have to cast to avoid little/big-endian problems.
    if (sizeof(PrimitiveType) > sizeof(jint)) {
      jlong value = (jlong)raw_value;
      return lookup(value);
    }
    PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
    return lookup(value);
  }
};

typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;

template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = nullptr;
template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = nullptr;
template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = nullptr;
template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = nullptr;
template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = nullptr;
class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
  jobject _true_cache;
  jobject _false_cache;
protected:
  static BooleanBoxCache *_singleton;
  BooleanBoxCache(Thread *thread) {
    InstanceKlass* ik = find_cache_klass(thread, java_lang_Boolean::symbol());
    if (ik->is_in_error_state()) {
      _true_cache = nullptr;
      _false_cache = nullptr;
    } else {
      _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
      _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
    }
  }
  ~BooleanBoxCache() {
    JNIHandles::destroy_global(_true_cache);
    JNIHandles::destroy_global(_false_cache);
  }
public:
  static BooleanBoxCache* singleton(Thread* thread) {
    if (_singleton == nullptr) {
      BooleanBoxCache* s = new BooleanBoxCache(thread);
      if (!Atomic::replace_if_null(&_singleton, s)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup_raw(intptr_t raw_value, bool& cache_in_error) {
    if (_true_cache == nullptr) {
      cache_in_error = true;
      return nullptr;
    }
    // Have to cast to avoid little/big-endian problems.
    jboolean value = (jboolean)*((jint*)&raw_value);
    return lookup(value);
  }
  oop lookup(jboolean value) {
    if (value != 0) {
      return JNIHandles::resolve_non_null(_true_cache);
    }
    return JNIHandles::resolve_non_null(_false_cache);
  }
};

BooleanBoxCache* BooleanBoxCache::_singleton = nullptr;

oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS) {
  Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
  BasicType box_type = vmClasses::box_klass_type(k);
  if (box_type != T_OBJECT) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
    switch(box_type) {
      case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
      default:;
    }
  }
  return nullptr;
}
#endif // INCLUDE_JVMCI
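
// Reallocate all scalar-replaced objects described by the debug info. Any
// pending exception is stashed and restored around the allocations so the
// CHECK-based helpers can run; if an allocation fails, a shared
// OutOfMemoryError is installed and true is returned so that callers can
// pop the affected frames.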
#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

    // Check if the object may be null and has an additional is_init input that needs
    // to be checked before using the field values. Skip re-allocation if it is null.
    if (sv->maybe_null()) {
      assert(k->is_inline_klass(), "must be an inline klass");
      jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
      if (is_init == 0) {
        continue;
      }
    }

    oop obj = nullptr;
    bool cache_init_error = false;
    if (k->is_instance_klass()) {
#if INCLUDE_JVMCI
      CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
      if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
        AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
        obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
        if (obj != nullptr) {
          // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
          abv->set_cached(true);
        } else if (cache_init_error) {
          // Results in an OOME which is valid (as opposed to a class initialization error)
          // and is fine for the rare case of a cache initialization failing.
          failures = true;
        }
      }
#endif // INCLUDE_JVMCI

      InstanceKlass* ik = InstanceKlass::cast(k);
      if (obj == nullptr && !cache_init_error) {
#if COMPILER2_OR_JVMCI
        if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
          obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
        } else {
          obj = ik->allocate_instance(THREAD);
        }
#else
        obj = ik->allocate_instance(THREAD);
#endif // COMPILER2_OR_JVMCI
      }
    } else if (k->is_flatArray_klass()) {
      FlatArrayKlass* ak = FlatArrayKlass::cast(k);
      // Inline type array must be zeroed because not all memory is reassigned
      obj = ak->allocate(sv->field_size(), THREAD);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == nullptr) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}

// We're deoptimizing at the return of a call, inline type fields are
// in registers. When we go back to the interpreter, it will expect a
// reference to an inline type instance. Allocate and initialize it from
// the register values here.
bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
  oop new_vt = vk->realloc_result(map, return_oops, THREAD);
  if (new_vt == nullptr) {
    CLEAR_PENDING_EXCEPTION;
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
  }
  return_oops.clear();
  return_oops.push(Handle(THREAD, new_vt));
  return false;
}

#if INCLUDE_JVMCI
/**
 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
 * we need to somehow be able to recover the actual kind to be able to write the correct
 * amount of bytes.
 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 * the entries at index i + 1 to i + n - 1 are 'markers'.
* For example, if we were writing a short at index 4 of a byte array of size 8, the 1345 * expected form of the array would be: 1346 * 1347 * {b0, b1, b2, b3, INT, marker, b6, b7} 1348 * 1349 * Thus, in order to get back the size of the entry, we simply need to count the number 1350 * of marked entries. 1351 * 1352 * @param virtualArray the virtualized byte array 1353 * @param i index of the virtual entry we are recovering 1354 * @return The number of bytes the entry spans 1355 */ 1356 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) { 1357 int index = i; 1358 while (++index < virtualArray->field_size() && 1359 virtualArray->field_at(index)->is_marker()) {} 1360 return index - i; 1361 } 1362 1363 /** 1364 * If there were a guarantee that byte arrays always started long-aligned, we could 1365 * do a simple check on the parity of the index. Unfortunately, that is not always the 1366 * case. Thus, we check the alignment of the actual address we are writing to. 1367 * If, for example, the address at index 0 happened to be 5 modulo 8, it would then be 1368 * possible to write an aligned long at index 3. 1369 */ 1370 static jbyte* check_alignment_get_addr(typeArrayOop obj, int index, int expected_alignment) { 1371 jbyte* res = obj->byte_at_addr(index); 1372 assert((((intptr_t) res) % expected_alignment) == 0, "Non-aligned write"); 1373 return res; 1374 } 1375 1376 static void byte_array_put(typeArrayOop obj, StackValue* value, int index, int byte_count) { 1377 switch (byte_count) { 1378 case 1: 1379 obj->byte_at_put(index, (jbyte) value->get_jint()); 1380 break; 1381 case 2: 1382 *((jshort *) check_alignment_get_addr(obj, index, 2)) = (jshort) value->get_jint(); 1383 break; 1384 case 4: 1385 *((jint *) check_alignment_get_addr(obj, index, 4)) = value->get_jint(); 1386 break; 1387 case 8: 1388 *((jlong *) check_alignment_get_addr(obj, index, 8)) = (jlong) value->get_intptr(); 1389 break; 1390 default: 1391 ShouldNotReachHere(); 1392 } 1393 } 1394 #endif // INCLUDE_JVMCI 1395 1396 1397 // restore elements of an eliminated type array 1398 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { 1399 int index = 0; 1400 1401 for (int i = 0; i < sv->field_size(); i++) { 1402 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 1403 switch(type) { 1404 case T_LONG: case T_DOUBLE: { 1405 assert(value->type() == T_INT, "Agreement."); 1406 StackValue* low = 1407 StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 1408 #ifdef _LP64 1409 jlong res = (jlong)low->get_intptr(); 1410 #else 1411 jlong res = jlong_from(value->get_jint(), low->get_jint()); 1412 #endif 1413 obj->long_at_put(index, res); 1414 break; 1415 } 1416 1417 case T_INT: case T_FLOAT: { // 4 bytes.
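// A 64-bit long/double may be described by the debug info as occupying two int-sized slots. The checks below peek at the kind of the current (and next) field to detect that case ('big_value') and reassemble the two halves before storing them.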
assert(value->type() == T_INT, "Agreement."); 1419 bool big_value = false; 1420 if (i + 1 < sv->field_size() && type == T_INT) { 1421 if (sv->field_at(i)->is_location()) { 1422 Location::Type type = ((LocationValue*) sv->field_at(i))->location().type(); 1423 if (type == Location::dbl || type == Location::lng) { 1424 big_value = true; 1425 } 1426 } else if (sv->field_at(i)->is_constant_int()) { 1427 ScopeValue* next_scope_field = sv->field_at(i + 1); 1428 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 1429 big_value = true; 1430 } 1431 } 1432 } 1433 1434 if (big_value) { 1435 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 1436 #ifdef _LP64 1437 jlong res = (jlong)low->get_intptr(); 1438 #else 1439 jlong res = jlong_from(value->get_jint(), low->get_jint()); 1440 #endif 1441 obj->int_at_put(index, *(jint*)&res); 1442 obj->int_at_put(++index, *((jint*)&res + 1)); 1443 } else { 1444 obj->int_at_put(index, value->get_jint()); 1445 } 1446 break; 1447 } 1448 1449 case T_SHORT: 1450 assert(value->type() == T_INT, "Agreement."); 1451 obj->short_at_put(index, (jshort)value->get_jint()); 1452 break; 1453 1454 case T_CHAR: 1455 assert(value->type() == T_INT, "Agreement."); 1456 obj->char_at_put(index, (jchar)value->get_jint()); 1457 break; 1458 1459 case T_BYTE: { 1460 assert(value->type() == T_INT, "Agreement."); 1461 #if INCLUDE_JVMCI 1462 // The value we get is erased as a regular int. We will need to find its actual byte count 'by hand'. 1463 int byte_count = count_number_of_bytes_for_entry(sv, i); 1464 byte_array_put(obj, value, index, byte_count); 1465 // According to the byte_count contract, the entries from i + 1 to i + byte_count - 1 are markers. Skip them. 1466 i += byte_count - 1; // Balance the loop counter.
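// Advance index by the full byte width of the entry; the generic index++ at the bottom of the switch is skipped via the 'continue' below.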
1467 index += byte_count; 1468 // index has been updated so continue at top of loop 1469 continue; 1470 #else 1471 obj->byte_at_put(index, (jbyte)value->get_jint()); 1472 break; 1473 #endif // INCLUDE_JVMCI 1474 } 1475 1476 case T_BOOLEAN: { 1477 assert(value->type() == T_INT, "Agreement."); 1478 obj->bool_at_put(index, (jboolean)value->get_jint()); 1479 break; 1480 } 1481 1482 default: 1483 ShouldNotReachHere(); 1484 } 1485 index++; 1486 } 1487 } 1488 1489 // restore fields of an eliminated object array 1490 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) { 1491 for (int i = 0; i < sv->field_size(); i++) { 1492 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 1493 assert(value->type() == T_OBJECT, "object element expected"); 1494 obj->obj_at_put(i, value->get_obj()()); 1495 } 1496 } 1497 1498 class ReassignedField { 1499 public: 1500 int _offset; 1501 BasicType _type; 1502 InstanceKlass* _klass; 1503 bool _is_flat; 1504 public: 1505 ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { } 1506 }; 1507 1508 int compare(ReassignedField* left, ReassignedField* right) { 1509 return left->_offset - right->_offset; 1510 } 1511 1512 // Restore fields of an eliminated instance object using the same field order 1513 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true) 1514 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) { 1515 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>(); 1516 InstanceKlass* ik = klass; 1517 while (ik != nullptr) { 1518 for (AllFieldStream fs(ik); !fs.done(); fs.next()) { 1519 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) { 1520 ReassignedField field; 1521 field._offset = fs.offset(); 1522 field._type = Signature::basic_type(fs.signature()); 1523 if (fs.is_null_free_inline_type()) { 1524 if (fs.is_flat()) { 1525 field._is_flat = true; 1526 // Resolve klass of flat inline type field 1527 field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index())); 1528 } else { 1529 field._type = T_OBJECT; // Can be removed once Q-descriptors have been removed. 
1530 } 1531 } 1532 fields->append(field); 1533 } 1534 } 1535 ik = ik->superklass(); 1536 } 1537 fields->sort(compare); 1538 for (int i = 0; i < fields->length(); i++) { 1539 BasicType type = fields->at(i)._type; 1540 int offset = base_offset + fields->at(i)._offset; 1541 // Check for flat inline type field before accessing the ScopeValue because it might not have any fields 1542 if (fields->at(i)._is_flat) { 1543 // Recursively re-assign flat inline type fields 1544 InstanceKlass* vk = fields->at(i)._klass; 1545 assert(vk != nullptr, "must be resolved"); 1546 offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header 1547 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0); 1548 continue; // Continue because we don't need to increment svIndex 1549 } 1550 ScopeValue* scope_field = sv->field_at(svIndex); 1551 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field); 1552 switch (type) { 1553 case T_OBJECT: 1554 case T_ARRAY: 1555 assert(value->type() == T_OBJECT, "Agreement."); 1556 obj->obj_field_put(offset, value->get_obj()()); 1557 break; 1558 1559 case T_INT: case T_FLOAT: { // 4 bytes. 1560 assert(value->type() == T_INT, "Agreement."); 1561 bool big_value = false; 1562 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) { 1563 if (scope_field->is_location()) { 1564 Location::Type type = ((LocationValue*) scope_field)->location().type(); 1565 if (type == Location::dbl || type == Location::lng) { 1566 big_value = true; 1567 } 1568 } 1569 if (scope_field->is_constant_int()) { 1570 ScopeValue* next_scope_field = sv->field_at(svIndex + 1); 1571 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 1572 big_value = true; 1573 } 1574 } 1575 } 1576 1577 if (big_value) { 1578 i++; 1579 assert(i < fields->length(), "second T_INT field needed"); 1580 assert(fields->at(i)._type == T_INT, "T_INT field needed"); 1581 } else { 1582 obj->int_field_put(offset, value->get_jint()); 1583 break; 1584 } 1585 } 1586 /* no break */ 1587 1588 case T_LONG: case T_DOUBLE: { 1589 assert(value->type() == T_INT, "Agreement."); 1590 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex)); 1591 #ifdef _LP64 1592 jlong res = (jlong)low->get_intptr(); 1593 #else 1594 jlong res = jlong_from(value->get_jint(), low->get_jint()); 1595 #endif 1596 obj->long_field_put(offset, res); 1597 break; 1598 } 1599 1600 case T_SHORT: 1601 assert(value->type() == T_INT, "Agreement."); 1602 obj->short_field_put(offset, (jshort)value->get_jint()); 1603 break; 1604 1605 case T_CHAR: 1606 assert(value->type() == T_INT, "Agreement."); 1607 obj->char_field_put(offset, (jchar)value->get_jint()); 1608 break; 1609 1610 case T_BYTE: 1611 assert(value->type() == T_INT, "Agreement."); 1612 obj->byte_field_put(offset, (jbyte)value->get_jint()); 1613 break; 1614 1615 case T_BOOLEAN: 1616 assert(value->type() == T_INT, "Agreement."); 1617 obj->bool_field_put(offset, (jboolean)value->get_jint()); 1618 break; 1619 1620 default: 1621 ShouldNotReachHere(); 1622 } 1623 svIndex++; 1624 } 1625 return svIndex; 1626 } 1627 1628 // restore fields of an eliminated inline type array 1629 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) { 1630 InlineKlass* vk = vak->element_klass(); 1631 assert(vk->flat_array(), "should only be used for flat inline type arrays"); 1632 // Adjust 
// offset to omit oop header 1633 int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset(); 1634 // Initialize all elements of the flat inline type array 1635 for (int i = 0; i < sv->field_size(); i++) { 1636 ScopeValue* val = sv->field_at(i); 1637 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper())); 1638 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK); 1639 } 1640 } 1641 1642 // restore fields of all eliminated objects and arrays 1643 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) { 1644 for (int i = 0; i < objects->length(); i++) { 1645 assert(objects->at(i)->is_object(), "invalid debug information"); 1646 ObjectValue* sv = (ObjectValue*) objects->at(i); 1647 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); 1648 Handle obj = sv->value(); 1649 assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed"); 1650 #ifndef PRODUCT 1651 if (PrintDeoptimizationDetails) { 1652 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string()); 1653 } 1654 #endif // !PRODUCT 1655 1656 if (obj.is_null()) { 1657 continue; 1658 } 1659 1660 #if INCLUDE_JVMCI 1661 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS. 1662 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) { 1663 continue; 1664 } 1665 #endif // INCLUDE_JVMCI 1666 #if COMPILER2_OR_JVMCI 1667 if (EnableVectorSupport && VectorSupport::is_vector(k)) { 1668 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string()); 1669 ScopeValue* payload = sv->field_at(0); 1670 if (payload->is_location() && 1671 payload->as_LocationValue()->location().type() == Location::vector) { 1672 #ifndef PRODUCT 1673 if (PrintDeoptimizationDetails) { 1674 tty->print_cr("skip field reassignment for this vector - it should be assigned already"); 1675 if (Verbose) { 1676 Handle obj = sv->value(); 1677 k->oop_print_on(obj(), tty); 1678 } 1679 } 1680 #endif // !PRODUCT 1681 continue; // Such vector's value was already restored in VectorSupport::allocate_vector(). 1682 } 1683 // Else fall-through to do assignment for scalar-replaced boxed vector representation 1684 // which could be restored after vector object allocation.
1685 } 1686 #endif /* !COMPILER2_OR_JVMCI */ 1687 if (k->is_instance_klass()) { 1688 InstanceKlass* ik = InstanceKlass::cast(k); 1689 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK); 1690 } else if (k->is_flatArray_klass()) { 1691 FlatArrayKlass* vak = FlatArrayKlass::cast(k); 1692 reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK); 1693 } else if (k->is_typeArray_klass()) { 1694 TypeArrayKlass* ak = TypeArrayKlass::cast(k); 1695 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type()); 1696 } else if (k->is_objArray_klass()) { 1697 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj()); 1698 } 1699 } 1700 } 1701 1702 1703 // relock objects for which synchronization was eliminated 1704 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors, 1705 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) { 1706 bool relocked_objects = false; 1707 for (int i = 0; i < monitors->length(); i++) { 1708 MonitorInfo* mon_info = monitors->at(i); 1709 if (mon_info->eliminated()) { 1710 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed"); 1711 relocked_objects = true; 1712 if (!mon_info->owner_is_scalar_replaced()) { 1713 Handle obj(thread, mon_info->owner()); 1714 markWord mark = obj->mark(); 1715 if (exec_mode == Unpack_none) { 1716 if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) { 1717 // With exec_mode == Unpack_none obj may be thread local and locked in 1718 // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header. 1719 markWord dmw = mark.displaced_mark_helper(); 1720 mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr)); 1721 obj->set_mark(dmw); 1722 } 1723 if (mark.has_monitor()) { 1724 // defer relocking if the deoptee thread is currently waiting for obj 1725 ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor(); 1726 if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) { 1727 assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization"); 1728 mon_info->lock()->set_displaced_header(markWord::unused_mark()); 1729 JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread); 1730 continue; 1731 } 1732 } 1733 } 1734 if (LockingMode == LM_LIGHTWEIGHT && exec_mode == Unpack_none) { 1735 // We have lost information about the correct state of the lock stack. 1736 // Inflate the locks instead. Enter then inflate to avoid races with 1737 // deflation. 
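// Note: under LM_LIGHTWEIGHT the fast-path lock is tracked on the thread's lock stack rather than in a stack-allocated BasicLock, so no BasicLock is passed to enter(); the explicit inflate() below then leaves the object with a full ObjectMonitor owned by the deoptee thread, which the asserts check.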
ObjectSynchronizer::enter(obj, nullptr, deoptee_thread); 1739 assert(mon_info->owner()->is_locked(), "object must be locked now"); 1740 ObjectMonitor* mon = ObjectSynchronizer::inflate(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal); 1741 assert(mon->owner() == deoptee_thread, "must be"); 1742 } else { 1743 BasicLock* lock = mon_info->lock(); 1744 ObjectSynchronizer::enter(obj, lock, deoptee_thread); 1745 assert(mon_info->owner()->is_locked(), "object must be locked now"); 1746 } 1747 } 1748 } 1749 } 1750 return relocked_objects; 1751 } 1752 #endif // COMPILER2_OR_JVMCI 1753 1754 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) { 1755 Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp())); 1756 1757 // Register map for next frame (used for stack crawl). We capture 1758 // the state of the deopt'ing frame's caller. Thus if we need to 1759 // stuff a C2I adapter we can properly fill in the callee-save 1760 // register locations. 1761 frame caller = fr.sender(reg_map); 1762 int frame_size = pointer_delta_as_int(caller.sp(), fr.sp()); 1763 1764 frame sender = caller; 1765 1766 // Since the Java thread being deoptimized will eventually adjust its own stack, 1767 // the vframeArray containing the unpacking information is allocated in the C heap. 1768 // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). 1769 vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures); 1770 1771 // Compare the vframeArray to the collected vframes 1772 assert(array->structural_compare(thread, chunk), "just checking"); 1773 1774 if (TraceDeoptimization) { 1775 ResourceMark rm; 1776 stringStream st; 1777 st.print_cr("DEOPT PACKING thread=" INTPTR_FORMAT " vframeArray=" INTPTR_FORMAT, p2i(thread), p2i(array)); 1778 st.print(" "); 1779 fr.print_on(&st); 1780 st.print_cr(" Virtual frames (innermost/newest first):"); 1781 for (int index = 0; index < chunk->length(); index++) { 1782 compiledVFrame* vf = chunk->at(index); 1783 int bci = vf->raw_bci(); 1784 const char* code_name; 1785 if (bci == SynchronizationEntryBCI) { 1786 code_name = "sync entry"; 1787 } else { 1788 Bytecodes::Code code = vf->method()->code_at(bci); 1789 code_name = Bytecodes::name(code); 1790 } 1791 1792 st.print(" VFrame %d (" INTPTR_FORMAT ")", index, p2i(vf)); 1793 st.print(" - %s", vf->method()->name_and_sig_as_C_string()); 1794 st.print(" - %s", code_name); 1795 st.print_cr(" @ bci=%d ", bci); 1796 } 1797 tty->print_raw(st.freeze()); 1798 tty->cr(); 1799 } 1800 1801 return array; 1802 } 1803 1804 #if COMPILER2_OR_JVMCI 1805 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) { 1806 // Reallocation of some scalar replaced objects failed. Record 1807 // that we need to pop all the interpreter frames for the 1808 // deoptimized compiled frame. 1809 assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?"); 1810 thread->set_frames_to_pop_failed_realloc(array->frames()); 1811 // Unlock all monitors here otherwise the interpreter will see a 1812 // mix of locked and unlocked monitors (because of failed 1813 // reallocations of synchronized objects) and be confused.
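// Walk every virtual frame in the array and exit each monitor that is still held, so the frames to be popped carry no locked monitors.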
1814 for (int i = 0; i < array->frames(); i++) { 1815 MonitorChunk* monitors = array->element(i)->monitors(); 1816 if (monitors != nullptr) { 1817 for (int j = 0; j < monitors->number_of_monitors(); j++) { 1818 BasicObjectLock* src = monitors->at(j); 1819 if (src->obj() != nullptr) { 1820 ObjectSynchronizer::exit(src->obj(), src->lock(), thread); 1821 } 1822 } 1823 array->element(i)->free_monitors(thread); 1824 #ifdef ASSERT 1825 array->element(i)->set_removed_monitors(); 1826 #endif 1827 } 1828 } 1829 } 1830 #endif 1831 1832 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) { 1833 assert(fr.can_be_deoptimized(), "checking frame type"); 1834 1835 gather_statistics(reason, Action_none, Bytecodes::_illegal); 1836 1837 if (LogCompilation && xtty != nullptr) { 1838 CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); 1839 assert(cm != nullptr, "only compiled methods can deopt"); 1840 1841 ttyLocker ttyl; 1842 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc())); 1843 cm->log_identity(xtty); 1844 xtty->end_head(); 1845 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { 1846 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1847 xtty->method(sd->method()); 1848 xtty->end_elem(); 1849 if (sd->is_top()) break; 1850 } 1851 xtty->tail("deoptimized"); 1852 } 1853 1854 Continuation::notify_deopt(thread, fr.sp()); 1855 1856 // Patch the compiled method so that when execution returns to it we will 1857 // deopt the execution state and return to the interpreter. 1858 fr.deoptimize(thread); 1859 } 1860 1861 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) { 1862 // Deoptimize only if the frame comes from compiled code. 1863 // Do not deoptimize the frame which is already patched 1864 // during the execution of the loops below. 1865 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) { 1866 return; 1867 } 1868 ResourceMark rm; 1869 deoptimize_single_frame(thread, fr, reason); 1870 } 1871 1872 #if INCLUDE_JVMCI 1873 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) { 1874 // there is no exception handler for this pc => deoptimize 1875 cm->make_not_entrant(); 1876 1877 // Use Deoptimization::deoptimize for all of its side-effects: 1878 // gathering traps statistics, logging... 1879 // it also patches the return pc but we do not care about that 1880 // since we return a continuation to the deopt_blob below. 
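// Locate the compiled caller of the current runtime stub frame: that is the frame whose scope is marked exception_seen and which is deoptimized below.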
JavaThread* thread = JavaThread::current(); 1882 RegisterMap reg_map(thread, 1883 RegisterMap::UpdateMap::skip, 1884 RegisterMap::ProcessFrames::include, 1885 RegisterMap::WalkContinuation::skip); 1886 frame runtime_frame = thread->last_frame(); 1887 frame caller_frame = runtime_frame.sender(&reg_map); 1888 assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method"); 1889 vframe* vf = vframe::new_vframe(&caller_frame, &reg_map, thread); 1890 compiledVFrame* cvf = compiledVFrame::cast(vf); 1891 ScopeDesc* imm_scope = cvf->scope(); 1892 MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true); 1893 if (imm_mdo != nullptr) { 1894 ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), nullptr); 1895 if (pdata != nullptr && pdata->is_BitData()) { 1896 BitData* bit_data = (BitData*) pdata; 1897 bit_data->set_exception_seen(); 1898 } 1899 } 1900 1901 Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler); 1902 1903 MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true); 1904 if (trap_mdo != nullptr) { 1905 trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler); 1906 } 1907 1908 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 1909 } 1910 #endif 1911 1912 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1913 assert(thread == Thread::current() || 1914 thread->is_handshake_safe_for(Thread::current()) || 1915 SafepointSynchronize::is_at_safepoint(), 1916 "can only deoptimize other thread at a safepoint/handshake"); 1917 // Compute frame and register map based on thread and sp. 1918 RegisterMap reg_map(thread, 1919 RegisterMap::UpdateMap::skip, 1920 RegisterMap::ProcessFrames::include, 1921 RegisterMap::WalkContinuation::skip); 1922 frame fr = thread->last_frame(); 1923 while (fr.id() != id) { 1924 fr = fr.sender(&reg_map); 1925 } 1926 deoptimize(thread, fr, reason); 1927 } 1928 1929 1930 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1931 Thread* current = Thread::current(); 1932 if (thread == current || thread->is_handshake_safe_for(current)) { 1933 Deoptimization::deoptimize_frame_internal(thread, id, reason); 1934 } else { 1935 VM_DeoptimizeFrame deopt(thread, id, reason); 1936 VMThread::execute(&deopt); 1937 } 1938 } 1939 1940 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) { 1941 deoptimize_frame(thread, id, Reason_constraint); 1942 } 1943 1944 // JVMTI PopFrame support 1945 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address)) 1946 { 1947 assert(thread == JavaThread::current(), "pre-condition"); 1948 thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address); 1949 } 1950 JRT_END 1951 1952 MethodData* 1953 Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m, 1954 bool create_if_missing) { 1955 JavaThread* THREAD = thread; // For exception macros. 1956 MethodData* mdo = m()->method_data(); 1957 if (mdo == nullptr && create_if_missing && !HAS_PENDING_EXCEPTION) { 1958 // Build an MDO. Ignore errors like OutOfMemory; 1959 // that simply means we won't have an MDO to update. 1960 Method::build_profiling_method_data(m, THREAD); 1961 if (HAS_PENDING_EXCEPTION) { 1962 // Only metaspace OOM is expected. No Java code executed.
1963 assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here"); 1964 CLEAR_PENDING_EXCEPTION; 1965 } 1966 mdo = m()->method_data(); 1967 } 1968 return mdo; 1969 } 1970 1971 #if COMPILER2_OR_JVMCI 1972 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) { 1973 // In case of an unresolved klass entry, load the class. 1974 // This path is exercised from case _ldc in Parse::do_one_bytecode, 1975 // and probably nowhere else. 1976 // Even that case would benefit from simply re-interpreting the 1977 // bytecode, without paying special attention to the class index. 1978 // So this whole "class index" feature should probably be removed. 1979 1980 if (constant_pool->tag_at(index).is_unresolved_klass()) { 1981 Klass* tk = constant_pool->klass_at(index, THREAD); 1982 if (HAS_PENDING_EXCEPTION) { 1983 // Exception happened during classloading. We ignore the exception here, since it 1984 // is going to be rethrown since the current activation is going to be deoptimized and 1985 // the interpreter will re-execute the bytecode. 1986 // Do not clear probable Async Exceptions. 1987 CLEAR_PENDING_NONASYNC_EXCEPTION; 1988 // Class loading called java code which may have caused a stack 1989 // overflow. If the exception was thrown right before the return 1990 // to the runtime the stack is no longer guarded. Reguard the 1991 // stack otherwise if we return to the uncommon trap blob and the 1992 // stack bang causes a stack overflow we crash. 1993 JavaThread* jt = THREAD; 1994 bool guard_pages_enabled = jt->stack_overflow_state()->reguard_stack_if_needed(); 1995 assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash"); 1996 } 1997 return; 1998 } 1999 2000 assert(!constant_pool->tag_at(index).is_symbol(), 2001 "no symbolic names here, please"); 2002 } 2003 2004 #if INCLUDE_JFR 2005 2006 class DeoptReasonSerializer : public JfrSerializer { 2007 public: 2008 void serialize(JfrCheckpointWriter& writer) { 2009 writer.write_count((u4)(Deoptimization::Reason_LIMIT + 1)); // + Reason::many (-1) 2010 for (int i = -1; i < Deoptimization::Reason_LIMIT; ++i) { 2011 writer.write_key((u8)i); 2012 writer.write(Deoptimization::trap_reason_name(i)); 2013 } 2014 } 2015 }; 2016 2017 class DeoptActionSerializer : public JfrSerializer { 2018 public: 2019 void serialize(JfrCheckpointWriter& writer) { 2020 static const u4 nof_actions = Deoptimization::Action_LIMIT; 2021 writer.write_count(nof_actions); 2022 for (u4 i = 0; i < Deoptimization::Action_LIMIT; ++i) { 2023 writer.write_key(i); 2024 writer.write(Deoptimization::trap_action_name((int)i)); 2025 } 2026 } 2027 }; 2028 2029 static void register_serializers() { 2030 static int critical_section = 0; 2031 if (1 == critical_section || Atomic::cmpxchg(&critical_section, 0, 1) == 1) { 2032 return; 2033 } 2034 JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONREASON, true, new DeoptReasonSerializer()); 2035 JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONACTION, true, new DeoptActionSerializer()); 2036 } 2037 2038 static void post_deoptimization_event(CompiledMethod* nm, 2039 const Method* method, 2040 int trap_bci, 2041 int instruction, 2042 Deoptimization::DeoptReason reason, 2043 Deoptimization::DeoptAction action) { 2044 assert(nm != nullptr, "invariant"); 2045 assert(method != nullptr, "invariant"); 2046 if (EventDeoptimization::is_enabled()) { 2047 static bool serializers_registered = false; 2048 if (!serializers_registered) { 2049 
register_serializers(); 2050 serializers_registered = true; 2051 } 2052 EventDeoptimization event; 2053 event.set_compileId(nm->compile_id()); 2054 event.set_compiler(nm->compiler_type()); 2055 event.set_method(method); 2056 event.set_lineNumber(method->line_number_from_bci(trap_bci)); 2057 event.set_bci(trap_bci); 2058 event.set_instruction(instruction); 2059 event.set_reason(reason); 2060 event.set_action(action); 2061 event.commit(); 2062 } 2063 } 2064 2065 #endif // INCLUDE_JFR 2066 2067 static void log_deopt(CompiledMethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci, 2068 const char* reason_name, const char* reason_action) { 2069 LogTarget(Debug, deoptimization) lt; 2070 if (lt.is_enabled()) { 2071 LogStream ls(lt); 2072 bool is_osr = nm->is_osr_method(); 2073 ls.print("cid=%4d %s level=%d", 2074 nm->compile_id(), (is_osr ? "osr" : " "), nm->comp_level()); 2075 ls.print(" %s", tm->name_and_sig_as_C_string()); 2076 ls.print(" trap_bci=%d ", trap_bci); 2077 if (is_osr) { 2078 ls.print("osr_bci=%d ", nm->osr_entry_bci()); 2079 } 2080 ls.print("%s ", reason_name); 2081 ls.print("%s ", reason_action); 2082 ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT, 2083 pc, fr.pc() - nm->code_begin()); 2084 } 2085 } 2086 2087 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint trap_request)) { 2088 HandleMark hm(current); 2089 2090 // uncommon_trap() is called at the beginning of the uncommon trap 2091 // handler. Note this fact before we start generating temporary frames 2092 // that can confuse an asynchronous stack walker. This counter is 2093 // decremented at the end of unpack_frames(). 2094 2095 current->inc_in_deopt_handler(); 2096 2097 #if INCLUDE_JVMCI 2098 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid 2099 RegisterMap reg_map(current, 2100 RegisterMap::UpdateMap::include, 2101 RegisterMap::ProcessFrames::include, 2102 RegisterMap::WalkContinuation::skip); 2103 #else 2104 RegisterMap reg_map(current, 2105 RegisterMap::UpdateMap::skip, 2106 RegisterMap::ProcessFrames::include, 2107 RegisterMap::WalkContinuation::skip); 2108 #endif 2109 frame stub_frame = current->last_frame(); 2110 frame fr = stub_frame.sender(&reg_map); 2111 2112 // Log a message 2113 Events::log_deopt_message(current, "Uncommon trap: trap_request=" INT32_FORMAT_X_0 " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT, 2114 trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin()); 2115 2116 { 2117 ResourceMark rm; 2118 2119 DeoptReason reason = trap_request_reason(trap_request); 2120 DeoptAction action = trap_request_action(trap_request); 2121 #if INCLUDE_JVMCI 2122 int debug_id = trap_request_debug_id(trap_request); 2123 #endif 2124 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1 2125 2126 vframe* vf = vframe::new_vframe(&fr, &reg_map, current); 2127 compiledVFrame* cvf = compiledVFrame::cast(vf); 2128 2129 CompiledMethod* nm = cvf->code(); 2130 2131 ScopeDesc* trap_scope = cvf->scope(); 2132 2133 bool is_receiver_constraint_failure = COMPILER2_PRESENT(VerifyReceiverTypes &&) (reason == Deoptimization::Reason_receiver_constraint); 2134 2135 if (is_receiver_constraint_failure) { 2136 tty->print_cr(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), 2137 trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string() 2138 JVMCI_ONLY(COMMA debug_id)); 2139 } 2140 2141 methodHandle
trap_method(current, trap_scope->method()); 2142 int trap_bci = trap_scope->bci(); 2143 #if INCLUDE_JVMCI 2144 jlong speculation = current->pending_failed_speculation(); 2145 if (nm->is_compiled_by_jvmci()) { 2146 nm->as_nmethod()->update_speculation(current); 2147 } else { 2148 assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers"); 2149 } 2150 2151 if (trap_bci == SynchronizationEntryBCI) { 2152 trap_bci = 0; 2153 current->set_pending_monitorenter(true); 2154 } 2155 2156 if (reason == Deoptimization::Reason_transfer_to_interpreter) { 2157 current->set_pending_transfer_to_interpreter(true); 2158 } 2159 #endif 2160 2161 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci); 2162 // Record this event in the histogram. 2163 gather_statistics(reason, action, trap_bc); 2164 2165 // Ensure that we can record deopt. history: 2166 // Need MDO to record RTM code generation state. 2167 bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking ); 2168 2169 methodHandle profiled_method; 2170 #if INCLUDE_JVMCI 2171 if (nm->is_compiled_by_jvmci()) { 2172 profiled_method = methodHandle(current, nm->method()); 2173 } else { 2174 profiled_method = trap_method; 2175 } 2176 #else 2177 profiled_method = trap_method; 2178 #endif 2179 2180 MethodData* trap_mdo = 2181 get_method_data(current, profiled_method, create_if_missing); 2182 2183 { // Log Deoptimization event for JFR, UL and event system 2184 Method* tm = trap_method(); 2185 const char* reason_name = trap_reason_name(reason); 2186 const char* reason_action = trap_action_name(action); 2187 intptr_t pc = p2i(fr.pc()); 2188 2189 JFR_ONLY(post_deoptimization_event(nm, tm, trap_bci, trap_bc, reason, action);) 2190 log_deopt(nm, tm, pc, fr, trap_bci, reason_name, reason_action); 2191 Events::log_deopt_message(current, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s", 2192 reason_name, reason_action, pc, 2193 tm->name_and_sig_as_C_string(), trap_bci, nm->compiler_name()); 2194 } 2195 2196 // Print a bunch of diagnostics, if requested. 2197 if (TraceDeoptimization || LogCompilation || is_receiver_constraint_failure) { 2198 ResourceMark rm; 2199 ttyLocker ttyl; 2200 char buf[100]; 2201 if (xtty != nullptr) { 2202 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", 2203 os::current_thread_id(), 2204 format_trap_request(buf, sizeof(buf), trap_request)); 2205 #if INCLUDE_JVMCI 2206 if (speculation != 0) { 2207 xtty->print(" speculation='" JLONG_FORMAT "'", speculation); 2208 } 2209 #endif 2210 nm->log_identity(xtty); 2211 } 2212 Symbol* class_name = nullptr; 2213 bool unresolved = false; 2214 if (unloaded_class_index >= 0) { 2215 constantPoolHandle constants (current, trap_method->constants()); 2216 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { 2217 class_name = constants->klass_name_at(unloaded_class_index); 2218 unresolved = true; 2219 if (xtty != nullptr) 2220 xtty->print(" unresolved='1'"); 2221 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { 2222 class_name = constants->symbol_at(unloaded_class_index); 2223 } 2224 if (xtty != nullptr) 2225 xtty->name(class_name); 2226 } 2227 if (xtty != nullptr && trap_mdo != nullptr && (int)reason < (int)MethodData::_trap_hist_limit) { 2228 // Dump the relevant MDO state. 2229 // This is the deopt count for the current reason, any previous 2230 // reasons or recompiles seen at this point. 
2231 int dcnt = trap_mdo->trap_count(reason); 2232 if (dcnt != 0) 2233 xtty->print(" count='%d'", dcnt); 2234 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); 2235 int dos = (pdata == nullptr)? 0: pdata->trap_state(); 2236 if (dos != 0) { 2237 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); 2238 if (trap_state_is_recompiled(dos)) { 2239 int recnt2 = trap_mdo->overflow_recompile_count(); 2240 if (recnt2 != 0) 2241 xtty->print(" recompiles2='%d'", recnt2); 2242 } 2243 } 2244 } 2245 if (xtty != nullptr) { 2246 xtty->stamp(); 2247 xtty->end_head(); 2248 } 2249 if (TraceDeoptimization) { // make noise on the tty 2250 stringStream st; 2251 st.print("UNCOMMON TRAP method=%s", trap_scope->method()->name_and_sig_as_C_string()); 2252 st.print(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT JVMCI_ONLY(", debug_id=%d"), 2253 trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin() JVMCI_ONLY(COMMA debug_id)); 2254 st.print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id()); 2255 #if INCLUDE_JVMCI 2256 if (nm->is_nmethod()) { 2257 const char* installed_code_name = nm->as_nmethod()->jvmci_name(); 2258 if (installed_code_name != nullptr) { 2259 st.print(" (JVMCI: installed code name=%s) ", installed_code_name); 2260 } 2261 } 2262 #endif 2263 st.print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), 2264 p2i(fr.pc()), 2265 os::current_thread_id(), 2266 trap_reason_name(reason), 2267 trap_action_name(action), 2268 unloaded_class_index 2269 #if INCLUDE_JVMCI 2270 , debug_id 2271 #endif 2272 ); 2273 if (class_name != nullptr) { 2274 st.print(unresolved ? " unresolved class: " : " symbol: "); 2275 class_name->print_symbol_on(&st); 2276 } 2277 st.cr(); 2278 tty->print_raw(st.freeze()); 2279 } 2280 if (xtty != nullptr) { 2281 // Log the precise location of the trap. 2282 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 2283 xtty->begin_elem("jvms bci='%d'", sd->bci()); 2284 xtty->method(sd->method()); 2285 xtty->end_elem(); 2286 if (sd->is_top()) break; 2287 } 2288 xtty->tail("uncommon_trap"); 2289 } 2290 } 2291 // (End diagnostic printout.) 2292 2293 if (is_receiver_constraint_failure) { 2294 fatal("missing receiver type check"); 2295 } 2296 2297 // Load class if necessary 2298 if (unloaded_class_index >= 0) { 2299 constantPoolHandle constants(current, trap_method->constants()); 2300 load_class_by_index(constants, unloaded_class_index, THREAD); 2301 } 2302 2303 // Flush the nmethod if necessary and desirable. 2304 // 2305 // We need to avoid situations where we are re-flushing the nmethod 2306 // because of a hot deoptimization site. Repeated flushes at the same 2307 // point need to be detected by the compiler and avoided. If the compiler 2308 // cannot avoid them (or has a bug and "refuses" to avoid them), this 2309 // module must take measures to avoid an infinite cycle of recompilation 2310 // and deoptimization. There are several such measures: 2311 // 2312 // 1. If a recompilation is ordered a second time at some site X 2313 // and for the same reason R, the action is adjusted to 'reinterpret', 2314 // to give the interpreter time to exercise the method more thoroughly. 2315 // If this happens, the method's overflow_recompile_count is incremented. 2316 // 2317 // 2. If the compiler fails to reduce the deoptimization rate, then 2318 // the method's overflow_recompile_count will begin to exceed the set 2319 // limit PerBytecodeRecompilationCutoff. 
// If this happens, the action 2320 // is adjusted to 'make_not_compilable', and the method is abandoned 2321 // to the interpreter. This is a performance hit for hot methods, 2322 // but is better than a disastrous infinite cycle of recompilations. 2323 // (Actually, only the method containing the site X is abandoned.) 2324 // 2325 // 3. In parallel with the previous measures, if the total number of 2326 // recompilations of a method exceeds the much larger set limit 2327 // PerMethodRecompilationCutoff, the method is abandoned. 2328 // This should only happen if the method is very large and has 2329 // many "lukewarm" deoptimizations. The code which enforces this 2330 // limit is elsewhere (class nmethod, class Method). 2331 // 2332 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance 2333 // to recompile at each bytecode independently of the per-BCI cutoff. 2334 // 2335 // The decision to update code is up to the compiler, and is encoded 2336 // in the Action_xxx code. If the compiler requests Action_none 2337 // no trap state is changed, no compiled code is changed, and the 2338 // computation suffers along in the interpreter. 2339 // 2340 // The other action codes specify various tactics for decompilation 2341 // and recompilation. Action_maybe_recompile is the loosest, and 2342 // allows the compiled code to stay around until enough traps are seen, 2343 // and until the compiler gets around to recompiling the trapping method. 2344 // 2345 // The other actions cause immediate removal of the present code. 2346 2347 // Traps caused by injected profile shouldn't pollute trap counts. 2348 bool injected_profile_trap = trap_method->has_injected_profile() && 2349 (reason == Reason_intrinsic || reason == Reason_unreached); 2350 2351 bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap; 2352 bool make_not_entrant = false; 2353 bool make_not_compilable = false; 2354 bool reprofile = false; 2355 switch (action) { 2356 case Action_none: 2357 // Keep the old code. 2358 update_trap_state = false; 2359 break; 2360 case Action_maybe_recompile: 2361 // Do not need to invalidate the present code, but we can 2362 // initiate another compilation. 2363 // Start compiler without (necessarily) invalidating the nmethod. 2364 // The system will tolerate the old code, but new code should be 2365 // generated when possible. 2366 break; 2367 case Action_reinterpret: 2368 // Go back into the interpreter for a while, and then consider 2369 // recompiling from scratch. 2370 make_not_entrant = true; 2371 // Reset invocation counter for outermost method. 2372 // This will allow the interpreter to exercise the bytecodes 2373 // for a while before recompiling. 2374 // By contrast, Action_make_not_entrant is immediate. 2375 // 2376 // Note that the compiler will track null_check, null_assert, 2377 // range_check, and class_check events and log them as if they 2378 // had been traps taken from compiled code. This will update 2379 // the MDO trap history so that the next compilation will 2380 // properly detect hot trap sites. 2381 reprofile = true; 2382 break; 2383 case Action_make_not_entrant: 2384 // Request immediate recompilation, and get rid of the old code. 2385 // Make them not entrant, so next time they are called they get 2386 // recompiled. Unloaded classes are loaded now so recompile before next 2387 // time they are called. Same for uninitialized. The interpreter will 2388 // link the missing class, if any.
2389 make_not_entrant = true; 2390 break; 2391 case Action_make_not_compilable: 2392 // Give up on compiling this method at all. 2393 make_not_entrant = true; 2394 make_not_compilable = true; 2395 break; 2396 default: 2397 ShouldNotReachHere(); 2398 } 2399 2400 // Setting +ProfileTraps fixes the following, on all platforms: 2401 // 4852688: ProfileInterpreter is off by default for ia64. The result is 2402 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the 2403 // recompile relies on a MethodData* to record heroic opt failures. 2404 2405 // Whether the interpreter is producing MDO data or not, we also need 2406 // to use the MDO to detect hot deoptimization points and control 2407 // aggressive optimization. 2408 bool inc_recompile_count = false; 2409 ProfileData* pdata = nullptr; 2410 if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != nullptr) { 2411 assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity"); 2412 uint this_trap_count = 0; 2413 bool maybe_prior_trap = false; 2414 bool maybe_prior_recompile = false; 2415 pdata = query_update_method_data(trap_mdo, trap_bci, reason, true, 2416 #if INCLUDE_JVMCI 2417 nm->is_compiled_by_jvmci() && nm->is_osr_method(), 2418 #endif 2419 nm->method(), 2420 //outputs: 2421 this_trap_count, 2422 maybe_prior_trap, 2423 maybe_prior_recompile); 2424 // Because the interpreter also counts null, div0, range, and class 2425 // checks, these traps from compiled code are double-counted. 2426 // This is harmless; it just means that the PerXTrapLimit values 2427 // are in effect a little smaller than they look. 2428 2429 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 2430 if (per_bc_reason != Reason_none) { 2431 // Now take action based on the partially known per-BCI history. 2432 if (maybe_prior_trap 2433 && this_trap_count >= (uint)PerBytecodeTrapLimit) { 2434 // If there are too many traps at this BCI, force a recompile. 2435 // This will allow the compiler to see the limit overflow, and 2436 // take corrective action, if possible. The compiler generally 2437 // does not use the exact PerBytecodeTrapLimit value, but instead 2438 // changes its tactics if it sees any traps at all. This provides 2439 // a little hysteresis, delaying a recompile until a trap happens 2440 // several times. 2441 // 2442 // Actually, since there is only one bit of counter per BCI, 2443 // the possible per-BCI counts are {0,1,(per-method count)}. 2444 // This produces accurate results if in fact there is only 2445 // one hot trap site, but begins to get fuzzy if there are 2446 // many sites. For example, if there are ten sites each 2447 // trapping two or more times, they each get the blame for 2448 // all of their traps. 2449 make_not_entrant = true; 2450 } 2451 2452 // Detect repeated recompilation at the same BCI, and enforce a limit. 2453 if (make_not_entrant && maybe_prior_recompile) { 2454 // More than one recompile at this point. 2455 inc_recompile_count = maybe_prior_trap; 2456 } 2457 } else { 2458 // For reasons which are not recorded per-bytecode, we simply 2459 // force recompiles unconditionally. 2460 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) 2461 make_not_entrant = true; 2462 } 2463 2464 // Go back to the compiler if there are too many traps in this method. 2465 if (this_trap_count >= per_method_trap_limit(reason)) { 2466 // If there are too many traps in this method, force a recompile. 
2467 // This will allow the compiler to see the limit overflow, and 2468 // take corrective action, if possible. 2469 // (This condition is an unlikely backstop only, because the 2470 // PerBytecodeTrapLimit is more likely to take effect first, 2471 // if it is applicable.) 2472 make_not_entrant = true; 2473 } 2474 2475 // Here's more hysteresis: If there has been a recompile at 2476 // this trap point already, run the method in the interpreter 2477 // for a while to exercise it more thoroughly. 2478 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { 2479 reprofile = true; 2480 } 2481 } 2482 2483 // Take requested actions on the method: 2484 2485 // Recompile 2486 if (make_not_entrant) { 2487 if (!nm->make_not_entrant()) { 2488 return; // the call did not change nmethod's state 2489 } 2490 2491 if (pdata != nullptr) { 2492 // Record the recompilation event, if any. 2493 int tstate0 = pdata->trap_state(); 2494 int tstate1 = trap_state_set_recompiled(tstate0, true); 2495 if (tstate1 != tstate0) 2496 pdata->set_trap_state(tstate1); 2497 } 2498 2499 #if INCLUDE_RTM_OPT 2500 // Restart collecting RTM locking abort statistic if the method 2501 // is recompiled for a reason other than RTM state change. 2502 // Assume that in new recompiled code the statistic could be different, 2503 // for example, due to different inlining. 2504 if ((reason != Reason_rtm_state_change) && (trap_mdo != nullptr) && 2505 UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { 2506 trap_mdo->atomic_set_rtm_state(ProfileRTM); 2507 } 2508 #endif 2509 // For code aging we count traps separately here, using make_not_entrant() 2510 // as a guard against simultaneous deopts in multiple threads. 2511 if (reason == Reason_tenured && trap_mdo != nullptr) { 2512 trap_mdo->inc_tenure_traps(); 2513 } 2514 } 2515 2516 if (inc_recompile_count) { 2517 trap_mdo->inc_overflow_recompile_count(); 2518 if ((uint)trap_mdo->overflow_recompile_count() > 2519 (uint)PerBytecodeRecompilationCutoff) { 2520 // Give up on the method containing the bad BCI. 2521 if (trap_method() == nm->method()) { 2522 make_not_compilable = true; 2523 } else { 2524 trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization); 2525 // But give grace to the enclosing nm->method(). 
} 2527 } 2528 } 2529 2530 // Reprofile 2531 if (reprofile) { 2532 CompilationPolicy::reprofile(trap_scope, nm->is_osr_method()); 2533 } 2534 2535 // Give up compiling 2536 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) { 2537 assert(make_not_entrant, "consistent"); 2538 nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization); 2539 } 2540 2541 } // Free marked resources 2542 2543 } 2544 JRT_END 2545 2546 ProfileData* 2547 Deoptimization::query_update_method_data(MethodData* trap_mdo, 2548 int trap_bci, 2549 Deoptimization::DeoptReason reason, 2550 bool update_total_trap_count, 2551 #if INCLUDE_JVMCI 2552 bool is_osr, 2553 #endif 2554 Method* compiled_method, 2555 //outputs: 2556 uint& ret_this_trap_count, 2557 bool& ret_maybe_prior_trap, 2558 bool& ret_maybe_prior_recompile) { 2559 bool maybe_prior_trap = false; 2560 bool maybe_prior_recompile = false; 2561 uint this_trap_count = 0; 2562 if (update_total_trap_count) { 2563 uint idx = reason; 2564 #if INCLUDE_JVMCI 2565 if (is_osr) { 2566 // Upper half of history array used for traps in OSR compilations 2567 idx += Reason_TRAP_HISTORY_LENGTH; 2568 } 2569 #endif 2570 uint prior_trap_count = trap_mdo->trap_count(idx); 2571 this_trap_count = trap_mdo->inc_trap_count(idx); 2572 2573 // If the runtime cannot find a place to store trap history, 2574 // it is estimated based on the general condition of the method. 2575 // If the method has ever been recompiled, or has ever incurred 2576 // a trap with the present reason, then this BCI is assumed 2577 // (pessimistically) to be the culprit. 2578 maybe_prior_trap = (prior_trap_count != 0); 2579 maybe_prior_recompile = (trap_mdo->decompile_count() != 0); 2580 } 2581 ProfileData* pdata = nullptr; 2582 2583 2584 // For reasons which are recorded per bytecode, we check per-BCI data. 2585 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 2586 assert(per_bc_reason != Reason_none || update_total_trap_count, "must be"); 2587 if (per_bc_reason != Reason_none) { 2588 // Find the profile data for this BCI. If there isn't one, 2589 // try to allocate one from the MDO's set of spares. 2590 // This will let us detect a repeated trap at this point. 2591 pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : nullptr); 2592 2593 if (pdata != nullptr) { 2594 if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) { 2595 if (LogCompilation && xtty != nullptr) { 2596 ttyLocker ttyl; 2597 // no more room for speculative traps in this MDO 2598 xtty->elem("speculative_traps_oom"); 2599 } 2600 } 2601 // Query the trap state of this profile datum. 2602 int tstate0 = pdata->trap_state(); 2603 if (!trap_state_has_reason(tstate0, per_bc_reason)) 2604 maybe_prior_trap = false; 2605 if (!trap_state_is_recompiled(tstate0)) 2606 maybe_prior_recompile = false; 2607 2608 // Update the trap state of this profile datum. 2609 int tstate1 = tstate0; 2610 // Record the reason. 2611 tstate1 = trap_state_add_reason(tstate1, per_bc_reason); 2612 // Store the updated state on the MDO, for next time. 2613 if (tstate1 != tstate0) 2614 pdata->set_trap_state(tstate1); 2615 } else { 2616 if (LogCompilation && xtty != nullptr) { 2617 ttyLocker ttyl; 2618 // Missing MDP? Leave a small complaint in the log.
2619 xtty->elem("missing_mdp bci='%d'", trap_bci); 2620 } 2621 } 2622 } 2623 2624 // Return results: 2625 ret_this_trap_count = this_trap_count; 2626 ret_maybe_prior_trap = maybe_prior_trap; 2627 ret_maybe_prior_recompile = maybe_prior_recompile; 2628 return pdata; 2629 } 2630 2631 void 2632 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) { 2633 ResourceMark rm; 2634 // Ignored outputs: 2635 uint ignore_this_trap_count; 2636 bool ignore_maybe_prior_trap; 2637 bool ignore_maybe_prior_recompile; 2638 assert(!reason_is_speculate(reason), "reason speculate only used by compiler"); 2639 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts 2640 bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler); 2641 query_update_method_data(trap_mdo, trap_bci, 2642 (DeoptReason)reason, 2643 update_total_counts, 2644 #if INCLUDE_JVMCI 2645 false, 2646 #endif 2647 nullptr, 2648 ignore_this_trap_count, 2649 ignore_maybe_prior_trap, 2650 ignore_maybe_prior_recompile); 2651 } 2652 2653 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode) { 2654 // Enable WXWrite: current function is called from methods compiled by C2 directly 2655 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current)); 2656 2657 // Still in Java no safepoints 2658 { 2659 // This enters VM and may safepoint 2660 uncommon_trap_inner(current, trap_request); 2661 } 2662 HandleMark hm(current); 2663 return fetch_unroll_info_helper(current, exec_mode); 2664 } 2665 2666 // Local derived constants. 2667 // Further breakdown of DataLayout::trap_state, as promised by DataLayout. 2668 const int DS_REASON_MASK = ((uint)DataLayout::trap_mask) >> 1; 2669 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK; 2670 2671 //---------------------------trap_state_reason--------------------------------- 2672 Deoptimization::DeoptReason 2673 Deoptimization::trap_state_reason(int trap_state) { 2674 // This assert provides the link between the width of DataLayout::trap_bits 2675 // and the encoding of "recorded" reasons. It ensures there are enough 2676 // bits to store all needed reasons in the per-BCI MDO profile. 
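// Illustrative decoding (the concrete constants are assumptions for the example, not necessarily DataLayout's actual values): if trap_bits spanned 8 bits, DS_REASON_MASK would be 0x7f and DS_RECOMPILE_BIT 0x80; a trap_state of (Reason_null_check | 0x80) would then decode to reason 'null_check' with the recompile bit set, while a reason field equal to DS_REASON_MASK itself decodes to Reason_many.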
2677 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2678 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2679 trap_state -= recompile_bit; 2680 if (trap_state == DS_REASON_MASK) { 2681 return Reason_many; 2682 } else { 2683 assert((int)Reason_none == 0, "state=0 => Reason_none"); 2684 return (DeoptReason)trap_state; 2685 } 2686 } 2687 //-------------------------trap_state_has_reason------------------------------- 2688 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2689 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); 2690 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2691 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2692 trap_state -= recompile_bit; 2693 if (trap_state == DS_REASON_MASK) { 2694 return -1; // true, unspecifically (bottom of state lattice) 2695 } else if (trap_state == reason) { 2696 return 1; // true, definitely 2697 } else if (trap_state == 0) { 2698 return 0; // false, definitely (top of state lattice) 2699 } else { 2700 return 0; // false, definitely 2701 } 2702 } 2703 //-------------------------trap_state_add_reason------------------------------- 2704 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { 2705 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); 2706 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2707 trap_state -= recompile_bit; 2708 if (trap_state == DS_REASON_MASK) { 2709 return trap_state + recompile_bit; // already at state lattice bottom 2710 } else if (trap_state == reason) { 2711 return trap_state + recompile_bit; // the condition is already true 2712 } else if (trap_state == 0) { 2713 return reason + recompile_bit; // no condition has yet been true 2714 } else { 2715 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom 2716 } 2717 } 2718 //-----------------------trap_state_is_recompiled------------------------------ 2719 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2720 return (trap_state & DS_RECOMPILE_BIT) != 0; 2721 } 2722 //-----------------------trap_state_set_recompiled----------------------------- 2723 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { 2724 if (z) return trap_state | DS_RECOMPILE_BIT; 2725 else return trap_state & ~DS_RECOMPILE_BIT; 2726 } 2727 //---------------------------format_trap_state--------------------------------- 2728 // This is used for debugging and diagnostics, including LogFile output. 2729 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2730 int trap_state) { 2731 assert(buflen > 0, "sanity"); 2732 DeoptReason reason = trap_state_reason(trap_state); 2733 bool recomp_flag = trap_state_is_recompiled(trap_state); 2734 // Re-encode the state from its decoded components. 2735 int decoded_state = 0; 2736 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many) 2737 decoded_state = trap_state_add_reason(decoded_state, reason); 2738 if (recomp_flag) 2739 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag); 2740 // If the state re-encodes properly, format it symbolically. 2741 // Because this routine is used for debugging and diagnostics, 2742 // be robust even if the state is a strange value. 2743 size_t len; 2744 if (decoded_state != trap_state) { 2745 // Random buggy state that doesn't decode?? 
    jio_snprintf(buf, buflen, "#%d", trap_state);
  } else {
    jio_snprintf(buf, buflen, "%s%s",
                 trap_reason_name(reason),
                 recomp_flag ? " recompiled" : "");
  }
  return buf;
}


//--------------------------------statics--------------------------------------
const char* Deoptimization::_trap_reason_name[] = {
  // Note: Keep this in sync with enum DeoptReason.
  "none",
  "null_check",
  "null_assert" JVMCI_ONLY("_or_unreached0"),
  "range_check",
  "class_check",
  "array_check",
  "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
  "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
  "profile_predicate",
  "unloaded",
  "uninitialized",
  "initialized",
  "unreached",
  "unhandled",
  "constraint",
  "div0_check",
  "age",
  "predicate",
  "loop_limit_check",
  "speculate_class_check",
  "speculate_null_check",
  "speculate_null_assert",
  "rtm_state_change",
  "unstable_if",
  "unstable_fused_if",
  "receiver_constraint",
#if INCLUDE_JVMCI
  "aliasing",
  "transfer_to_interpreter",
  "not_compiled_exception_handler",
  "unresolved",
  "jsr_mismatch",
#endif
  "tenured"
};
const char* Deoptimization::_trap_action_name[] = {
  // Note: Keep this in sync with enum DeoptAction.
  "none",
  "maybe_recompile",
  "reinterpret",
  "make_not_entrant",
  "make_not_compilable"
};

const char* Deoptimization::trap_reason_name(int reason) {
  // Check that every reason has a name.
  STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);

  if (reason == Reason_many)  return "many";
  if ((uint)reason < Reason_LIMIT)
    return _trap_reason_name[reason];
  static char buf[20];
  os::snprintf_checked(buf, sizeof(buf), "reason%d", reason);
  return buf;
}
const char* Deoptimization::trap_action_name(int action) {
  // Check that every action has a name.
  STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);

  if ((uint)action < Action_LIMIT)
    return _trap_action_name[action];
  static char buf[20];
  os::snprintf_checked(buf, sizeof(buf), "action%d", action);
  return buf;
}

// This is used for debugging and diagnostics, including LogFile output.
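// A minimal usage sketch (hypothetical caller, not part of this file; the
// 'trap_request' value is assumed to encode Reason_unloaded/Action_reinterpret
// with constant-pool index 42):
//
//   char buf[100];
//   tty->print_cr("%s", Deoptimization::format_trap_request(buf, sizeof(buf), trap_request));
//   // -> reason='unloaded' action='reinterpret' index='42'  (plus debug_id with JVMCI)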
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                int trap_request) {
  jint unloaded_class_index = trap_request_index(trap_request);
  const char* reason = trap_reason_name(trap_request_reason(trap_request));
  const char* action = trap_action_name(trap_request_action(trap_request));
#if INCLUDE_JVMCI
  int debug_id = trap_request_debug_id(trap_request);
#endif
  if (unloaded_class_index < 0) {
    jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
                 reason, action
#if INCLUDE_JVMCI
                 ,debug_id
#endif
                 );
  } else {
    jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
                 reason, action, unloaded_class_index
#if INCLUDE_JVMCI
                 ,debug_id
#endif
                 );
  }
  return buf;
}

juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
    [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};

enum {
  LSB_BITS = 8,
  LSB_MASK = right_n_bits(LSB_BITS)
};

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  assert(action >= 0 && action < Action_LIMIT, "oob");
  _deoptimization_hist[Reason_none][0][0] += 1;  // total
  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  juint* cases = _deoptimization_hist[reason][1+action];
  juint* bc_counter_addr = nullptr;
  juint  bc_counter      = 0;
  // Look for an unused counter, or an exact match to this BC.
  if (bc != Bytecodes::_illegal) {
    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
      juint* counter_addr = &cases[bc_case];
      juint  counter = *counter_addr;
      if ((counter == 0 && bc_counter_addr == nullptr)
          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
        // This counter is either free or already devoted to this BC.
        bc_counter_addr = counter_addr;
        bc_counter = counter | bc;
      }
    }
  }
  if (bc_counter_addr == nullptr) {
    // Overflow, or no given bytecode.
    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear the bytecode in the LSB
  }
  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
}

jint Deoptimization::total_deoptimization_count() {
  return _deoptimization_hist[Reason_none][0][0];
}

// Get the deopt count for a specific reason and a specific action. If either
// one of 'reason' or 'action' is null, the method returns the sum of all
// deoptimizations with the specific 'action' or 'reason' respectively.
// If both arguments are null, the method returns the total deopt count.
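//
// Illustrative queries (reason and action names come from the tables above):
//
//   deoptimization_count("class_check", nullptr);        // all class_check deopts
//   deoptimization_count(nullptr, "make_not_entrant");   // all make_not_entrant deopts
//   deoptimization_count("unstable_if", "reinterpret");  // one reason/action pair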
jint Deoptimization::deoptimization_count(const char* reason_str, const char* action_str) {
  if (reason_str == nullptr && action_str == nullptr) {
    return total_deoptimization_count();
  }
  juint counter = 0;
  for (int reason = 0; reason < Reason_LIMIT; reason++) {
    if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) {
      for (int action = 0; action < Action_LIMIT; action++) {
        if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) {
          juint* cases = _deoptimization_hist[reason][1+action];
          for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
            counter += cases[bc_case] >> LSB_BITS;
          }
        }
      }
    }
  }
  return counter;
}

void Deoptimization::print_statistics() {
  juint total = total_deoptimization_count();
  juint account = total;
  if (total != 0) {
    ttyLocker ttyl;
    if (xtty != nullptr)  xtty->head("statistics type='deoptimization'");
    tty->print_cr("Deoptimization traps recorded:");
    #define PRINT_STAT_LINE(name, r) \
      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
    PRINT_STAT_LINE("total", total);
    // For each non-zero entry in the histogram, print the reason,
    // the action, and (if specifically known) the type of bytecode.
    for (int reason = 0; reason < Reason_LIMIT; reason++) {
      for (int action = 0; action < Action_LIMIT; action++) {
        juint* cases = _deoptimization_hist[reason][1+action];
        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
          juint counter = cases[bc_case];
          if (counter != 0) {
            char name[1*K];
            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
            // The last case is the overflow slot; its cleared LSB means
            // "no single bytecode", so report it as "other".
            if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
              bc = Bytecodes::_illegal;
            os::snprintf_checked(name, sizeof(name), "%s/%s/%s",
                                 trap_reason_name(reason),
                                 trap_action_name(action),
                                 Bytecodes::is_defined(bc) ? Bytecodes::name(bc) : "other");
            juint r = counter >> LSB_BITS;
            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
            account -= r;
          }
        }
      }
    }
    if (account != 0) {
      PRINT_STAT_LINE("unaccounted", account);
    }
    #undef PRINT_STAT_LINE
    if (xtty != nullptr)  xtty->tail("statistics");
  }
}

#else // COMPILER2_OR_JVMCI


// Stubs for a C1-only system.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}

const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}

jint Deoptimization::total_deoptimization_count() {
  return 0;
}

jint Deoptimization::deoptimization_count(const char* reason_str, const char* action_str) {
  return 0;
}

void Deoptimization::print_statistics() {
  // no output
}

void
Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  // no update
}

int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}

const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}

#endif // COMPILER2_OR_JVMCI