/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "code/nmethod.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiAgentThread.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/exceptions.hpp"

//
// class JvmtiAgentThread
//
// JavaThread used to wrap a thread started by an agent
// using the JVMTI method RunAgentThread.
//

JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
    _env = env;
    _start_fn = start_fn;
    _start_arg = start_arg;
}

void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    // It is expected that any Agent threads will be created as
    // Java Threads.  If this is the case, notification of the creation
    // of the thread is given in JavaThread::thread_main().
    assert(thread == JavaThread::current(), "sanity check");

    JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    dthread->call_start_function();
}

void
JvmtiAgentThread::call_start_function() {
    ThreadToNativeFromVM transition(this);
    _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}


//
// class GrowableCache - private methods
//

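// Reallocate the null-terminated cache array, refill it from the element
// list, and hand the new array to the registered listener.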
void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address, len+1, mtInternal);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == nullptr) {
      assert(false, "cannot recache null elements");
      remove(i);
      return;
    }
  }
  _cache[len] = nullptr;

  _listener_fun(_this_obj, _cache);
}

bool GrowableCache::equals(void* v, GrowableElement *e2) {
  GrowableElement *e1 = (GrowableElement *) v;
  assert(e1 != nullptr, "e1 != nullptr");
  assert(e2 != nullptr, "e2 != nullptr");

  return e1->equals(e2);
}

//
// class GrowableCache - public methods
//

GrowableCache::GrowableCache() {
  _this_obj       = nullptr;
  _listener_fun   = nullptr;
  _elements       = nullptr;
  _cache          = nullptr;
}

GrowableCache::~GrowableCache() {
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}

void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj       = this_obj;
  _listener_fun   = listener_fun;
  _elements       = new (mtServiceability) GrowableArray<GrowableElement*>(5, mtServiceability);
  recache();
}

// number of elements in the collection
int GrowableCache::length() {
  return _elements->length();
}

// get the value of the index element in the collection
GrowableElement* GrowableCache::at(int index) {
  GrowableElement *e = (GrowableElement *) _elements->at(index);
  assert(e != nullptr, "e != nullptr");
  return e;
}

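// find the index of the element, -1 if it doesn't exist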
int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}

// append a copy of the element to the end of the collection
void GrowableCache::append(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);
  recache();
}

// remove the element at index
void GrowableCache::remove(int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != nullptr, "e != nullptr");
  _elements->remove(e);
  delete e;
  recache();
}

// clear out all elements, release all heap space and
// let our listener know that things have changed.
void GrowableCache::clear() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    delete _elements->at(i);
  }
  _elements->clear();
  recache();
}

//
// class JvmtiBreakpoint
//

JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
    : _method(m_method), _bci((int)location) {
  assert(_method != nullptr, "No method for breakpoint.");
  assert(_bci >= 0, "Negative bci for breakpoint.");
  oop class_holder_oop  = _method->method_holder()->klass_holder();
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), class_holder_oop);
}

JvmtiBreakpoint::~JvmtiBreakpoint() {
  _class_holder.release(JvmtiExport::jvmti_oop_storage());
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method   = bp._method;
  _bci      = bp._bci;
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), bp._class_holder.resolve());
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
  return _method   == bp._method
    &&   _bci      == bp._bci;
}

address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}

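// Apply meth_act (Method::set_breakpoint or Method::clear_breakpoint) to this
// method and to every EMCP previous version of the method's holder.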
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  InstanceKlass* ik = _method->method_holder();
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  for (InstanceKlass* pv_node = ik->previous_versions();
       pv_node != nullptr;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // Only set breakpoints in EMCP methods.
      // EMCP methods are old but not obsolete. Equivalent
      // Modulo Constant Pool means the method is equivalent except
      // the constant pool and instructions that access the constant
      // pool might be different.
      // If a breakpoint is set in a redefined method, its EMCP methods
      // must have a breakpoint also.
      // None of the methods are deleted until none are running.
      // This code could set a breakpoint in a method that
      // is never reached, but this won't be noticeable to the programmer.
      if (!method->is_obsolete() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        ResourceMark rm;
        log_debug(redefine, class, breakpoint)
          ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
           method->name()->as_C_string(), method->signature()->as_C_string());
        (method->*meth_act)(_bci);
        break;
      }
    }
  }
}

void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}

void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}

void JvmtiBreakpoint::print_on(outputStream* out) const {
#ifndef PRODUCT
  ResourceMark rm;
  const char *class_name  = (_method == nullptr) ? "null" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == nullptr) ? "null" : _method->name()->as_C_string();
  out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
#endif
}


//
// class VM_ChangeBreakpoints
//
// Modify the Breakpoints data structure at a safepoint
//

void VM_ChangeBreakpoints::doit() {
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  default:
    assert(false, "Unknown operation");
  }
}

//
// class JvmtiBreakpoints
//
// a JVMTI internal collection of JvmtiBreakpoint
//

JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this, listener_fun);
}

JvmtiBreakpoints::~JvmtiBreakpoints() {}

void JvmtiBreakpoints::print() {
#ifndef PRODUCT
  LogTarget(Trace, jvmti) log;
  LogStream log_stream(log);

  int n = _bps.length();
  for (int i=0; i<n; i++) {
    JvmtiBreakpoint& bp = _bps.at(i);
    log_stream.print("%d: ", i);
    bp.print_on(&log_stream);
    log_stream.cr();
  }
#endif
}


void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i == -1) {
    _bps.append(bp);
    bp.set();
  }
}

void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i != -1) {
    _bps.remove(i);
    bp.clear();
  }
}

int JvmtiBreakpoints::length() { return _bps.length(); }

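// add a breakpoint via a safepoint VM operation; returns JVMTI_ERROR_DUPLICATE
// if the breakpoint has already been set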
int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  if (_bps.find(bp) != -1) {
     return JVMTI_ERROR_DUPLICATE;
  }
  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  VMThread::execute(&set_breakpoint);
  return JVMTI_ERROR_NONE;
}

int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
  if (_bps.find(bp) == -1) {
     return JVMTI_ERROR_NOT_FOUND;
  }

  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  VMThread::execute(&clear_breakpoint);
  return JVMTI_ERROR_NONE;
}

void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
  bool changed = true;
  // We are going to run through the list of breakpoints
  // and delete some.  This deletion probably alters
  // the list in some implementation-defined way such
  // that when we delete entry i, the next entry might
  // no longer be at i+1.  To be safe, each time we delete
  // an entry, we'll just start again from the beginning.
  // We'll stop when we make a pass through the whole list
  // without deleting anything.
  while (changed) {
    int len = _bps.length();
    changed = false;
    for (int i = 0; i < len; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // This changed 'i' so we have to start over.
        changed = true;
        break;
      }
    }
  }
}

//
// class JvmtiCurrentBreakpoints
//

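// The lazily created global JvmtiBreakpoints collection and the null-terminated
// breakpoint address cache that is kept in sync with it.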
JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = nullptr;
address *         JvmtiCurrentBreakpoints::_breakpoint_list    = nullptr;


JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints != nullptr) return (*_jvmti_breakpoints);
  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
  assert(_jvmti_breakpoints != nullptr, "_jvmti_breakpoints != nullptr");
  return (*_jvmti_breakpoints);
}

void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != nullptr, "this_jvmti != nullptr");

  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == nullptr, "cache must be null terminated");

  set_breakpoint_list(cache);
}

///////////////////////////////////////////////////////////////
//
// class VM_BaseGetOrSetLocal
//

const jvalue VM_BaseGetOrSetLocal::_DEFAULT_VALUE = {0L};

// Shared base constructor for the getter and setter variants
VM_BaseGetOrSetLocal::VM_BaseGetOrSetLocal(JavaThread* calling_thread, jint depth,
                                           jint index, BasicType type, jvalue value, bool set, bool self)
  : _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(nullptr)
  , _set(set)
  , _self(self)
  , _result(JVMTI_ERROR_NONE)
{
}

// Check that the klass is assignable to a type with the given signature.
// Another solution could be to use the function Klass::is_subtype_of(type).
// But the type class can be forced to load/initialize eagerly in such a case.
// This may cause unexpected consequences like CFLH or class-init JVMTI events.
// It is better to avoid such behavior.
bool VM_BaseGetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
  assert(ty_sign != nullptr, "type signature must not be null");
  assert(thread != nullptr, "thread must not be null");
  assert(klass != nullptr, "klass must not be null");

  int len = (int) strlen(ty_sign);
  if ((ty_sign[0] == JVM_SIGNATURE_CLASS ||
       ty_sign[0] == JVM_SIGNATURE_PRIMITIVE_OBJECT) &&
      ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
    ty_sign++;
    len -= 2;
  }
  TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
  if (klass->name() == ty_sym) {
    return true;
  }
  // Compare primary supers
  int super_depth = klass->super_depth();
  int idx;
  for (idx = 0; idx < super_depth; idx++) {
    if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
      return true;
    }
  }
  // Compare secondary supers
  const Array<Klass*>* sec_supers = klass->secondary_supers();
  for (idx = 0; idx < sec_supers->length(); idx++) {
    if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
      return true;
    }
  }
  return false;
}

// Checks error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is OK, 'false' - an error code is set in _result

bool VM_BaseGetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  if (!method->has_localvariable_table()) {
    // Just to check index boundaries.
    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
    if (_index < 0 || _index + extra_slot >= method->max_locals()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  jint num_entries = method->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that the bci ranges of LVT entries
    // with the same slot number do not overlap
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  Symbol*   sign_sym  = method->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
  case T_PRIMITIVE_OBJECT:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  }
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != nullptr) { // null reference is allowed
    // Check that the jobject class matches the slot's type signature.
    oop obj = JNIHandles::resolve_external_guard(jobj);
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, VMThread::vm_thread())) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}

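// Check the slot index and type against the frame's actual locals, without
// consulting the LocalVariableTable.
// Returns: 'true' - everything is OK, 'false' - an error code is set in _result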
bool VM_BaseGetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;

  if (_index < 0 || _index + extra_slot >= method->max_locals()) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  StackValueCollection *locals = _jvf->locals();
  BasicType slot_type = locals->at(_index)->type();

  if (slot_type == T_CONFLICT) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  if (extra_slot) {
    BasicType extra_slot_type = locals->at(_index + 1)->type();
    if (extra_slot_type != T_INT) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
  }
  if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }
  return true;
}

static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}

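// Reallocate any objects of the target frame that were optimized away by escape
// analysis so the frame's locals can be read and written; a reallocation
// failure is reported as JVMTI_ERROR_OUT_OF_MEMORY.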
bool VM_GetOrSetLocal::doit_prologue() {
  if (!_eb.deoptimize_objects(_depth, _depth)) {
    // The target frame is affected by a reallocation failure.
    _result = JVMTI_ERROR_OUT_OF_MEMORY;
    return false;
  }

  return true;
}

void VM_BaseGetOrSetLocal::doit() {
  _jvf = get_java_vframe();
  if (_jvf == nullptr) {
    return;
  }

  frame fr = _jvf->fr();
  if (_set && _depth != 0 && Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
    _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
    return;
  }

  Method* method = _jvf->method();
  if (getting_receiver()) {
    if (method->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return;
    }
  } else {
    if (method->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }

    if (!check_slot_type_no_lvt(_jvf)) {
      return;
    }
    if (method->has_localvariable_table() &&
        !check_slot_type_lvt(_jvf)) {
      return;
    }
  }

  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of the current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    if (fr.is_heap_frame()) { // we want this check after the check for JVMTI_ERROR_INVALID_SLOT
      assert(Continuation::is_frame_in_continuation(_jvf->thread(), fr), "sanity check");
      // If the topmost frame is a heap frame, then it hasn't been thawed. This can happen
      // if we are executing at a return barrier safepoint. The callee frame has been popped,
      // but the caller frame has not been thawed. We can't support a JVMTI SetLocal in the callee
      // frame at this point, because we aren't truly in the callee yet.
      // fr.is_heap_frame() is impossible if a continuation is at a single step or breakpoint.
      _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
      return;
    }

    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {
      // Continuation can't be unmounted at this point (it was checked/reported in get_java_vframe).
      if (Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
        _result = JVMTI_ERROR_OPAQUE_FRAME; // can't deoptimize for top continuation frame
        return;
      }

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens,
      // any vframe created after this point will have its locals
      // reflecting this update, so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT || _type == T_PRIMITIVE_OBJECT) {
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only needed because of the assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    StackValueCollection *locals = _jvf->locals();
    Thread* current_thread = VMThread::vm_thread();
    HandleMark hm(current_thread);

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT:
      case T_PRIMITIVE_OBJECT: {
        Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index);   break;
        case T_LONG:   _value.j = locals->long_at  (_index);   break;
        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
        case T_OBJECT:
        case T_PRIMITIVE_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}

bool VM_BaseGetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}


///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
//

// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(false, nullptr, nullptr)
{
}

// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self),
    _thread(thread),
    _eb(type == T_OBJECT, JavaThread::current(), thread)
{
}

// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(true, calling_thread, thread)
{
}

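// Walk from the target thread's last Java frame down to the frame at _depth;
// returns null if the thread has no last Java frame or the stack is not that deep.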
vframe *VM_GetOrSetLocal::get_vframe() {
  if (!_thread->has_last_Java_frame()) {
    return nullptr;
  }
  RegisterMap reg_map(_thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::include);
  vframe *vf = JvmtiEnvBase::get_cthread_last_java_vframe(_thread, &reg_map);
  int d = 0;
  while ((vf != nullptr) && (d < _depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}

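// Get the javaVFrame at _depth, checking that the target thread is the current
// thread or has been suspended, and that the frame is a Java frame.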
javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
  vframe* vf = get_vframe();
  if (!(_self || _thread->is_carrier_thread_suspended())) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return nullptr;
  }
  if (vf == nullptr) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return nullptr;
  }
  javaVFrame *jvf = (javaVFrame*)vf;

  if (!vf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return nullptr;
  }
  return jvf;
}

VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth, bool self)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0, self) {}


///////////////////////////////////////////////////////////////
//
// class VM_VirtualThreadGetOrSetLocal
//

// Constructor for non-object getter
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
                                                             jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}

// Constructor for object or non-object setter
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
                                                             jint index, BasicType type, jvalue value, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self)
{
  _env = env;
  _vthread_h = vthread_h;
}

// Constructor for object getter
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, JavaThread* calling_thread,
                                                             jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}

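// Get the javaVFrame at _depth in the virtual thread's stack, whether its
// continuation is currently mounted on a carrier thread or not.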
javaVFrame *VM_VirtualThreadGetOrSetLocal::get_java_vframe() {
  Thread* cur_thread = Thread::current();
  oop cont = java_lang_VirtualThread::continuation(_vthread_h());
  assert(cont != nullptr, "vthread continuation must not be null");

  javaVFrame* jvf = nullptr;
  JavaThread* java_thread = JvmtiEnvBase::get_JavaThread_or_null(_vthread_h());
  bool is_cont_mounted = (java_thread != nullptr);

  if (!(_self || JvmtiVTSuspender::is_vthread_suspended(_vthread_h()))) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return nullptr;
  }

  if (is_cont_mounted) {
    vframeStream vfs(java_thread);

    if (!vfs.at_end()) {
      jvf = vfs.asJavaVFrame();
      jvf = JvmtiEnvBase::check_and_skip_hidden_frames(java_thread, jvf);
    }
  } else {
    vframeStream vfs(cont);

    if (!vfs.at_end()) {
      jvf = vfs.asJavaVFrame();
      jvf = JvmtiEnvBase::check_and_skip_hidden_frames(_vthread_h(), jvf);
    }
  }
  int d = 0;
  while ((jvf != nullptr) && (d < _depth)) {
    jvf = jvf->java_sender();
    d++;
  }

  if (d < _depth || jvf == nullptr) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return nullptr;
  }

  if ((_set && !is_cont_mounted) || !jvf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return nullptr;
  }
  return jvf;
}

VM_VirtualThreadGetReceiver::VM_VirtualThreadGetReceiver(
    JvmtiEnv* env, Handle vthread_h, JavaThread* caller_thread, jint depth, bool self)
    : VM_VirtualThreadGetOrSetLocal(env, vthread_h, caller_thread, depth, 0, self) {}


/////////////////////////////////////////////////////////////////////////////////////////
//
// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
//

bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  return java_thread->java_suspend();
}

bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  return java_thread->java_resume();
}

void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  ResourceMark rm;
  LogStreamHandle(Trace, jvmti) log_stream;
  log_stream.print("Suspended Threads: [");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
#ifdef JVMTI_TRACE
    const char *name   = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name   = "";
#endif /*JVMTI_TRACE */
    log_stream.print("%s(%c ", name, thread->is_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      log_stream.print("no stack");
    }
    log_stream.print(") ");
  }
  log_stream.print_cr("]");
#endif
}

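// Factory methods for the deferred events that get enqueued for the service
// thread to post.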
JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
    nmethod* nm) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
  event._event_data.compiled_method_load = nm;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
    jmethodID id, const void* code) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
  event._event_data.compiled_method_unload.method_id = id;
  event._event_data.compiled_method_unload.code_begin = code;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
      const char* name, const void* code_begin, const void* code_end) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.dynamic_code_generated.name = os::strdup(name);
  event._event_data.dynamic_code_generated.code_begin = code_begin;
  event._event_data.dynamic_code_generated.code_end = code_end;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.class_unload.name = os::strdup(name);
  return event;
}

void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch (_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == nullptr)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == nullptr)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
  assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
  nmethod* nm = _event_data.compiled_method_load;
  JvmtiExport::post_compiled_method_load(env, nm);
}

void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
  if (_type == TYPE_COMPILED_METHOD_LOAD) {
    _event_data.compiled_method_load->run_nmethod_entry_barrier();
  }
}


// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}

// The GC calls this and marks the nmethods here on the stack so that
// they cannot be unloaded while in the queue.
void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
  if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}


bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check.  Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE  && _queue_head != nullptr;
}

void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
  // Events get added to the end of the queue (and are pulled off the front).
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == nullptr) {
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == nullptr, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }

  assert((_queue_head == nullptr) == (_queue_tail == nullptr),
         "Inconsistent queue markers");
}

JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  assert(_queue_head != nullptr, "Nothing to dequeue");

  if (_queue_head == nullptr) {
    // Just in case this happens in product; it shouldn't but let's not crash
    return JvmtiDeferredEvent();
  }

  QueueNode* node = _queue_head;
  _queue_head = _queue_head->next();
  if (_queue_head == nullptr) {
    _queue_tail = nullptr;
  }

  assert((_queue_head == nullptr) == (_queue_tail == nullptr),
         "Inconsistent queue markers");

  JvmtiDeferredEvent event = node->event();
  delete node;
  return event;
}

void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  // Post events while nmethods are still in the queue and can't be unloaded.
  while (_queue_head != nullptr) {
    _queue_head->event().post_compiled_method_load_event(env);
    dequeue();
  }
}

void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
  for (QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
     node->event().run_nmethod_entry_barriers();
  }
}


void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  for (QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
     node->event().oops_do(f, cf);
  }
}

void JvmtiDeferredEventQueue::nmethods_do(CodeBlobClosure* cf) {
  for (QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
     node->event().nmethods_do(cf);
  }
}