1 /*
  2  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "classfile/javaClasses.hpp"
 26 #include "classfile/symbolTable.hpp"
 27 #include "code/nmethod.hpp"
 28 #include "interpreter/interpreter.hpp"
 29 #include "interpreter/oopMapCache.hpp"
 30 #include "jvmtifiles/jvmtiEnv.hpp"
 31 #include "logging/log.hpp"
 32 #include "logging/logStream.hpp"
 33 #include "memory/allocation.inline.hpp"
 34 #include "memory/resourceArea.hpp"
 35 #include "oops/instanceKlass.hpp"
 36 #include "oops/klass.inline.hpp"
 37 #include "oops/oop.inline.hpp"
 38 #include "oops/oopHandle.inline.hpp"
 39 #include "prims/jvmtiAgentThread.hpp"
 40 #include "prims/jvmtiEventController.inline.hpp"
 41 #include "prims/jvmtiImpl.hpp"
 42 #include "prims/jvmtiRedefineClasses.hpp"
 43 #include "runtime/atomicAccess.hpp"
 44 #include "runtime/continuation.hpp"
 45 #include "runtime/deoptimization.hpp"
 46 #include "runtime/frame.inline.hpp"
 47 #include "runtime/handles.inline.hpp"
 48 #include "runtime/interfaceSupport.inline.hpp"
 49 #include "runtime/javaCalls.hpp"
 50 #include "runtime/javaThread.hpp"
 51 #include "runtime/jniHandles.hpp"
 52 #include "runtime/os.hpp"
 53 #include "runtime/serviceThread.hpp"
 54 #include "runtime/signature.hpp"
 55 #include "runtime/threadSMR.hpp"
 56 #include "runtime/vframe.inline.hpp"
 57 #include "runtime/vframe_hp.hpp"
 58 #include "runtime/vmOperations.hpp"
 59 #include "utilities/exceptions.hpp"
 60 
 61 //
 62 // class JvmtiAgentThread
 63 //
 64 // JavaThread used to wrap a thread started by an agent
 65 // using the JVMTI method RunAgentThread.
 66 //
 67 
// Construct a JavaThread that, once started, runs the agent-supplied
// start_fn(env, jni_env, start_arg) via start_function_wrapper().
JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
    _env = env;
    _start_fn = start_fn;
    _start_arg = start_arg;
}
 74 
 75 void
 76 JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
 77     // It is expected that any Agent threads will be created as
 78     // Java Threads.  If this is the case, notification of the creation
 79     // of the thread is given in JavaThread::thread_main().
 80     assert(thread == JavaThread::current(), "sanity check");
 81 
 82     JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
 83     dthread->call_start_function();
 84 }
 85 
 86 void
 87 JvmtiAgentThread::call_start_function() {
 88     ThreadToNativeFromVM transition(this);
 89     _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
 90 }
 91 
 92 //
 93 // class JvmtiBreakpoint
 94 //
 95 
 96 JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
 97     : _method(m_method), _bci((int)location) {
 98   assert(_method != nullptr, "No method for breakpoint.");
 99   assert(_bci >= 0, "Negative bci for breakpoint.");
100   oop class_holder_oop = _method->method_holder()->klass_holder();
101   _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), class_holder_oop);
102 }
103 
104 JvmtiBreakpoint::JvmtiBreakpoint(const JvmtiBreakpoint& bp)
105     : _method(bp._method), _bci(bp._bci) {
106   _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), bp._class_holder.resolve());
107 }
108 
// Release the holder handle so the class is no longer kept alive by
// this breakpoint.
JvmtiBreakpoint::~JvmtiBreakpoint() {
  _class_holder.release(JvmtiExport::jvmti_oop_storage());
}
112 
113 bool JvmtiBreakpoint::equals(const JvmtiBreakpoint& bp) const {
114   return _method   == bp._method
115     &&   _bci      == bp._bci;
116 }
117 
// Bytecode pointer corresponding to _bci within the method's code.
address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}
121 
// Apply 'meth_act' (Method::set_breakpoint or Method::clear_breakpoint) at
// _bci to the current method and to every matching EMCP previous version
// created by class redefinition.
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  assert(!_method->is_old(), "the breakpoint method shouldn't be old");
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  InstanceKlass* ik = _method->method_holder();
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  for (InstanceKlass* pv_node = ik->previous_versions();
       pv_node != nullptr;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // Only set breakpoints in EMCP methods.
      // EMCP methods are old but not obsolete. Equivalent
      // Modulo Constant Pool means the method is equivalent except
      // the constant pool and instructions that access the constant
      // pool might be different.
      // If a breakpoint is set in a redefined method, its EMCP methods
      // must have a breakpoint also.
      // None of the methods are deleted until none are running.
      // This code could set a breakpoint in a method that
      // is never reached, but this won't be noticeable to the programmer.
      if (!method->is_obsolete() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        ResourceMark rm;
        log_debug(redefine, class, breakpoint)
          ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
           method->name()->as_C_string(), method->signature()->as_C_string());
        (method->*meth_act)(_bci);
        // At most one method per previous version can match name+signature.
        break;
      }
    }
  }
}
163 
// Install this breakpoint in the method and all its EMCP previous versions.
void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}
167 
// Remove this breakpoint from the method and all its EMCP previous versions.
void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}
171 
// Debug-only printing; compiled out in PRODUCT builds.
void JvmtiBreakpoint::print_on(outputStream* out) const {
#ifndef PRODUCT
  ResourceMark rm;
  const char *class_name  = (_method == nullptr) ? "null" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == nullptr) ? "null" : _method->name()->as_C_string();
  // NOTE(review): the null guards above suggest _method may be null here,
  // yet getBcp() dereferences _method unconditionally — confirm (the
  // constructor asserts _method is non-null, so the guards look defensive).
  out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
#endif
}
180 
181 
182 //
183 // class VM_ChangeBreakpoints
184 //
185 // Modify the Breakpoints data structure at a safepoint
186 //
187 // The caller of VM_ChangeBreakpoints operation should ensure that
188 // _bp.method is preserved until VM_ChangeBreakpoints is processed.
189 
// Executed at a safepoint by the VM thread: add or remove the breakpoint
// from the global collection and patch/unpatch the bytecode.
void VM_ChangeBreakpoints::doit() {
  if (_bp->method()->is_old()) {
    // The bp->_method became old because VMOp with class redefinition happened for this class
    // after JvmtiBreakpoint was created but before JVM_ChangeBreakpoints started.
    // All class breakpoints are cleared during redefinition, so don't set/clear this breakpoint.
   return;
  }
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  default:
    assert(false, "Unknown operation");
  }
}
208 
209 //
210 // class JvmtiBreakpoints
211 //
212 // a JVMTI internal collection of JvmtiBreakpoint
213 //
214 
// Start with a small growable element list, tagged mtServiceability
// for native memory tracking.
JvmtiBreakpoints::JvmtiBreakpoints()
    : _elements(5, mtServiceability) {
}
218 
// Nothing to release explicitly; elements clean up via their own destructors.
JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
220 
221 void JvmtiBreakpoints::print() {
222 #ifndef PRODUCT
223   LogTarget(Trace, jvmti) log;
224   LogStream log_stream(log);
225 
226   int n = length();
227   for (int i = 0; i < n; i++) {
228     JvmtiBreakpoint& bp = at(i);
229     log_stream.print("%d: ", i);
230     bp.print_on(&log_stream);
231     log_stream.cr();
232   }
233 #endif
234 }
235 
236 
237 void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
238   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
239 
240   int i = find(bp);
241   if (i == -1) {
242     append(bp);
243     bp.set();
244   }
245 }
246 
247 void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
248   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
249 
250   int i = find(bp);
251   if (i != -1) {
252     remove(i);
253     bp.clear();
254   }
255 }
256 
257 int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
258   if (find(bp) != -1) {
259     return JVMTI_ERROR_DUPLICATE;
260   }
261 
262   // Ensure that bp._method is not deallocated before VM_ChangeBreakpoints::doit().
263   methodHandle mh(Thread::current(), bp.method());
264   VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
265   VMThread::execute(&set_breakpoint);
266   return JVMTI_ERROR_NONE;
267 }
268 
269 int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
270   if (find(bp) == -1) {
271     return JVMTI_ERROR_NOT_FOUND;
272   }
273 
274   // Ensure that bp._method is not deallocated before VM_ChangeBreakpoints::doit().
275   methodHandle mh(Thread::current(), bp.method());
276   VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
277   VMThread::execute(&clear_breakpoint);
278   return JVMTI_ERROR_NONE;
279 }
280 
281 void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
282   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
283 
284   // Go backwards because this removes entries that are freed.
285   for (int i = length() - 1; i >= 0; i--) {
286     JvmtiBreakpoint& bp = at(i);
287     if (bp.method()->method_holder() == klass) {
288       bp.clear();
289       remove(i);
290     }
291   }
292 }
293 
294 //
295 // class JvmtiCurrentBreakpoints
296 //
297 
// Lazily-created process-wide collection; see get_jvmti_breakpoints().
JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = nullptr;
299 
// Return the singleton breakpoint collection, creating it on first use.
// Racy creation is resolved with an atomic compare-and-set: exactly one
// creator wins; losers delete their instance.
JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints == nullptr) {
    JvmtiBreakpoints* breakpoints = new JvmtiBreakpoints();
    if (!AtomicAccess::replace_if_null(&_jvmti_breakpoints, breakpoints)) {
      // already created concurrently by another thread
      delete breakpoints;
    }
  }
  return (*_jvmti_breakpoints);
}
310 
311 
312 ///////////////////////////////////////////////////////////////
313 //
314 // class VM_BaseGetOrSetLocal
315 //
316 
// All-zero jvalue used by getter constructors that supply no initial value.
const jvalue VM_BaseGetOrSetLocal::_DEFAULT_VALUE = {0L};
// Base constructor shared by all get/set-local operations. Records the
// request parameters; the target frame (_jvf) is resolved later in doit().
VM_BaseGetOrSetLocal::VM_BaseGetOrSetLocal(JavaThread* calling_thread, jint depth,
                                           jint index, BasicType type, jvalue value, bool set, bool self)
  : _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(nullptr)
  , _set(set)
  , _self(self)
  , _result(JVMTI_ERROR_NONE)
{
}
333 
334 // Check that the klass is assignable to a type with the given signature.
335 // Another solution could be to use the function Klass::is_subtype_of(type).
336 // But the type class can be forced to load/initialize eagerly in such a case.
337 // This may cause unexpected consequences like CFLH or class-init JVMTI events.
338 // It is better to avoid such a behavior.
339 bool VM_BaseGetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
340   assert(ty_sign != nullptr, "type signature must not be null");
341   assert(thread != nullptr, "thread must not be null");
342   assert(klass != nullptr, "klass must not be null");
343 
344   int len = (int) strlen(ty_sign);
345   if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
346       ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
347     ty_sign++;
348     len -= 2;
349   }
350   TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
351   if (klass->name() == ty_sym) {
352     return true;
353   }
354   // Compare primary supers
355   int super_depth = klass->super_depth();
356   int idx;
357   for (idx = 0; idx < super_depth; idx++) {
358     if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
359       return true;
360     }
361   }
362   // Compare secondary supers
363   const Array<Klass*>* sec_supers = klass->secondary_supers();
364   for (idx = 0; idx < sec_supers->length(); idx++) {
365     if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
366       return true;
367     }
368   }
369   return false;
370 }
371 
// Validate _index/_type against the method's LocalVariableTable (when one
// exists). Checks these error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns 'true' if everything is OK; on failure stores the error code in
// _result and returns 'false'.
bool VM_BaseGetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  if (!method->has_localvariable_table()) {
    // Just to check index boundaries.
    // Longs and doubles occupy two consecutive slots.
    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
    if (_index < 0 || _index + extra_slot >= method->max_locals()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  jint num_entries = method->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  // Find the LVT entry covering this slot at the frame's current bci.
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot be overlapped
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  Symbol*   sign_sym  = method->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  // Sub-int types are widened to T_INT; arrays are treated as objects.
  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != nullptr) { // null reference is allowed
    // Check that the jobject class matches the return type signature.
    oop obj = JNIHandles::resolve_external_guard(jobj);
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, VMThread::vm_thread())) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}
449 
450 bool VM_BaseGetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
451   Method* method = jvf->method();
452   jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
453 
454   if (_index < 0 || _index + extra_slot >= method->max_locals()) {
455     _result = JVMTI_ERROR_INVALID_SLOT;
456     return false;
457   }
458   StackValueCollection *locals = _jvf->locals();
459   BasicType slot_type = locals->at(_index)->type();
460 
461   if (slot_type == T_CONFLICT) {
462     _result = JVMTI_ERROR_INVALID_SLOT;
463     return false;
464   }
465   if (extra_slot) {
466     BasicType extra_slot_type = locals->at(_index + 1)->type();
467     if (extra_slot_type != T_INT) {
468       _result = JVMTI_ERROR_INVALID_SLOT;
469       return false;
470     }
471   }
472   if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
473     _result = JVMTI_ERROR_TYPE_MISMATCH;
474     return false;
475   }
476   return true;
477 }
478 
// A frame is deoptimizable when it is compiled and its physical frame
// supports deoptimization.
static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}
482 
// Runs in the requesting thread before the VM operation. _eb presumably
// reverts escape-analysis optimizations for the target frame so its locals
// become accessible — confirm against EscapeBarrier. A reallocation failure
// aborts the operation with OUT_OF_MEMORY.
bool VM_GetOrSetLocal::doit_prologue() {
  if (!_eb.deoptimize_objects(_depth, _depth)) {
    // The target frame is affected by a reallocation failure.
    _result = JVMTI_ERROR_OUT_OF_MEMORY;
    return false;
  }

  return true;
}
492 
// Core of the VM operation: resolve the target frame, validate the slot,
// then either read the local into _value (get) or write _value into the
// frame (set), deoptimizing compiled frames as needed. Errors are reported
// through _result.
void VM_BaseGetOrSetLocal::doit() {
  _jvf = get_java_vframe();
  if (_jvf == nullptr) {
    // _result was already set by get_java_vframe().
    return;
  };

  frame fr = _jvf->fr();
  if (_set && _depth != 0 && Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
    _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
    return;
  }

  Method* method = _jvf->method();
  if (getting_receiver()) {
    // Slot 0 is only a receiver in non-static methods.
    if (method->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return;
    }
  } else {
    if (method->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }

    // First check against the actual frame contents, then (if present)
    // against the LocalVariableTable.
    if (!check_slot_type_no_lvt(_jvf)) {
      return;
    }
    if (method->has_localvariable_table() &&
        !check_slot_type_lvt(_jvf)) {
      return;
    }
  }

  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    if (fr.is_heap_frame()) { // we want this check after the check for JVMTI_ERROR_INVALID_SLOT
      assert(Continuation::is_frame_in_continuation(_jvf->thread(), fr), "sanity check");
      // If the topmost frame is a heap frame, then it hasn't been thawed. This can happen
      // if we are executing at a return barrier safepoint. The callee frame has been popped,
      // but the caller frame has not been thawed. We can't support a JVMTI SetLocal in the callee
      // frame at this point, because we aren't truly in the callee yet.
      // fr.is_heap_frame() is impossible if a continuation is at a single step or breakpoint.
      _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
      return;
    }

    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {
      // Continuation can't be unmounted at this point (it was checked/reported in get_java_vframe).
      if (Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
        _result = JVMTI_ERROR_OPAQUE_FRAME; // can't deoptimize for top continuation frame
        return;
      }

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens
      // any vframe created after this point will have its locals
      // reflecting this update so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only need because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    // Interpreted (or otherwise non-deoptimizable) frame: write directly.
    StackValueCollection *locals = _jvf->locals();
    Thread* current_thread = VMThread::vm_thread();
    HandleMark hm(current_thread);

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT: {
        Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index);   break;
        case T_LONG:   _value.j = locals->long_at  (_index);   break;
        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
        case T_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}
621 
// Permit nested VM operations: doit() may need to trigger deoptimization.
bool VM_BaseGetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}
625 
626 
627 ///////////////////////////////////////////////////////////////
628 //
629 // class VM_GetOrSetLocal
630 //
631 
// Constructor for non-object getter: no escape-barrier work is needed.
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(false, nullptr, nullptr)
{
}
639 
// Constructor for object or non-object setter; the escape barrier is
// engaged only for object writes (type == T_OBJECT).
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self),
    _thread(thread),
    _eb(type == T_OBJECT, JavaThread::current(), thread)
{
}
647 
// Constructor for object getter: the result is returned as a JNI local in
// 'calling_thread', and the escape barrier is always engaged.
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(true, calling_thread, thread)
{
}
655 
656 vframe *VM_GetOrSetLocal::get_vframe() {
657   if (!_thread->has_last_Java_frame()) {
658     return nullptr;
659   }
660   RegisterMap reg_map(_thread,
661                       RegisterMap::UpdateMap::include,
662                       RegisterMap::ProcessFrames::include,
663                       RegisterMap::WalkContinuation::include);
664   vframe *vf = JvmtiEnvBase::get_cthread_last_java_vframe(_thread, &reg_map);
665   int d = 0;
666   while ((vf != nullptr) && (d < _depth)) {
667     vf = vf->java_sender();
668     d++;
669   }
670   return vf;
671 }
672 
673 javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
674   vframe* vf = get_vframe();
675   if (!_self && !_thread->is_suspended() && !_thread->is_carrier_thread_suspended()) {
676     _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
677     return nullptr;
678   }
679   if (vf == nullptr) {
680     _result = JVMTI_ERROR_NO_MORE_FRAMES;
681     return nullptr;
682   }
683   javaVFrame *jvf = (javaVFrame*)vf;
684 
685   if (!vf->is_java_frame()) {
686     _result = JVMTI_ERROR_OPAQUE_FRAME;
687     return nullptr;
688   }
689   return jvf;
690 }
691 
// Receiver access is object slot 0 of the selected frame.
VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth, bool self)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0, self) {}
695 
696 
697 ///////////////////////////////////////////////////////////////
698 //
699 // class VM_VirtualThreadGetOrSetLocal
700 //
701 
// Constructor for non-object getter on a virtual thread.
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
                                                             jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}
710 
// Constructor for object or non-object setter on a virtual thread.
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
                                                             jint index, BasicType type, jvalue value, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self)
{
  _env = env;
  _vthread_h = vthread_h;
}
719 
// Constructor for object getter on a virtual thread; the result is
// returned as a JNI local in 'calling_thread'.
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, JavaThread* calling_thread,
                                                             jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}
728 
// Resolve the virtual thread's Java frame at _depth. On failure stores a
// JVMTI error code in _result and returns nullptr. Writes require the
// continuation to be mounted on a carrier thread.
javaVFrame *VM_VirtualThreadGetOrSetLocal::get_java_vframe() {
  // Null carrier thread means the vthread's continuation is unmounted.
  JavaThread* java_thread = JvmtiEnvBase::get_JavaThread_or_null(_vthread_h());
  bool is_cont_mounted = (java_thread != nullptr);

  // Unless operating on itself, the virtual thread must be suspended.
  if (!(_self || JvmtiVTSuspender::is_vthread_suspended(_vthread_h()))) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return nullptr;
  }
  javaVFrame* jvf = JvmtiEnvBase::get_vthread_jvf(_vthread_h());

  // Walk down to the requested depth.
  int d = 0;
  while ((jvf != nullptr) && (d < _depth)) {
    jvf = jvf->java_sender();
    d++;
  }

  if (d < _depth || jvf == nullptr) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return nullptr;
  }

  // Setting a local in an unmounted continuation is not supported.
  if ((_set && !is_cont_mounted) || !jvf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return nullptr;
  }
  return jvf;
}
756 
// Receiver access is object slot 0 of the selected virtual-thread frame.
VM_VirtualThreadGetReceiver::VM_VirtualThreadGetReceiver(
    JvmtiEnv* env, Handle vthread_h, JavaThread* caller_thread, jint depth, bool self)
    : VM_VirtualThreadGetOrSetLocal(env, vthread_h, caller_thread, depth, 0, self) {}
760 
761 
762 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
763     nmethod* nm) {
764   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
765   event._event_data.compiled_method_load = nm;
766   return event;
767 }
768 
769 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
770     jmethodID id, const void* code) {
771   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
772   event._event_data.compiled_method_unload.method_id = id;
773   event._event_data.compiled_method_unload.code_begin = code;
774   return event;
775 }
776 
777 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
778       const char* name, const void* code_begin, const void* code_end) {
779   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
780   // Need to make a copy of the name since we don't know how long
781   // the event poster will keep it around after we enqueue the
782   // deferred event and return. strdup() failure is handled in
783   // the post() routine below.
784   event._event_data.dynamic_code_generated.name = os::strdup(name);
785   event._event_data.dynamic_code_generated.code_begin = code_begin;
786   event._event_data.dynamic_code_generated.code_end = code_end;
787   return event;
788 }
789 
790 JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
791   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
792   // Need to make a copy of the name since we don't know how long
793   // the event poster will keep it around after we enqueue the
794   // deferred event and return. strdup() failure is handled in
795   // the post() routine below.
796   event._event_data.class_unload.name = os::strdup(name);
797   return event;
798 }
799 
// Dispatch this deferred event to the matching JvmtiExport poster.
// Called only from the service thread; frees any strdup'ed name copies
// made by the factory functions above.
void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == nullptr)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == nullptr)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
843 
// Post this (CompiledMethodLoad) event to a single environment, as opposed
// to post() which broadcasts to all environments.
void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
  assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
  nmethod* nm = _event_data.compiled_method_load;
  JvmtiExport::post_compiled_method_load(env, nm);
}
849 
850 void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
851   if (_type == TYPE_COMPILED_METHOD_LOAD) {
852     _event_data.compiled_method_load->run_nmethod_entry_barrier();
853   }
854 }
855 
856 
// Keep the nmethod for compiled_method_load from being unloaded.
// Note: the OopClosure 'f' is not applied here; only the nmethod closure is.
void JvmtiDeferredEvent::oops_do(OopClosure* f, NMethodClosure* cf) {
  if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_nmethod(_event_data.compiled_method_load);
  }
}
863 
864 // The GC calls this and marks the nmethods here on the stack so that
865 // they cannot be unloaded while in the queue.
866 void JvmtiDeferredEvent::nmethods_do(NMethodClosure* cf) {
867   if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
868     cf->do_nmethod(_event_data.compiled_method_load);
869   }
870 }
871 
872 
// True when there are queued events ready to be posted (live phase only).
bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check.  Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE  && _queue_head != nullptr;
}
883 
884 void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
885   // Events get added to the end of the queue (and are pulled off the front).
886   QueueNode* node = new QueueNode(event);
887   if (_queue_tail == nullptr) {
888     _queue_tail = _queue_head = node;
889   } else {
890     assert(_queue_tail->next() == nullptr, "Must be the last element in the list");
891     _queue_tail->set_next(node);
892     _queue_tail = node;
893   }
894 
895   assert((_queue_head == nullptr) == (_queue_tail == nullptr),
896          "Inconsistent queue markers");
897 }
898 
899 JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
900   assert(_queue_head != nullptr, "Nothing to dequeue");
901 
902   if (_queue_head == nullptr) {
903     // Just in case this happens in product; it shouldn't but let's not crash
904     return JvmtiDeferredEvent();
905   }
906 
907   QueueNode* node = _queue_head;
908   _queue_head = _queue_head->next();
909   if (_queue_head == nullptr) {
910     _queue_tail = nullptr;
911   }
912 
913   assert((_queue_head == nullptr) == (_queue_tail == nullptr),
914          "Inconsistent queue markers");
915 
916   JvmtiDeferredEvent event = node->event();
917   delete node;
918   return event;
919 }
920 
// Post all queued CompiledMethodLoad events to 'env'. Each event is posted
// while still enqueued (so nmethods_do keeps its nmethod alive), then
// dequeued and freed.
void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  // Post events while nmethods are still in the queue and can't be unloaded.
  while (_queue_head != nullptr) {
    _queue_head->event().post_compiled_method_load_event(env);
    dequeue();
  }
}
928 
929 void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
930   for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
931      node->event().run_nmethod_entry_barriers();
932   }
933 }
934 
935 
936 void JvmtiDeferredEventQueue::oops_do(OopClosure* f, NMethodClosure* cf) {
937   for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
938      node->event().oops_do(f, cf);
939   }
940 }
941 
942 void JvmtiDeferredEventQueue::nmethods_do(NMethodClosure* cf) {
943   for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
944      node->event().nmethods_do(cf);
945   }
946 }