1 /*
2 * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/javaClasses.hpp"
26 #include "classfile/symbolTable.hpp"
27 #include "code/nmethod.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "interpreter/oopMapCache.hpp"
30 #include "jvmtifiles/jvmtiEnv.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logStream.hpp"
33 #include "memory/allocation.inline.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/instanceKlass.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "oops/oopHandle.inline.hpp"
39 #include "prims/jvmtiAgentThread.hpp"
40 #include "prims/jvmtiEventController.inline.hpp"
41 #include "prims/jvmtiImpl.hpp"
42 #include "prims/jvmtiRedefineClasses.hpp"
43 #include "runtime/atomicAccess.hpp"
44 #include "runtime/continuation.hpp"
45 #include "runtime/deoptimization.hpp"
46 #include "runtime/frame.inline.hpp"
47 #include "runtime/handles.inline.hpp"
48 #include "runtime/interfaceSupport.inline.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/javaThread.hpp"
51 #include "runtime/jniHandles.inline.hpp"
52 #include "runtime/os.hpp"
53 #include "runtime/serviceThread.hpp"
54 #include "runtime/signature.hpp"
55 #include "runtime/threadSMR.hpp"
56 #include "runtime/vframe.inline.hpp"
57 #include "runtime/vframe_hp.hpp"
58 #include "runtime/vmOperations.hpp"
59 #include "utilities/exceptions.hpp"
60
61 //
62 // class JvmtiAgentThread
63 //
64 // JavaThread used to wrap a thread started by an agent
65 // using the JVMTI method RunAgentThread.
66 //
67
68 JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
69 : JavaThread(start_function_wrapper) {
70 _env = env;
71 _start_fn = start_fn;
72 _start_arg = start_arg;
73 }
74
75 void
76 JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
77 // It is expected that any Agent threads will be created as
78 // Java Threads. If this is the case, notification of the creation
79 // of the thread is given in JavaThread::thread_main().
80 assert(thread == JavaThread::current(), "sanity check");
81
82 JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
83 dthread->call_start_function();
84 }
85
// Invoke the agent's start function, transitioning this thread to native
// first (agent code runs outside the VM state).  The function receives the
// external JVMTI environment, this thread's JNI environment, and the
// agent-supplied argument.
void
JvmtiAgentThread::call_start_function() {
  ThreadToNativeFromVM transition(this);
  _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}
91
92 //
93 // class JvmtiBreakpoint
94 //
95
// A breakpoint is identified by a (method, bci) pair.  The OopHandle keeps
// the method's class holder alive in JVMTI oop storage so the method's
// metadata cannot be unloaded while this breakpoint exists.
JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
    : _method(m_method), _bci((int)location) {
  assert(_method != nullptr, "No method for breakpoint.");
  assert(_bci >= 0, "Negative bci for breakpoint.");
  oop class_holder_oop = _method->method_holder()->klass_holder();
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), class_holder_oop);
}
103
// Copy constructor.  Each JvmtiBreakpoint owns its own OopHandle (released
// in the destructor), so allocate a fresh handle referring to the same
// class-holder oop rather than sharing the source's handle.
JvmtiBreakpoint::JvmtiBreakpoint(const JvmtiBreakpoint& bp)
  : _method(bp._method), _bci(bp._bci) {
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), bp._class_holder.resolve());
}
108
// Return the class-holder OopHandle to JVMTI oop storage so the holder can
// be collected once no breakpoint references it.
JvmtiBreakpoint::~JvmtiBreakpoint() {
  _class_holder.release(JvmtiExport::jvmti_oop_storage());
}
112
113 bool JvmtiBreakpoint::equals(const JvmtiBreakpoint& bp) const {
114 return _method == bp._method
115 && _bci == bp._bci;
116 }
117
// Bytecode pointer for this breakpoint's bci within its method.
address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}
121
// Apply meth_act (Method::set_breakpoint or Method::clear_breakpoint) to the
// current method and to every matching EMCP previous version created by
// class redefinition, so all runnable versions agree on the breakpoint.
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  assert(!_method->is_old(), "the breakpoint method shouldn't be old");
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  InstanceKlass* ik = _method->method_holder();
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  for (InstanceKlass* pv_node = ik->previous_versions();
       pv_node != nullptr;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // Only set breakpoints in EMCP methods.
      // EMCP methods are old but not obsolete. Equivalent
      // Modulo Constant Pool means the method is equivalent except
      // the constant pool and instructions that access the constant
      // pool might be different.
      // If a breakpoint is set in a redefined method, its EMCP methods
      // must have a breakpoint also.
      // None of the methods are deleted until none are running.
      // This code could set a breakpoint in a method that
      // is never reached, but this won't be noticeable to the programmer.
      if (!method->is_obsolete() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        ResourceMark rm;
        log_debug(redefine, class, breakpoint)
          ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
           method->name()->as_C_string(), method->signature()->as_C_string());
        (method->*meth_act)(_bci);
        // At most one method per class version can match (name, signature).
        break;
      }
    }
  }
}
163
// Install this breakpoint in the method and all of its EMCP versions.
void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}
167
// Remove this breakpoint from the method and all of its EMCP versions.
void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}
171
172 void JvmtiBreakpoint::print_on(outputStream* out) const {
173 #ifndef PRODUCT
174 ResourceMark rm;
175 const char *class_name = (_method == nullptr) ? "null" : _method->klass_name()->as_C_string();
176 const char *method_name = (_method == nullptr) ? "null" : _method->name()->as_C_string();
177 out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
178 #endif
179 }
180
181
182 //
183 // class VM_ChangeBreakpoints
184 //
185 // Modify the Breakpoints data structure at a safepoint
186 //
187 // The caller of VM_ChangeBreakpoints operation should ensure that
188 // _bp.method is preserved until VM_ChangeBreakpoints is processed.
189
// Perform the requested breakpoint change at a safepoint.
void VM_ChangeBreakpoints::doit() {
  if (_bp->method()->is_old()) {
    // The bp->_method became old because a VMOp with class redefinition happened
    // for this class after the JvmtiBreakpoint was created but before this
    // VM_ChangeBreakpoints operation started.
    // All class breakpoints are cleared during redefinition, so don't set/clear this breakpoint.
    return;
  }
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  default:
    assert(false, "Unknown operation");
  }
}
208
209 //
210 // class JvmtiBreakpoints
211 //
212 // a JVMTI internal collection of JvmtiBreakpoint
213 //
214
// Start with a small growable array; breakpoint counts are typically tiny.
JvmtiBreakpoints::JvmtiBreakpoints()
  : _elements(5, mtServiceability) {
}
218
219 JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
220
221 void JvmtiBreakpoints::print() {
222 #ifndef PRODUCT
223 LogTarget(Trace, jvmti) log;
224 LogStream log_stream(log);
225
226 int n = length();
227 for (int i = 0; i < n; i++) {
228 JvmtiBreakpoint& bp = at(i);
229 log_stream.print("%d: ", i);
230 bp.print_on(&log_stream);
231 log_stream.cr();
232 }
233 #endif
234 }
235
236
237 void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
238 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
239
240 int i = find(bp);
241 if (i == -1) {
242 append(bp);
243 bp.set();
244 }
245 }
246
247 void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
248 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
249
250 int i = find(bp);
251 if (i != -1) {
252 remove(i);
253 bp.clear();
254 }
255 }
256
257 int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
258 if (find(bp) != -1) {
259 return JVMTI_ERROR_DUPLICATE;
260 }
261
262 // Ensure that bp._method is not deallocated before VM_ChangeBreakpoints::doit().
263 methodHandle mh(Thread::current(), bp.method());
264 VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
265 VMThread::execute(&set_breakpoint);
266 return JVMTI_ERROR_NONE;
267 }
268
269 int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
270 if (find(bp) == -1) {
271 return JVMTI_ERROR_NOT_FOUND;
272 }
273
274 // Ensure that bp._method is not deallocated before VM_ChangeBreakpoints::doit().
275 methodHandle mh(Thread::current(), bp.method());
276 VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
277 VMThread::execute(&clear_breakpoint);
278 return JVMTI_ERROR_NONE;
279 }
280
// Clear and remove every breakpoint belonging to the given class.
// Called at a safepoint (e.g. during class redefinition).
void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // Go backwards because this removes entries that are freed.
  for (int i = length() - 1; i >= 0; i--) {
    JvmtiBreakpoint& bp = at(i);
    if (bp.method()->method_holder() == klass) {
      bp.clear();
      remove(i);
    }
  }
}
293
294 //
295 // class JvmtiCurrentBreakpoints
296 //
297
// Lazily-created singleton collection of all current JVMTI breakpoints.
JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints = nullptr;
299
// Lazy, thread-safe accessor for the singleton JvmtiBreakpoints.
// Racing creators both allocate; the loser of the atomic publish deletes
// its copy.
JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints == nullptr) {
    JvmtiBreakpoints* breakpoints = new JvmtiBreakpoints();
    if (!AtomicAccess::replace_if_null(&_jvmti_breakpoints, breakpoints)) {
      // already created concurrently by another thread
      delete breakpoints;
    }
  }
  return (*_jvmti_breakpoints);
}
310
311
312 ///////////////////////////////////////////////////////////////
313 //
314 // class VM_BaseGetOrSetLocal
315 //
316
// Zero-initialized jvalue used by the getter constructors, which have no
// caller-supplied value.
const jvalue VM_BaseGetOrSetLocal::_DEFAULT_VALUE = {0L};
// Base constructor shared by the getter and setter variants below.
319
// calling_thread: thread requesting the operation (used for JNI handles);
// depth/index:    target frame depth and local slot;
// type/value:     basic type of the local and, for setters, the new value;
// set:            true for SetLocal*, false for GetLocal*;
// self:           true if the target thread is the calling thread itself.
VM_BaseGetOrSetLocal::VM_BaseGetOrSetLocal(JavaThread* calling_thread, jint depth,
                                           jint index, BasicType type, jvalue value, bool set, bool self)
  : _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(nullptr)
  , _set(set)
  , _self(self)
  , _need_clone(false)
  , _result(JVMTI_ERROR_NONE)
{
}
334
335 // Check that the klass is assignable to a type with the given signature.
336 // Another solution could be to use the function Klass::is_subtype_of(type).
337 // But the type class can be forced to load/initialize eagerly in such a case.
338 // This may cause unexpected consequences like CFLH or class-init JVMTI events.
339 // It is better to avoid such a behavior.
340 bool VM_BaseGetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
341 assert(ty_sign != nullptr, "type signature must not be null");
342 assert(thread != nullptr, "thread must not be null");
343 assert(klass != nullptr, "klass must not be null");
344
345 int len = (int) strlen(ty_sign);
346 if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
347 ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
348 ty_sign++;
349 len -= 2;
350 }
351 TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
352 if (klass->name() == ty_sym) {
353 return true;
354 }
355 // Compare primary supers
356 int super_depth = klass->super_depth();
357 int idx;
358 for (idx = 0; idx < super_depth; idx++) {
359 if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
360 return true;
361 }
362 }
363 // Compare secondary supers
364 const Array<Klass*>* sec_supers = klass->secondary_supers();
365 for (idx = 0; idx < sec_supers->length(); idx++) {
366 if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
367 return true;
368 }
369 }
370 return false;
371 }
372
373 // Checks error conditions:
374 // JVMTI_ERROR_INVALID_SLOT
375 // JVMTI_ERROR_TYPE_MISMATCH
376 // Returns: 'true' - everything is Ok, 'false' - error code
377
// Validate _index/_type against the method's LocalVariableTable.
// Sets _result (JVMTI_ERROR_INVALID_SLOT / TYPE_MISMATCH / INVALID_OBJECT)
// and returns false on failure; returns true when the slot is valid.
bool VM_BaseGetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  if (!method->has_localvariable_table()) {
    // No LVT available: just check index boundaries.
    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
    if (_index < 0 || _index + extra_slot >= method->max_locals()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  jint num_entries = method->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  // Find the LVT entry covering this slot at the frame's current bci and
  // record its signature's constant-pool index.
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot be overlapped
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  Symbol* sign_sym = method->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  // Normalize sub-word and array types to the types JVMTI clients use.
  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != nullptr) { // null reference is allowed
    // Check that the jobject class matches the return type signature.
    oop obj = JNIHandles::resolve_external_guard(jobj);
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, VMThread::vm_thread())) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}
450
// Validate _index/_type against the frame's actual stack-value types
// (no LocalVariableTable required).  Sets _result and returns false on
// failure; returns true when the slot is usable.
bool VM_BaseGetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  // Longs and doubles occupy two consecutive slots.
  jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;

  if (_index < 0 || _index + extra_slot >= method->max_locals()) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  StackValueCollection *locals = _jvf->locals();
  BasicType slot_type = locals->at(_index)->type();

  if (slot_type == T_CONFLICT) {
    // Slot is dead/uninitialized at this point in the method.
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  if (extra_slot) {
    // The second half of a long/double is represented as T_INT.
    BasicType extra_slot_type = locals->at(_index + 1)->type();
    if (extra_slot_type != T_INT) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
  }
  // Integral sub-word locals appear as T_INT, so a T_INT slot is acceptable
  // for any requested non-object type.
  if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }
  return true;
}
479
// Replace _value.l with a freshly allocated snapshot of the inline-typed
// 'this' object (slot 0) fetched by the getter.  Runs on the calling thread
// (after the VM operation), where allocation is permitted.
void VM_BaseGetOrSetLocal::check_and_clone_this_value_object() {
  oop obj = JNIHandles::resolve(_value.l);
  HandleMark hm(_calling_thread);
  Handle obj_h(_calling_thread, obj);

  assert(_type == T_OBJECT, "sanity check");
  assert(obj != nullptr, "expected non-null oop");
  assert(obj_h()->is_inline(), "expected inline oop");
  assert(_index == 0, "expected slot 0 for THIS object");

  InlineKlass* klass = InlineKlass::cast(obj_h()->klass());
  inlineOop obj_copy = klass->allocate_instance(_calling_thread);
  if (obj_copy == nullptr) {
    // Allocation failed; note that make_local below still runs, so _value.l
    // becomes a handle to nullptr on this path.
    _result = JVMTI_ERROR_OUT_OF_MEMORY;
  } else {
    inlineOop thisObj = inlineOop(obj_h());
    // copy object payload into the object snapshot
    BufferedValuePayload src(thisObj);
    BufferedValuePayload dst(obj_copy, klass);
    src.copy_to(dst);

    // Must ensure the content of the buffered value is visible
    // before publishing the buffered value oop
    OrderAccess::storestore();
  }
  _value.l = JNIHandles::make_local(_calling_thread, obj_copy);
}
507
508 static bool can_be_deoptimized(vframe* vf) {
509 return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
510 }
511
512 bool VM_GetOrSetLocal::doit_prologue() {
513 if (!_eb.deoptimize_objects(_depth, _depth)) {
514 // The target frame is affected by a reallocation failure.
515 _result = JVMTI_ERROR_OUT_OF_MEMORY;
516 return false;
517 }
518
519 return true;
520 }
521
// Core of the Get/SetLocal VM operation: locate the target frame, validate
// the slot and type, then read or write the local.  Errors are reported via
// _result; getters deliver the value via _value.
void VM_BaseGetOrSetLocal::doit() {
  _jvf = get_java_vframe();
  if (_jvf == nullptr) {
    // get_java_vframe() already stored the error code in _result.
    return;
  };

  frame fr = _jvf->fr();
  if (_set && _depth != 0 && Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
    _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
    return;
  }

  Method* method = _jvf->method();
  if (getting_receiver()) {
    // Receiver (slot 0) only exists for non-static methods.
    if (method->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return;
    }
  } else {
    if (method->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }

    // Validate against the frame's actual slot types, then (if present)
    // against the LocalVariableTable.
    if (!check_slot_type_no_lvt(_jvf)) {
      return;
    }
    if (method->has_localvariable_table() &&
        !check_slot_type_lvt(_jvf)) {
      return;
    }
  }

  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    if (fr.is_heap_frame()) { // we want this check after the check for JVMTI_ERROR_INVALID_SLOT
      assert(Continuation::is_frame_in_continuation(_jvf->thread(), fr), "sanity check");
      // If the topmost frame is a heap frame, then it hasn't been thawed. This can happen
      // if we are executing at a return barrier safepoint. The callee frame has been popped,
      // but the caller frame has not been thawed. We can't support a JVMTI SetLocal in the callee
      // frame at this point, because we aren't truly in the callee yet.
      // fr.is_heap_frame() is impossible if a continuation is at a single step or breakpoint.
      _result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations
      return;
    }

    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {
      // Continuation can't be unmounted at this point (it was checked/reported in get_java_vframe).
      if (Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
        _result = JVMTI_ERROR_OPAQUE_FRAME; // can't deoptimize for top continuation frame
        return;
      }

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens
      // any vframe created after this point will have its locals
      // reflecting this update so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only need because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    // Interpreted (or non-deoptimizable) frame: write the local directly.
    StackValueCollection *locals = _jvf->locals();
    Thread* current_thread = VMThread::vm_thread();
    HandleMark hm(current_thread);

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT: {
        Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index); break;
        case T_LONG:   _value.j = locals->long_at  (_index); break;
        case T_FLOAT:  _value.f = locals->float_at (_index); break;
        case T_DOUBLE: _value.d = locals->double_at(_index); break;
        case T_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();

          if (Arguments::is_valhalla_enabled()) {
            bool is_ctor = _jvf->method()->is_object_constructor();
            if (is_ctor && _index == 0 && obj != nullptr && obj->is_inline()) {
              _need_clone = true; // need to allocate an object snapshot in doit_epilogue
            }
          }
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}
657
// Runs on the requesting thread after the VM operation: perform the
// deferred clone of an inline-typed receiver flagged during doit().
void VM_BaseGetOrSetLocal::doit_epilogue() {
  if (_need_clone) {
    check_and_clone_this_value_object();
  }
}
663
// A SetLocal may trigger frame deoptimization from within the VM op.
bool VM_BaseGetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}
667
668
669 ///////////////////////////////////////////////////////////////
670 //
671 // class VM_GetOrSetLocal
672 //
673
674 // Constructor for non-object getter
// Getter for a non-object local: no calling-thread handle allocation and no
// object-escape handling is needed (_eb is constructed inactive).
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(false, nullptr, nullptr)
{
}
681
682 // Constructor for object or non-object setter
// Setter for an object or non-object local; _eb is active only for object
// values (type == T_OBJECT).
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self),
    _thread(thread),
    _eb(type == T_OBJECT, JavaThread::current(), thread)
{
}
689
690 // Constructor for object getter
// Getter for an object local; the calling thread is recorded so the result
// oop can be wrapped in one of its local JNI handles.
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self),
    _thread(thread),
    _eb(true, calling_thread, thread)
{
}
697
698 vframe *VM_GetOrSetLocal::get_vframe() {
699 if (!_thread->has_last_Java_frame()) {
700 return nullptr;
701 }
702 RegisterMap reg_map(_thread,
703 RegisterMap::UpdateMap::include,
704 RegisterMap::ProcessFrames::include,
705 RegisterMap::WalkContinuation::include);
706 vframe *vf = JvmtiEnvBase::get_cthread_last_java_vframe(_thread, ®_map);
707 int d = 0;
708 while ((vf != nullptr) && (d < _depth)) {
709 vf = vf->java_sender();
710 d++;
711 }
712 return vf;
713 }
714
715 javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
716 vframe* vf = get_vframe();
717 if (!_self && !_thread->is_suspended() && !_thread->is_carrier_thread_suspended()) {
718 _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
719 return nullptr;
720 }
721 if (vf == nullptr) {
722 _result = JVMTI_ERROR_NO_MORE_FRAMES;
723 return nullptr;
724 }
725 javaVFrame *jvf = (javaVFrame*)vf;
726
727 if (!vf->is_java_frame()) {
728 _result = JVMTI_ERROR_OPAQUE_FRAME;
729 return nullptr;
730 }
731 return jvf;
732 }
733
// GetReceiver is an object getter fixed at slot 0 (the receiver).
VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth, bool self)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0, self) {}
737
738
739 ///////////////////////////////////////////////////////////////
740 //
741 // class VM_VirtualThreadGetOrSetLocal
742 //
743
744 // Constructor for non-object getter
// Getter for a non-object local of a virtual thread identified by vthread_h.
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
                                                             jint index, BasicType type, bool self)
  : VM_BaseGetOrSetLocal(nullptr, depth, index, type, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}
752
753 // Constructor for object or non-object setter
754 VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, jint depth,
755 jint index, BasicType type, jvalue value, bool self)
756 : VM_BaseGetOrSetLocal(nullptr, depth, index, type, value, true, self)
757 {
758 _env = env;
759 _vthread_h = vthread_h;
760 }
761
762 // Constructor for object getter
// Getter for an object local of a virtual thread; the calling thread is
// recorded so the result oop can be wrapped in one of its local JNI handles.
VM_VirtualThreadGetOrSetLocal::VM_VirtualThreadGetOrSetLocal(JvmtiEnv* env, Handle vthread_h, JavaThread* calling_thread,
                                                             jint depth, int index, bool self)
  : VM_BaseGetOrSetLocal(calling_thread, depth, index, T_OBJECT, _DEFAULT_VALUE, false, self)
{
  _env = env;
  _vthread_h = vthread_h;
}
770
// Return the javaVFrame at _depth in the virtual thread's stack, or nullptr
// with _result set to the appropriate JVMTI error.
javaVFrame *VM_VirtualThreadGetOrSetLocal::get_java_vframe() {
  // A non-null carrier JavaThread means the virtual thread is mounted.
  JavaThread* java_thread = JvmtiEnvBase::get_JavaThread_or_null(_vthread_h());
  bool is_cont_mounted = (java_thread != nullptr);

  if (!(_self || JvmtiVTSuspender::is_vthread_suspended(_vthread_h()))) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return nullptr;
  }
  javaVFrame* jvf = JvmtiEnvBase::get_vthread_jvf(_vthread_h());

  // Walk down to the requested depth.
  int d = 0;
  while ((jvf != nullptr) && (d < _depth)) {
    jvf = jvf->java_sender();
    d++;
  }

  if (d < _depth || jvf == nullptr) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return nullptr;
  }

  // Setting locals requires a mounted virtual thread; non-Java frames are
  // always opaque.
  if ((_set && !is_cont_mounted) || !jvf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return nullptr;
  }
  return jvf;
}
798
// GetReceiver for a virtual thread is an object getter fixed at slot 0.
VM_VirtualThreadGetReceiver::VM_VirtualThreadGetReceiver(
    JvmtiEnv* env, Handle vthread_h, JavaThread* caller_thread, jint depth, bool self)
    : VM_VirtualThreadGetOrSetLocal(env, vthread_h, caller_thread, depth, 0, self) {}
802
803
804 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
805 nmethod* nm) {
806 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
807 event._event_data.compiled_method_load = nm;
808 return event;
809 }
810
811 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
812 jmethodID id, const void* code) {
813 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
814 event._event_data.compiled_method_unload.method_id = id;
815 event._event_data.compiled_method_unload.code_begin = code;
816 return event;
817 }
818
819 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
820 const char* name, const void* code_begin, const void* code_end) {
821 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
822 // Need to make a copy of the name since we don't know how long
823 // the event poster will keep it around after we enqueue the
824 // deferred event and return. strdup() failure is handled in
825 // the post() routine below.
826 event._event_data.dynamic_code_generated.name = os::strdup(name);
827 event._event_data.dynamic_code_generated.code_begin = code_begin;
828 event._event_data.dynamic_code_generated.code_end = code_end;
829 return event;
830 }
831
832 JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
833 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
834 // Need to make a copy of the name since we don't know how long
835 // the event poster will keep it around after we enqueue the
836 // deferred event and return. strdup() failure is handled in
837 // the post() routine below.
838 event._event_data.class_unload.name = os::strdup(name);
839 return event;
840 }
841
// Dispatch a dequeued event to the matching JvmtiExport poster.  For the
// name-carrying event types, the strdup'ed copy made at enqueue time is
// freed here after posting.
void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == nullptr)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == nullptr)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != nullptr) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
885
886 void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
887 assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
888 nmethod* nm = _event_data.compiled_method_load;
889 JvmtiExport::post_compiled_method_load(env, nm);
890 }
891
892 void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
893 if (_type == TYPE_COMPILED_METHOD_LOAD) {
894 _event_data.compiled_method_load->run_nmethod_entry_barrier();
895 }
896 }
897
898
899 // Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, NMethodClosure* cf) {
  // Note: the oop closure 'f' is not applied here; only the nmethod closure
  // is used, to keep the load-event nmethod from being unloaded.
  if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_nmethod(_event_data.compiled_method_load);
  }
}
905
906 // The GC calls this and marks the nmethods here on the stack so that
907 // they cannot be unloaded while in the queue.
void JvmtiDeferredEvent::nmethods_do(NMethodClosure* cf) {
  // Only COMPILED_METHOD_LOAD events reference an nmethod.
  if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_nmethod(_event_data.compiled_method_load);
  }
}
913
914
// True if there are queued events that should be posted now (only once the
// live phase has begun).
bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check. Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE && _queue_head != nullptr;
}
925
926 void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
927 // Events get added to the end of the queue (and are pulled off the front).
928 QueueNode* node = new QueueNode(event);
929 if (_queue_tail == nullptr) {
930 _queue_tail = _queue_head = node;
931 } else {
932 assert(_queue_tail->next() == nullptr, "Must be the last element in the list");
933 _queue_tail->set_next(node);
934 _queue_tail = node;
935 }
936
937 assert((_queue_head == nullptr) == (_queue_tail == nullptr),
938 "Inconsistent queue markers");
939 }
940
941 JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
942 assert(_queue_head != nullptr, "Nothing to dequeue");
943
944 if (_queue_head == nullptr) {
945 // Just in case this happens in product; it shouldn't but let's not crash
946 return JvmtiDeferredEvent();
947 }
948
949 QueueNode* node = _queue_head;
950 _queue_head = _queue_head->next();
951 if (_queue_head == nullptr) {
952 _queue_tail = nullptr;
953 }
954
955 assert((_queue_head == nullptr) == (_queue_tail == nullptr),
956 "Inconsistent queue markers");
957
958 JvmtiDeferredEvent event = node->event();
959 delete node;
960 return event;
961 }
962
// Drain the queue, posting each COMPILED_METHOD_LOAD event to env.
// The event is posted BEFORE being dequeued so the nmethod stays reachable
// (via oops_do/nmethods_do over the queue) while it is being posted.
void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  // Post events while nmethods are still in the queue and can't be unloaded.
  while (_queue_head != nullptr) {
    _queue_head->event().post_compiled_method_load_event(env);
    dequeue();
  }
}
970
971 void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
972 for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
973 node->event().run_nmethod_entry_barriers();
974 }
975 }
976
977
978 void JvmtiDeferredEventQueue::oops_do(OopClosure* f, NMethodClosure* cf) {
979 for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
980 node->event().oops_do(f, cf);
981 }
982 }
983
984 void JvmtiDeferredEventQueue::nmethods_do(NMethodClosure* cf) {
985 for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
986 node->event().nmethods_do(cf);
987 }
988 }