/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/moduleEntry.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiExtensions.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiManageCapabilities.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"


///////////////////////////////////////////////////////////////
//
// JvmtiEnvBase
//

JvmtiEnvBase* JvmtiEnvBase::_head_environment = NULL;

bool JvmtiEnvBase::_globally_initialized = false;
volatile bool JvmtiEnvBase::_needs_clean_up = false;

jvmtiPhase JvmtiEnvBase::_phase = JVMTI_PHASE_PRIMORDIAL;

volatile int JvmtiEnvBase::_dying_thread_env_iteration_count = 0;

extern jvmtiInterface_1_ jvmti_Interface;
extern jvmtiInterface_1_ jvmtiTrace_Interface;


// perform initializations that must occur before any JVMTI environments
// are released but which should only be initialized once (no matter
// how many environments are created).
void
JvmtiEnvBase::globally_initialize() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
  assert(_globally_initialized == false, "bad call");

  JvmtiManageCapabilities::initialize();

  // register extension functions and events
  JvmtiExtensions::register_extensions();

#ifdef JVMTI_TRACE
  JvmtiTrace::initialize();
#endif

  _globally_initialized = true;
}


void
JvmtiEnvBase::initialize() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  // Add this environment to the end of the environment list (order is important)
  {
    // This block of code must not contain any safepoints, as list deallocation
    // (which occurs at a safepoint) cannot occur simultaneously with this list
    // addition. Note: NoSafepointVerifier cannot, currently, be used before
    // threads exist.
    JvmtiEnvIterator it;
    JvmtiEnvBase *previous_env = NULL;
    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
      previous_env = env;
    }
    if (previous_env == NULL) {
      _head_environment = this;
    } else {
      previous_env->set_next_environment(this);
    }
  }

  if (_globally_initialized == false) {
    globally_initialize();
  }
}

jvmtiPhase
JvmtiEnvBase::phase() {
  // For JVMTI environments that possess the can_generate_early_vmstart capability:
  // replace JVMTI_PHASE_PRIMORDIAL with JVMTI_PHASE_START
  if (_phase == JVMTI_PHASE_PRIMORDIAL &&
      JvmtiExport::early_vmstart_recorded() &&
      early_vmstart_env()) {
    return JVMTI_PHASE_START;
  }
  return _phase; // Normal case
}

bool
JvmtiEnvBase::is_valid() {
  jint value = 0;

  // This object might not be a JvmtiEnvBase so we can't assume
  // the _magic field is properly aligned. Get the value in a safe
  // way and then check against JVMTI_MAGIC.

  switch (sizeof(_magic)) {
  case 2:
    value = Bytes::get_native_u2((address)&_magic);
    break;

  case 4:
    value = Bytes::get_native_u4((address)&_magic);
    break;

  case 8:
    value = Bytes::get_native_u8((address)&_magic);
    break;

  default:
    guarantee(false, "_magic field is an unexpected size");
  }

  return value == JVMTI_MAGIC;
}


bool
JvmtiEnvBase::use_version_1_0_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 0;  // micro version doesn't matter here
}


bool
JvmtiEnvBase::use_version_1_1_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 1;  // micro version doesn't matter here
}

bool
JvmtiEnvBase::use_version_1_2_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 2;  // micro version doesn't matter here
}

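// Note: the three helpers above compare only the major and minor version that
// the agent requested when it created this environment (the micro version is
// ignored, as noted); version-sensitive JVM TI code can use them to preserve
// version-specific behavior for agents written against older specifications.
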
JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
  _version = version;
  _env_local_storage = NULL;
  _tag_map = NULL;
  _native_method_prefix_count = 0;
  _native_method_prefixes = NULL;
  _next = NULL;
  _class_file_load_hook_ever_enabled = false;

  // Moot since ClassFileLoadHook not yet enabled.
  // But "true" will give a more predictable ClassFileLoadHook behavior
  // for environment creation during ClassFileLoadHook.
  _is_retransformable = true;

  // all callbacks initially NULL
  memset(&_event_callbacks, 0, sizeof(jvmtiEventCallbacks));

  // all capabilities initially off
  memset(&_current_capabilities, 0, sizeof(_current_capabilities));

  // all prohibited capabilities initially off
  memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities));

  _magic = JVMTI_MAGIC;

  JvmtiEventController::env_initialize((JvmtiEnv*)this);

#ifdef JVMTI_TRACE
  _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface;
#else
  _jvmti_external.functions = &jvmti_Interface;
#endif
}


void
JvmtiEnvBase::dispose() {

#ifdef JVMTI_TRACE
  JvmtiTrace::shutdown();
#endif

  // Dispose of event info and let the event controller call us back
  // in a locked state (env_dispose, below)
  JvmtiEventController::env_dispose(this);
}

void
JvmtiEnvBase::env_dispose() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  // We have been entered with all events disabled on this environment.
  // A race to re-enable events (by setting callbacks) is prevented by
  // checking for a valid environment when setting callbacks (while
  // holding the JvmtiThreadState_lock).

  // Mark as invalid.
  _magic = DISPOSED_MAGIC;

  // Relinquish all capabilities.
  jvmtiCapabilities *caps = get_capabilities();
  JvmtiManageCapabilities::relinquish_capabilities(caps, caps, caps);

  // Same situation as with events (see above)
  set_native_method_prefixes(0, NULL);

  JvmtiTagMap* tag_map_to_clear = tag_map_acquire();
  // A tag map can be big, clear it now to save memory until
  // the destructor runs.
  if (tag_map_to_clear != NULL) {
    tag_map_to_clear->clear();
  }

  _needs_clean_up = true;
}


JvmtiEnvBase::~JvmtiEnvBase() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  // There is a small window of time during which the tag map of a
  // disposed environment could have been reallocated.
  // Make sure it is gone.
  JvmtiTagMap* tag_map_to_deallocate = _tag_map;
  set_tag_map(NULL);
  // A tag map can be big, deallocate it now
  if (tag_map_to_deallocate != NULL) {
    delete tag_map_to_deallocate;
  }

  _magic = BAD_MAGIC;
}

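// Disposal is two-phased: env_dispose() (above) invalidates the environment
// and releases what it can immediately, then sets _needs_clean_up; the actual
// unlinking and deletion of disposed environments is deferred to
// periodic_clean_up() below, which check_for_periodic_clean_up() only runs at
// a safepoint once no thread is iterating the environment list.
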
void
JvmtiEnvBase::periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So
  // clean up JvmtiThreadState before deleting JvmtiEnv pointer.
  JvmtiThreadState::periodic_clean_up();

  // Unlink all invalid environments from the list of environments
  // and deallocate them
  JvmtiEnvIterator it;
  JvmtiEnvBase* previous_env = NULL;
  JvmtiEnvBase* env = it.first();
  while (env != NULL) {
    if (env->is_valid()) {
      previous_env = env;
      env = it.next(env);
    } else {
      // This one isn't valid, remove it from the list and deallocate it
      JvmtiEnvBase* defunct_env = env;
      env = it.next(env);
      if (previous_env == NULL) {
        _head_environment = env;
      } else {
        previous_env->set_next_environment(env);
      }
      delete defunct_env;
    }
  }

}


void
JvmtiEnvBase::check_for_periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  class ThreadInsideIterationClosure: public ThreadClosure {
   private:
    bool _inside;
   public:
    ThreadInsideIterationClosure() : _inside(false) {};

    void do_thread(Thread* thread) {
      _inside |= thread->is_inside_jvmti_env_iteration();
    }

    bool is_inside_jvmti_env_iteration() {
      return _inside;
    }
  };

  if (_needs_clean_up) {
    // Check if we are currently iterating environment,
    // deallocation should not occur if we are
    ThreadInsideIterationClosure tiic;
    Threads::threads_do(&tiic);
    if (!tiic.is_inside_jvmti_env_iteration() &&
        !is_inside_dying_thread_env_iteration()) {
      _needs_clean_up = false;
      JvmtiEnvBase::periodic_clean_up();
    }
  }
}


void
JvmtiEnvBase::record_first_time_class_file_load_hook_enabled() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
         "sanity check");

  if (!_class_file_load_hook_ever_enabled) {
    _class_file_load_hook_ever_enabled = true;

    if (get_capabilities()->can_retransform_classes) {
      _is_retransformable = true;
    } else {
      _is_retransformable = false;

      // cannot add retransform capability after ClassFileLoadHook has been enabled
      get_prohibited_capabilities()->can_retransform_classes = 1;
    }
  }
}


void
JvmtiEnvBase::record_class_file_load_hook_enabled() {
  if (!_class_file_load_hook_ever_enabled) {
    if (Threads::number_of_threads() == 0) {
      record_first_time_class_file_load_hook_enabled();
    } else {
      MutexLocker mu(JvmtiThreadState_lock);
      record_first_time_class_file_load_hook_enabled();
    }
  }
}

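// Native method prefixes support the JVM TI SetNativeMethodPrefix(es)
// functions: an instrumentation agent that wraps a native method (for
// example, replacing native foo() with a non-native foo() that calls a
// prefixed native wrapped_foo()) registers its prefix so that native method
// resolution can strip it when binding the underlying implementation.
// The "wrapped_" name above is purely illustrative.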
jvmtiError
JvmtiEnvBase::set_native_method_prefixes(jint prefix_count, char** prefixes) {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
         "sanity check");

  int old_prefix_count = get_native_method_prefix_count();
  char **old_prefixes = get_native_method_prefixes();

  // allocate and install the new prefixes
  if (prefix_count == 0 || !is_valid()) {
    _native_method_prefix_count = 0;
    _native_method_prefixes = NULL;
  } else {
    // there are prefixes, allocate an array to hold them, and fill it
    char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal);
    if (new_prefixes == NULL) {
      return JVMTI_ERROR_OUT_OF_MEMORY;
    }
    for (int i = 0; i < prefix_count; i++) {
      char* prefix = prefixes[i];
      if (prefix == NULL) {
        for (int j = 0; j < (i-1); j++) {
          os::free(new_prefixes[j]);
        }
        os::free(new_prefixes);
        return JVMTI_ERROR_NULL_POINTER;
      }
      prefix = os::strdup(prefixes[i]);
      if (prefix == NULL) {
        for (int j = 0; j < (i-1); j++) {
          os::free(new_prefixes[j]);
        }
        os::free(new_prefixes);
        return JVMTI_ERROR_OUT_OF_MEMORY;
      }
      new_prefixes[i] = prefix;
    }
    _native_method_prefix_count = prefix_count;
    _native_method_prefixes = new_prefixes;
  }

  // now that we know the new prefixes have been successfully installed we can
  // safely remove the old ones
  if (old_prefix_count != 0) {
    for (int i = 0; i < old_prefix_count; i++) {
      os::free(old_prefixes[i]);
    }
    os::free(old_prefixes);
  }

  return JVMTI_ERROR_NONE;
}


// Collect all the prefixes which have been set in any JVM TI environments
// by the SetNativeMethodPrefix(es) functions. Be sure to maintain the
// order of environments and the order of prefixes within each environment.
// Return in a resource allocated array.
char**
JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) {
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JvmtiThreadState_lock->is_locked(),
         "sanity check");

  int total_count = 0;
  GrowableArray<char*>* prefix_array = new GrowableArray<char*>(5);

  JvmtiEnvIterator it;
  for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
    int prefix_count = env->get_native_method_prefix_count();
    char** prefixes = env->get_native_method_prefixes();
    for (int j = 0; j < prefix_count; j++) {
      // retrieve a prefix and, so that it is safe against asynchronous changes,
      // copy it into the resource area
      char* prefix = prefixes[j];
      char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1);
      strcpy(prefix_copy, prefix);
      prefix_array->at_put_grow(total_count++, prefix_copy);
    }
  }

  char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count);
  char** p = all_prefixes;
  for (int i = 0; i < total_count; ++i) {
    *p++ = prefix_array->at(i);
  }
  *count_ptr = total_count;
  return all_prefixes;
}

void
JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks,
                                  jint size_of_callbacks) {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  size_t byte_cnt = sizeof(jvmtiEventCallbacks);

  // clear in either case so we don't keep stale data in any gap between sizes
  memset(&_event_callbacks, 0, byte_cnt);

  // Now that JvmtiThreadState_lock is held, prevent a possible race condition where events
  // are re-enabled by a call to set event callbacks where the DisposeEnvironment
  // occurs after the boiler-plate environment check and before the lock is acquired.
  if (callbacks != NULL && is_valid()) {
    if (size_of_callbacks < (jint)byte_cnt) {
      byte_cnt = size_of_callbacks;
    }
    memcpy(&_event_callbacks, callbacks, byte_cnt);
  }
}


// In the fullness of time, all users of this method should instead
// use allocate() directly; besides being cleaner and faster, this will
// mean much better out-of-memory handling.
unsigned char *
JvmtiEnvBase::jvmtiMalloc(jlong size) {
  unsigned char* mem = NULL;
  jvmtiError result = allocate(size, &mem);
  assert(result == JVMTI_ERROR_NONE, "Allocate failed");
  return mem;
}


// Handle management

jobject JvmtiEnvBase::jni_reference(Handle hndl) {
  return JNIHandles::make_local(hndl());
}

jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) {
  return JNIHandles::make_local(thread, hndl());
}

void JvmtiEnvBase::destroy_jni_reference(jobject jobj) {
  JNIHandles::destroy_local(jobj);
}

void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) {
  JNIHandles::destroy_local(jobj); // thread is unused.
}

//
// Threads
//

jobject *
JvmtiEnvBase::new_jobjectArray(int length, Handle *handles) {
  if (length == 0) {
    return NULL;
  }

  jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length);
  NULL_CHECK(objArray, NULL);

  for (int i = 0; i < length; i++) {
    objArray[i] = jni_reference(handles[i]);
  }
  return objArray;
}

jthread *
JvmtiEnvBase::new_jthreadArray(int length, Handle *handles) {
  return (jthread *) new_jobjectArray(length, handles);
}

jthreadGroup *
JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
  return (jthreadGroup *) new_jobjectArray(length, handles);
}

// return the vframe on the specified thread and depth, NULL if no such frame
// The thread and the oops in the returned vframe might not have been processed.
vframe*
JvmtiEnvBase::vframeForNoProcess(JavaThread* java_thread, jint depth) {
  if (!java_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(java_thread, true /* update_map */, false /* process_frames */);
  vframe *vf = java_thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}


//
// utilities: JNI objects
//


jclass
JvmtiEnvBase::get_jni_class_non_null(Klass* k) {
  assert(k != NULL, "k != NULL");
  Thread *thread = Thread::current();
  return (jclass)jni_reference(Handle(thread, k->java_mirror()));
}

//
// Field Information
//

bool
JvmtiEnvBase::get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd) {
  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
    return false;
  }
  bool found = false;
  if (jfieldIDWorkaround::is_static_jfieldID(field)) {
    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
    found = id->find_local_field(fd);
  } else {
    // Non-static field. The fieldID is really the offset of the field within the object.
    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
    found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
  }
  return found;
}

//
// Object Monitor Information
//

//
// Count the number of objects for a lightweight monitor. The hobj
// parameter is the object that owns the monitor so this routine will
// count the number of times the same object was locked by frames
// in java_thread.
//
jint
JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
  jint ret = 0;
  if (!java_thread->has_last_Java_frame()) {
    return ret;  // no Java frames so no monitors
  }

  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);
  RegisterMap reg_map(java_thread);

  for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
       jvf = jvf->java_sender()) {
    GrowableArray<MonitorInfo*>* mons = jvf->monitors();
    if (!mons->is_empty()) {
      for (int i = 0; i < mons->length(); i++) {
        MonitorInfo *mi = mons->at(i);
        if (mi->owner_is_scalar_replaced()) continue;

        // see if owner of the monitor is our object
        if (mi->owner() != NULL && mi->owner() == hobj()) {
          ret++;
        }
      }
    }
  }
  return ret;
}



jvmtiError
JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread, jobject *monitor_ptr) {
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");
  oop obj = NULL;
  // The ObjectMonitor* can't be async deflated since we are either
  // at a safepoint or the calling thread is operating on itself so
  // it cannot leave the underlying wait()/enter() call.
  ObjectMonitor *mon = java_thread->current_waiting_monitor();
  if (mon == NULL) {
    // thread is not doing an Object.wait() call
    mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      // The thread is trying to enter() an ObjectMonitor.
      obj = mon->object();
      assert(obj != NULL, "ObjectMonitor should have a valid object!");
    }
    // implied else: no contended ObjectMonitor
  } else {
    // thread is doing an Object.wait() call
    obj = mon->object();
    assert(obj != NULL, "Object.wait() should have an object");
  }

  if (obj == NULL) {
    *monitor_ptr = NULL;
  } else {
    HandleMark hm(current_thread);
    Handle hobj(current_thread, obj);
    *monitor_ptr = jni_reference(calling_thread, hobj);
  }
  return JVMTI_ERROR_NONE;
}

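// Owned monitors are collected from two sources: monitors locked by Java
// frames found during the stack walk below, and monitors acquired outside
// of any Java frame (e.g. via JNI MonitorEnter), which are picked up by
// iterating the thread's in-use ObjectMonitors with JvmtiMonitorClosure.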
jvmtiError
JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
                                 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
  // Note:
  // calling_thread is the thread that requested the list of monitors for java_thread.
  // java_thread is the thread owning the monitors.
  // current_thread is the thread executing this code, can be a non-JavaThread (e.g. VM Thread).
  // And they all may be different threads.
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");

  if (java_thread->has_last_Java_frame()) {
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    RegisterMap reg_map(java_thread);

    int depth = 0;
    for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
         jvf = jvf->java_sender()) {
      if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
        // add locked objects for this frame into list
        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
        if (err != JVMTI_ERROR_NONE) {
          return err;
        }
      }
    }
  }

  // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
  JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
  err = jmc.error();

  return err;
}

// Save JNI local handles for any objects that this frame owns.
jvmtiError
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
                                          javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, jint stack_depth) {
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);

  GrowableArray<MonitorInfo*>* mons = jvf->monitors();
  if (mons->is_empty()) {
    return err;  // this javaVFrame holds no monitors
  }

  oop wait_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying wait() call.
    // Save object of current wait() call (if any) for later comparison.
    ObjectMonitor *mon = java_thread->current_waiting_monitor();
    if (mon != NULL) {
      wait_obj = mon->object();
    }
  }
  oop pending_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying enter() call.
    // Save object of current enter() call (if any) for later comparison.
    ObjectMonitor *mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      pending_obj = mon->object();
    }
  }

  for (int i = 0; i < mons->length(); i++) {
    MonitorInfo *mi = mons->at(i);

    if (mi->owner_is_scalar_replaced()) continue;

    oop obj = mi->owner();
    if (obj == NULL) {
      // this monitor doesn't have an owning object so skip it
      continue;
    }

    if (wait_obj == obj) {
      // the thread is waiting on this monitor so it isn't really owned
      continue;
    }

    if (pending_obj == obj) {
      // the thread is pending on this monitor so it isn't really owned
      continue;
    }

    if (owned_monitors_list->length() > 0) {
      // Our list has at least one object on it so we have to check
      // for recursive object locking
      bool found = false;
      for (int j = 0; j < owned_monitors_list->length(); j++) {
        jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor;
        oop check = JNIHandles::resolve(jobj);
        if (check == obj) {
          found = true;  // we found the object
          break;
        }
      }

      if (found) {
        // already have this object so don't include it
        continue;
      }
    }

    // add the owning object to our list
    jvmtiMonitorStackDepthInfo *jmsdi;
    err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
    if (err != JVMTI_ERROR_NONE) {
      return err;
    }
    Handle hobj(Thread::current(), obj);
    jmsdi->monitor = jni_reference(calling_thread, hobj);
    jmsdi->stack_depth = stack_depth;
    owned_monitors_list->append(jmsdi);
  }

  return err;
}

jvmtiError
JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
                              jint start_depth, jint max_count,
                              jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  Thread *current_thread = Thread::current();
  assert(SafepointSynchronize::is_at_safepoint() ||
         java_thread->is_handshake_safe_for(current_thread),
         "call by myself / at safepoint / at handshake");
  int count = 0;
  if (java_thread->has_last_Java_frame()) {
    RegisterMap reg_map(java_thread, false /* update_map */, false /* process_frames */);
    ResourceMark rm(current_thread);
    javaVFrame *jvf = java_thread->last_java_vframe(&reg_map);
    HandleMark hm(current_thread);
    if (start_depth != 0) {
      if (start_depth > 0) {
        for (int j = 0; j < start_depth && jvf != NULL; j++) {
          jvf = jvf->java_sender();
        }
        if (jvf == NULL) {
          // start_depth is deeper than the stack depth
          return JVMTI_ERROR_ILLEGAL_ARGUMENT;
        }
      } else { // start_depth < 0
        // we are referencing the starting depth based on the oldest
        // part of the stack.
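        // To find the frame that is -start_depth frames away from the oldest
        // frame without knowing the total stack depth in advance, walk toward
        // the oldest frame in chunks of |start_depth| frames, remembering the
        // start of the current and the previous chunk (jvf_prev and
        // jvf_prev_prev below).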
        // optimize to limit the number of times that java_sender() is called
        javaVFrame *jvf_cursor = jvf;
        javaVFrame *jvf_prev = NULL;
        javaVFrame *jvf_prev_prev = NULL;
        int j = 0;
        while (jvf_cursor != NULL) {
          jvf_prev_prev = jvf_prev;
          jvf_prev = jvf_cursor;
          for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
            jvf_cursor = jvf_cursor->java_sender();
          }
        }
        if (j == start_depth) {
          // previous pointer is exactly where we want to start
          jvf = jvf_prev;
        } else {
          // we need to back up further to get to the right place
          if (jvf_prev_prev == NULL) {
            // the -start_depth is greater than the stack depth
            return JVMTI_ERROR_ILLEGAL_ARGUMENT;
          }
          // j now is the number of frames on the stack starting with
          // jvf_prev, we start from jvf_prev_prev and move older on
          // the stack that many, the result is -start_depth frames
          // remaining.
          jvf = jvf_prev_prev;
          for (; j < 0; j++) {
            jvf = jvf->java_sender();
          }
        }
      }
    }
    for (; count < max_count && jvf != NULL; count++) {
      frame_buffer[count].method = jvf->method()->jmethod_id();
      frame_buffer[count].location = (jvf->method()->is_native() ? -1 : jvf->bci());
      jvf = jvf->java_sender();
    }
  } else {
    if (start_depth != 0) {
      // no frames and there is a starting depth
      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
    }
  }
  *count_ptr = count;
  return JVMTI_ERROR_NONE;
}

jvmtiError
JvmtiEnvBase::get_frame_count(JvmtiThreadState *state, jint *count_ptr) {
  assert((state != NULL),
         "JavaThread should create JvmtiThreadState before calling this method");
  *count_ptr = state->count_frames();
  return JVMTI_ERROR_NONE;
}

jvmtiError
JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
                                 jmethodID* method_ptr, jlocation* location_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  Thread* current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");
  ResourceMark rm(current_thread);

  vframe *vf = vframeForNoProcess(java_thread, depth);
  if (vf == NULL) {
    return JVMTI_ERROR_NO_MORE_FRAMES;
  }

  // vframeFor should return a java frame. If it doesn't
  // it means we've got an internal error and we return the
  // error in product mode. In debug mode we will instead
  // attempt to cast the vframe to a javaVFrame and will
  // cause an assertion/crash to allow further diagnosis.
#ifdef PRODUCT
  if (!vf->is_java_frame()) {
    return JVMTI_ERROR_INTERNAL;
  }
#endif

  HandleMark hm(current_thread);
  javaVFrame *jvf = javaVFrame::cast(vf);
  Method* method = jvf->method();
  if (method->is_native()) {
    *location_ptr = -1;
  } else {
    *location_ptr = jvf->bci();
  }
  *method_ptr = method->jmethod_id();

  return JVMTI_ERROR_NONE;
}

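// Fill in a jvmtiMonitorUsage for the object's monitor; executed by the
// VM thread at a safepoint. waiter_count/waiters cover both threads
// contending to enter the monitor and threads in Object.wait(), while
// notify_waiter_count/notify_waiters cover only the latter.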
jvmtiError
JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject object, jvmtiMonitorUsage* info_ptr) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  Thread* current_thread = VMThread::vm_thread();
  assert(current_thread == Thread::current(), "must be");

  HandleMark hm(current_thread);
  Handle hobj;

  // Check arguments
  {
    oop mirror = JNIHandles::resolve_external_guard(object);
    NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
    NULL_CHECK(info_ptr, JVMTI_ERROR_NULL_POINTER);

    hobj = Handle(current_thread, mirror);
  }

  ThreadsListHandle tlh(current_thread);
  JavaThread *owning_thread = NULL;
  ObjectMonitor *mon = NULL;
  jvmtiMonitorUsage ret = {
      NULL, 0, 0, NULL, 0, NULL
  };

  uint32_t debug_bits = 0;
  // first derive the object's owner and entry_count (if any)
  {
    owning_thread = ObjectSynchronizer::get_lock_owner(tlh.list(), hobj);

    if (owning_thread != NULL) {  // monitor is owned
      // The recursions field of a monitor does not reflect recursions
      // as lightweight locks before inflating the monitor are not included.
      // We have to count the number of recursive monitor entries the hard way.
      // We pass a handle to survive any GCs along the way.
      ret.entry_count = count_locked_objects(owning_thread, hobj);
    }
    // implied else: entry_count == 0
  }

  jint nWant = 0, nWait = 0;
  if (mon != NULL) {
    // this object has a heavyweight monitor
    nWant = mon->contentions();  // # of threads contending for monitor
    nWait = mon->waiters();      // # of threads in Object.wait()
    ret.waiter_count = nWant + nWait;
    ret.notify_waiter_count = nWait;
  } else {
    // this object has a lightweight monitor
    ret.waiter_count = 0;
    ret.notify_waiter_count = 0;
  }

  // Allocate memory for heavyweight and lightweight monitor.
  jvmtiError err;
  err = allocate(ret.waiter_count * sizeof(jthread *), (unsigned char**)&ret.waiters);
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  err = allocate(ret.notify_waiter_count * sizeof(jthread *),
                 (unsigned char**)&ret.notify_waiters);
  if (err != JVMTI_ERROR_NONE) {
    deallocate((unsigned char*)ret.waiters);
    return err;
  }

  // now derive the rest of the fields
  if (mon != NULL) {
    // this object has a heavyweight monitor

    // Number of waiters may actually be less than the waiter count.
    // So NULL out memory so that unused memory will be NULL.
    memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *));
    memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *));

    if (ret.waiter_count > 0) {
      // we have contending and/or waiting threads
      if (nWant > 0) {
        // we have contending threads
        ResourceMark rm(current_thread);
        // get_pending_threads returns only java thread so we do not need to
        // check for non java threads.
        GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon);
        if (wantList->length() < nWant) {
          // robustness: the pending list has gotten smaller
          nWant = wantList->length();
        }
        for (int i = 0; i < nWant; i++) {
          JavaThread *pending_thread = wantList->at(i);
          Handle th(current_thread, pending_thread->threadObj());
          ret.waiters[i] = (jthread)jni_reference(calling_thread, th);
        }
      }
      if (nWait > 0) {
        // we have threads in Object.wait()
        int offset = nWant;  // add after any contending threads
        ObjectWaiter *waiter = mon->first_waiter();
        for (int i = 0, j = 0; i < nWait; i++) {
          if (waiter == NULL) {
            // robustness: the waiting list has gotten smaller
            nWait = j;
            break;
          }
          JavaThread *w = mon->thread_of_waiter(waiter);
          if (w != NULL) {
            // If the thread was found on the ObjectWaiter list, then
            // it has not been notified. This thread can't change the
            // state of the monitor so it doesn't need to be suspended.
            Handle th(current_thread, w->threadObj());
            ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th);
            ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th);
          }
          waiter = mon->next_waiter(waiter);
        }
      }
    } // ThreadsListHandle is destroyed here.

    // Adjust count. nWant and nWait count values may be less than original.
    ret.waiter_count = nWant + nWait;
    ret.notify_waiter_count = nWait;
  } else {
    // this object has a lightweight monitor and we have nothing more
    // to do here because the defaults are just fine.
  }

  // we don't update return parameter unless everything worked
  *info_ptr = ret;

  return JVMTI_ERROR_NONE;
}

ResourceTracker::ResourceTracker(JvmtiEnv* env) {
  _env = env;
  _allocations = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<unsigned char*>(20, mtServiceability);
  _failed = false;
}

ResourceTracker::~ResourceTracker() {
  if (_failed) {
    for (int i = 0; i < _allocations->length(); i++) {
      _env->deallocate(_allocations->at(i));
    }
  }
  delete _allocations;
}

jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) {
  unsigned char *ptr;
  jvmtiError err = _env->allocate(size, &ptr);
  if (err == JVMTI_ERROR_NONE) {
    _allocations->append(ptr);
    *mem_ptr = ptr;
  } else {
    *mem_ptr = NULL;
    _failed = true;
  }
  return err;
}

unsigned char* ResourceTracker::allocate(jlong size) {
  unsigned char* ptr;
  allocate(size, &ptr);
  return ptr;
}

char* ResourceTracker::strdup(const char* str) {
  char *dup_str = (char*)allocate(strlen(str)+1);
  if (dup_str != NULL) {
    strcpy(dup_str, str);
  }
  return dup_str;
}

struct StackInfoNode {
  struct StackInfoNode *next;
  jvmtiStackInfo info;
};

// Create a jvmtiStackInfo inside a linked list node and create a
// buffer for the frame information, both allocated as resource objects.
// Fill in both the jvmtiStackInfo and the jvmtiFrameInfo.
// Note that either or both of thr and thread_oop
// may be null if the thread is new or has exited.
void
MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
#ifdef ASSERT
  Thread *current_thread = Thread::current();
  assert(SafepointSynchronize::is_at_safepoint() ||
         thr->is_handshake_safe_for(current_thread),
         "call by myself / at safepoint / at handshake");
#endif

  jint state = 0;
  struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode);
  jvmtiStackInfo *infop = &(node->info);

  node->next = head();
  set_head(node);
  infop->frame_count = 0;
  infop->thread = jt;

  if (thread_oop != NULL) {
    // get most state bits
    state = (jint)java_lang_Thread::get_thread_status(thread_oop);
  }

  if (thr != NULL) { // add more state bits if there is a JavaThread to query
    if (thr->is_suspended()) {
      state |= JVMTI_THREAD_STATE_SUSPENDED;
    }
    JavaThreadState jts = thr->thread_state();
    if (jts == _thread_in_native) {
      state |= JVMTI_THREAD_STATE_IN_NATIVE;
    }
    if (thr->is_interrupted(false)) {
      state |= JVMTI_THREAD_STATE_INTERRUPTED;
    }
  }
  infop->state = state;

  if (thr != NULL && (state & JVMTI_THREAD_STATE_ALIVE) != 0) {
    infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count());
    env()->get_stack_trace(thr, 0, max_frame_count(),
                           infop->frame_buffer, &(infop->frame_count));
  } else {
    infop->frame_buffer = NULL;
    infop->frame_count = 0;
  }
  _frame_count_total += infop->frame_count;
}

// Based on the stack information in the linked list, allocate memory
// block to return and fill it from the info in the linked list.
void
MultipleStackTracesCollector::allocate_and_fill_stacks(jint thread_count) {
  // do I need to worry about alignment issues?
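  // The single allocation is laid out as thread_count jvmtiStackInfo records
  // followed by all of the jvmtiFrameInfo records; the stack info records are
  // filled in backwards (the linked list is newest-first) while the frame
  // info records are filled in forwards.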
  jlong alloc_size = thread_count * sizeof(jvmtiStackInfo)
                   + _frame_count_total * sizeof(jvmtiFrameInfo);
  env()->allocate(alloc_size, (unsigned char **)&_stack_info);

  // pointers to move through the newly allocated space as it is filled in
  jvmtiStackInfo *si = _stack_info + thread_count;  // bottom of stack info
  jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si;        // is the top of frame info

  // copy information in resource area into allocated buffer
  // insert stack info backwards since linked list is backwards
  // insert frame info forwards
  // walk the StackInfoNodes
  for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
    jint frame_count = sin->info.frame_count;
    size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
    --si;
    memcpy(si, &(sin->info), sizeof(jvmtiStackInfo));
    if (frames_size == 0) {
      si->frame_buffer = NULL;
    } else {
      memcpy(fi, sin->info.frame_buffer, frames_size);
      si->frame_buffer = fi;  // point to the new allocated copy of the frames
      fi += frame_count;
    }
  }
  assert(si == _stack_info, "the last copied stack info must be the first record");
  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
         "the last copied frame info must be the last record");
}


void
VM_GetThreadListStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  ThreadsListHandle tlh;
  for (int i = 0; i < _thread_count; ++i) {
    jthread jt = _thread_list[i];
    JavaThread* java_thread = NULL;
    oop thread_oop = NULL;
    jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop);
    if (err != JVMTI_ERROR_NONE) {
      // We got an error code so we don't have a JavaThread *, but
      // only return an error from here if we didn't get a valid
      // thread_oop.
      if (thread_oop == NULL) {
        _collector.set_result(err);
        return;
      }
      // We have a valid thread_oop.
    }
    _collector.fill_frames(jt, java_thread, thread_oop);
  }
  _collector.allocate_and_fill_stacks(_thread_count);
}

void
GetSingleStackTraceClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  oop thread_oop = jt->threadObj();

  if (!jt->is_exiting() && thread_oop != NULL) {
    ResourceMark rm;
    _collector.fill_frames(_jthread, jt, thread_oop);
    _collector.allocate_and_fill_stacks(1);
  }
}

void
VM_GetAllStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  _final_thread_count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    oop thread_oop = jt->threadObj();
    if (thread_oop != NULL &&
        !jt->is_exiting() &&
        java_lang_Thread::is_alive(thread_oop) &&
        !jt->is_hidden_from_external_view()) {
      ++_final_thread_count;
      // Handle block of the calling thread is used to create local refs.
      _collector.fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
                             jt, thread_oop);
    }
  }
  _collector.allocate_and_fill_stacks(_final_thread_count);
}

// Verifies that the top frame is a java frame in an expected state.
// Deoptimizes frame if needed.
// Checks that the frame method signature matches the return type (tos).
// The HandleMark must be defined in the caller only; it keeps the
// ret_ob_h handle alive after the return to the caller.
jvmtiError
JvmtiEnvBase::check_top_frame(Thread* current_thread, JavaThread* java_thread,
                              jvalue value, TosState tos, Handle* ret_ob_h) {
  ResourceMark rm(current_thread);

  vframe *vf = vframeForNoProcess(java_thread, 0);
  NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES);

  javaVFrame *jvf = (javaVFrame*) vf;
  if (!vf->is_java_frame() || jvf->method()->is_native()) {
    return JVMTI_ERROR_OPAQUE_FRAME;
  }

  // If the frame is a compiled one, need to deoptimize it.
  if (vf->is_compiled_frame()) {
    if (!vf->fr().can_be_deoptimized()) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
    Deoptimization::deoptimize_frame(java_thread, jvf->fr().id());
  }

  // Get information about method return type
  Symbol* signature = jvf->method()->signature();

  ResultTypeFinder rtf(signature);
  TosState fr_tos = as_TosState(rtf.type());
  if (fr_tos != tos) {
    if (tos != itos || (fr_tos != btos && fr_tos != ztos && fr_tos != ctos && fr_tos != stos)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
  }

  // Check that the jobject class matches the return type signature.
  jobject jobj = value.l;
  if (tos == atos && jobj != NULL) { // NULL reference is allowed
    Handle ob_h(current_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT);
    Klass* ob_k = ob_h()->klass();
    NULL_CHECK(ob_k, JVMTI_ERROR_INVALID_OBJECT);

    // Method return type signature.
    char* ty_sign = 1 + strchr(signature->as_C_string(), JVM_SIGNATURE_ENDFUNC);

    if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
    *ret_ob_h = ob_h;
  }
  return JVMTI_ERROR_NONE;
} /* end check_top_frame */


// ForceEarlyReturn<type> follows the PopFrame approach in many aspects.
// The main difference is in the last stage in the interpreter:
// PopFrame stops method execution so that it continues from the same
// method call instruction, while ForceEarlyReturn forces a return from
// the method so that execution continues at the bytecode following the
// method call.

// java_thread - protected by ThreadsListHandle and pre-checked

jvmtiError
JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
  // retrieve or create the state
  JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
  if (state == NULL) {
    return JVMTI_ERROR_THREAD_NOT_ALIVE;
  }

  // Eagerly reallocate scalar replaced objects.
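  // If escape analysis scalar-replaced any objects in frames of java_thread,
  // they must be reallocated on the heap (deoptimizing the affected frames)
  // before the forced return; if that reallocation fails we bail out with
  // JVMTI_ERROR_OUT_OF_MEMORY below.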
  JavaThread* current_thread = JavaThread::current();
  EscapeBarrier eb(true, current_thread, java_thread);
  if (!eb.deoptimize_objects(0)) {
    // Reallocation of scalar replaced objects failed -> return with error
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }

  SetForceEarlyReturn op(state, value, tos);
  if (java_thread == current_thread) {
    op.doit(java_thread, true /* self */);
  } else {
    Handshake::execute(&op, java_thread);
  }
  return op.result();
}

void
SetForceEarlyReturn::doit(Thread *target, bool self) {
  JavaThread* java_thread = target->as_Java_thread();
  Thread* current_thread = Thread::current();
  HandleMark hm(current_thread);

  if (!self) {
    if (!java_thread->is_suspended()) {
      _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
      return;
    }
  }

  // Check to see if a ForceEarlyReturn was already in progress
  if (_state->is_earlyret_pending()) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    _result = JVMTI_ERROR_INTERNAL;
    return;
  }
  {
    // The same as for PopFrame. Workaround bug:
    //  4812902: popFrame hangs if the method is waiting at a synchronize
    // Catch this condition and return an error to avoid hanging.
    // Now JVMTI spec allows an implementation to bail out with an opaque
    // frame error.
    OSThread* osThread = java_thread->osthread();
    if (osThread->get_state() == MONITOR_WAIT) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
  }

  Handle ret_ob_h;
  _result = JvmtiEnvBase::check_top_frame(current_thread, java_thread, _value, _tos, &ret_ob_h);
  if (_result != JVMTI_ERROR_NONE) {
    return;
  }
  assert(_tos != atos || _value.l == NULL || ret_ob_h() != NULL,
         "return object oop must not be NULL if jobject is not NULL");

  // Update the thread state to reflect that the top frame must be
  // forced to return.
  // The current frame will be returned later when the suspended
  // thread is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  _state->set_earlyret_pending();
  _state->set_earlyret_oop(ret_ob_h());
  _state->set_earlyret_value(_value, _tos);

  // Set pending step flag for this early return.
  // It is cleared when next step event is posted.
  _state->set_pending_step_for_earlyret();
}

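// Called from ObjectSynchronizer::monitors_iterate() on behalf of
// get_owned_monitors() for each in-use monitor owned by the target thread.
// Monitors already collected during the stack walk are filtered out here;
// the rest were acquired outside of any Java frame (e.g. via JNI
// MonitorEnter) and therefore get a stack_depth of -1.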
void
JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
  if (_error != JVMTI_ERROR_NONE) {
    // Error occurred in previous iteration so no need to add
    // to the list.
    return;
  }
  // Filter out on stack monitors collected during stack walk.
  oop obj = mon->object();
  bool found = false;
  for (int j = 0; j < _owned_monitors_list->length(); j++) {
    jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
    oop check = JNIHandles::resolve(jobj);
    if (check == obj) {
      // On stack monitor already collected during the stack walk.
      found = true;
      break;
    }
  }
  if (found == false) {
    // This is off stack monitor (e.g. acquired via jni MonitorEnter).
    jvmtiError err;
    jvmtiMonitorStackDepthInfo *jmsdi;
    err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
    if (err != JVMTI_ERROR_NONE) {
      _error = err;
      return;
    }
    Handle hobj(Thread::current(), obj);
    jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
    // stack depth is unknown for this monitor.
    jmsdi->stack_depth = -1;
    _owned_monitors_list->append(jmsdi);
  }
}

GrowableArray<OopHandle>* JvmtiModuleClosure::_tbl = NULL;

void JvmtiModuleClosure::do_module(ModuleEntry* entry) {
  assert_locked_or_safepoint(Module_lock);
  OopHandle module = entry->module_handle();
  guarantee(module.resolve() != NULL, "module object is NULL");
  _tbl->push(module);
}

jvmtiError
JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobject** modules_ptr) {
  ResourceMark rm;
  MutexLocker mcld(ClassLoaderDataGraph_lock);
  MutexLocker ml(Module_lock);

  _tbl = new GrowableArray<OopHandle>(77);
  if (_tbl == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }

  // Iterate over all the modules loaded to the system.
  ClassLoaderDataGraph::modules_do(&do_module);

  jint len = _tbl->length();
  guarantee(len > 0, "at least one module must be present");

  jobject* array = (jobject*)env->jvmtiMalloc((jlong)(len * sizeof(jobject)));
  if (array == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }
  for (jint idx = 0; idx < len; idx++) {
    array[idx] = JNIHandles::make_local(Thread::current(), _tbl->at(idx).resolve());
  }
  _tbl = NULL;
  *modules_ptr = array;
  *module_count_ptr = len;
  return JVMTI_ERROR_NONE;
}

void
UpdateForPopTopFrameClosure::doit(Thread *target, bool self) {
  Thread* current_thread = Thread::current();
  HandleMark hm(current_thread);
  JavaThread* java_thread = target->as_Java_thread();
  assert(java_thread == _state->get_thread(), "Must be");

  if (!self && !java_thread->is_suspended()) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return;
  }

  // Check to see if a PopFrame was already in progress
  if (java_thread->popframe_condition() != JavaThread::popframe_inactive) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    _result = JVMTI_ERROR_INTERNAL;
    return;
  }

  // Was workaround bug
  //    4812902: popFrame hangs if the method is waiting at a synchronize
  // Catch this condition and return an error to avoid hanging.
  // Now JVMTI spec allows an implementation to bail out with an opaque frame error.
  OSThread* osThread = java_thread->osthread();
  if (osThread->get_state() == MONITOR_WAIT) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return;
  }

  ResourceMark rm(current_thread);
  // Check if there is more than one Java frame in this thread, that the top two frames
  // are Java (not native) frames, and that there is no intervening VM frame
  int frame_count = 0;
  bool is_interpreted[2];
  intptr_t *frame_sp[2];
  // The 2nd arg of the constructor is needed to stop iterating at the java entry frame.
  for (vframeStream vfs(java_thread, true, false /* process_frames */); !vfs.at_end(); vfs.next()) {
    methodHandle mh(current_thread, vfs.method());
    if (mh->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
    is_interpreted[frame_count] = vfs.is_interpreted_frame();
    frame_sp[frame_count] = vfs.frame_id();
    if (++frame_count > 1) break;
  }
  if (frame_count < 2) {
    // We haven't found two adjacent non-native Java frames on the top.
    // There can be two situations here:
    //  1. There are no more java frames
    //  2. Two top java frames are separated by non-java native frames
    if (JvmtiEnvBase::vframeForNoProcess(java_thread, 1) == NULL) {
      _result = JVMTI_ERROR_NO_MORE_FRAMES;
      return;
    } else {
      // Intervening non-java native or VM frames separate java frames.
      // Current implementation does not support this. See bug #5031735.
      // In theory it is possible to pop frames in such cases.
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
  }

  // If any of the top 2 frames is a compiled one, need to deoptimize it
  for (int i = 0; i < 2; i++) {
    if (!is_interpreted[i]) {
      Deoptimization::deoptimize_frame(java_thread, frame_sp[i]);
    }
  }

  // Update the thread state to reflect that the top frame is popped
  // so that cur_stack_depth is maintained properly and all frameIDs
  // are invalidated.
  // The current frame will be popped later when the suspended thread
  // is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  // It's fine to update the thread state here because no JVMTI events
  // shall be posted for this PopFrame.

  if (!java_thread->is_exiting() && java_thread->threadObj() != NULL) {
    _state->update_for_pop_top_frame();
    java_thread->set_popframe_condition(JavaThread::popframe_pending_bit);
    // Set pending step flag for this popframe and it is cleared when next
    // step event is posted.
    _state->set_pending_step_for_popframe();
    _result = JVMTI_ERROR_NONE;
  }
}

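// The closures below run either in the target thread itself or in the
// requesting thread while the target is handshake-safe (handshake semantics).
// Each one guards against a target that is exiting or has no
// java.lang.Thread object; in that case _result is left at its preset default.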
void
SetFramePopClosure::doit(Thread *target, bool self) {
  ResourceMark rm;
  JavaThread* java_thread = target->as_Java_thread();

  assert(_state->get_thread() == java_thread, "Must be");

  if (!self && !java_thread->is_suspended()) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return;
  }

  vframe *vf = JvmtiEnvBase::vframeForNoProcess(java_thread, _depth);
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return;
  }

  if (!vf->is_java_frame() || ((javaVFrame*) vf)->method()->is_native()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return;
  }

  assert(vf->frame_pointer() != NULL, "frame pointer mustn't be NULL");
  if (java_thread->is_exiting() || java_thread->threadObj() == NULL) {
    return; /* JVMTI_ERROR_THREAD_NOT_ALIVE (default) */
  }
  int frame_number = _state->count_frames() - _depth;
  _state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
  _result = JVMTI_ERROR_NONE;
}

void
GetOwnedMonitorInfoClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread,
                                                         jt,
                                                         _owned_monitors_list);
  }
}

void
GetCurrentContendedMonitorClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,
                                                                    jt,
                                                                    _owned_monitor_ptr);
  }
}

void
GetStackTraceClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase *)_env)->get_stack_trace(jt,
                                                      _start_depth, _max_count,
                                                      _frame_buffer, _count_ptr);
  }
}

void
GetFrameCountClosure::do_thread(Thread *target) {
  JavaThread* jt = _state->get_thread();
  assert(target == jt, "just checking");
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
  }
}

void
GetFrameLocationClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_location(jt, _depth,
                                                        _method_ptr, _location_ptr);
  }
}