/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/moduleEntry.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiExtensions.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiManageCapabilities.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"


///////////////////////////////////////////////////////////////
//
// JvmtiEnvBase
//

JvmtiEnvBase* JvmtiEnvBase::_head_environment = NULL;

bool JvmtiEnvBase::_globally_initialized = false;
volatile bool JvmtiEnvBase::_needs_clean_up = false;

jvmtiPhase JvmtiEnvBase::_phase = JVMTI_PHASE_PRIMORDIAL;

volatile int JvmtiEnvBase::_dying_thread_env_iteration_count = 0;

extern jvmtiInterface_1_ jvmti_Interface;
extern jvmtiInterface_1_ jvmtiTrace_Interface;


// perform initializations that must occur before any JVMTI environments
// are released but which should only be initialized once (no matter
// how many environments are created).
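// globally_initialize() is reached from JvmtiEnvBase::initialize() below,
// which runs it only once (guarded by _globally_initialized).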
void
JvmtiEnvBase::globally_initialize() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
  assert(_globally_initialized == false, "bad call");

  JvmtiManageCapabilities::initialize();

  // register extension functions and events
  JvmtiExtensions::register_extensions();

#ifdef JVMTI_TRACE
  JvmtiTrace::initialize();
#endif

  _globally_initialized = true;
}


void
JvmtiEnvBase::initialize() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  // Add this environment to the end of the environment list (order is important)
  {
    // This block of code must not contain any safepoints, as list deallocation
    // (which occurs at a safepoint) cannot occur simultaneously with this list
    // addition. Note: NoSafepointVerifier cannot, currently, be used before
    // threads exist.
    JvmtiEnvIterator it;
    JvmtiEnvBase *previous_env = NULL;
    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
      previous_env = env;
    }
    if (previous_env == NULL) {
      _head_environment = this;
    } else {
      previous_env->set_next_environment(this);
    }
  }

  if (_globally_initialized == false) {
    globally_initialize();
  }
}

jvmtiPhase
JvmtiEnvBase::phase() {
  // For JVMTI environments that possess the can_generate_early_vmstart capability:
  //   replace JVMTI_PHASE_PRIMORDIAL with JVMTI_PHASE_START
  if (_phase == JVMTI_PHASE_PRIMORDIAL &&
      JvmtiExport::early_vmstart_recorded() &&
      early_vmstart_env()) {
    return JVMTI_PHASE_START;
  }
  return _phase; // Normal case
}

bool
JvmtiEnvBase::is_valid() {
  jint value = 0;

  // This object might not be a JvmtiEnvBase so we can't assume
  // the _magic field is properly aligned. Get the value in a safe
  // way and then check against JVMTI_MAGIC.

  switch (sizeof(_magic)) {
  case 2:
    value = Bytes::get_native_u2((address)&_magic);
    break;

  case 4:
    value = Bytes::get_native_u4((address)&_magic);
    break;

  case 8:
    value = Bytes::get_native_u8((address)&_magic);
    break;

  default:
    guarantee(false, "_magic field is an unexpected size");
  }

  return value == JVMTI_MAGIC;
}


bool
JvmtiEnvBase::use_version_1_0_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 0;  // micro version doesn't matter here
}


bool
JvmtiEnvBase::use_version_1_1_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 1;  // micro version doesn't matter here
}

bool
JvmtiEnvBase::use_version_1_2_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 2;  // micro version doesn't matter here
}


JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
  _version = version;
  _env_local_storage = NULL;
  _tag_map = NULL;
  _native_method_prefix_count = 0;
  _native_method_prefixes = NULL;
  _next = NULL;
  _class_file_load_hook_ever_enabled = false;

  // Moot since ClassFileLoadHook not yet enabled.
205 // But "true" will give a more predictable ClassFileLoadHook behavior 206 // for environment creation during ClassFileLoadHook. 207 _is_retransformable = true; 208 209 // all callbacks initially NULL 210 memset(&_event_callbacks, 0, sizeof(jvmtiEventCallbacks)); 211 memset(&_ext_event_callbacks, 0, sizeof(jvmtiExtEventCallbacks)); 212 213 // all capabilities initially off 214 memset(&_current_capabilities, 0, sizeof(_current_capabilities)); 215 216 // all prohibited capabilities initially off 217 memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities)); 218 219 _magic = JVMTI_MAGIC; 220 221 JvmtiEventController::env_initialize((JvmtiEnv*)this); 222 223 #ifdef JVMTI_TRACE 224 _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface; 225 #else 226 _jvmti_external.functions = &jvmti_Interface; 227 #endif 228 } 229 230 231 void 232 JvmtiEnvBase::dispose() { 233 234 #ifdef JVMTI_TRACE 235 JvmtiTrace::shutdown(); 236 #endif 237 238 // Dispose of event info and let the event controller call us back 239 // in a locked state (env_dispose, below) 240 JvmtiEventController::env_dispose(this); 241 } 242 243 void 244 JvmtiEnvBase::env_dispose() { 245 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); 246 247 // We have been entered with all events disabled on this environment. 248 // A race to re-enable events (by setting callbacks) is prevented by 249 // checking for a valid environment when setting callbacks (while 250 // holding the JvmtiThreadState_lock). 251 252 // Mark as invalid. 253 _magic = DISPOSED_MAGIC; 254 255 // Relinquish all capabilities. 256 jvmtiCapabilities *caps = get_capabilities(); 257 JvmtiManageCapabilities::relinquish_capabilities(caps, caps, caps); 258 259 // Same situation as with events (see above) 260 set_native_method_prefixes(0, NULL); 261 262 JvmtiTagMap* tag_map_to_clear = tag_map_acquire(); 263 // A tag map can be big, clear it now to save memory until 264 // the destructor runs. 265 if (tag_map_to_clear != NULL) { 266 tag_map_to_clear->clear(); 267 } 268 269 _needs_clean_up = true; 270 } 271 272 273 JvmtiEnvBase::~JvmtiEnvBase() { 274 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); 275 276 // There is a small window of time during which the tag map of a 277 // disposed environment could have been reallocated. 278 // Make sure it is gone. 279 JvmtiTagMap* tag_map_to_deallocate = _tag_map; 280 set_tag_map(NULL); 281 // A tag map can be big, deallocate it now 282 if (tag_map_to_deallocate != NULL) { 283 delete tag_map_to_deallocate; 284 } 285 286 _magic = BAD_MAGIC; 287 } 288 289 290 void 291 JvmtiEnvBase::periodic_clean_up() { 292 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); 293 294 // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So 295 // clean up JvmtiThreadState before deleting JvmtiEnv pointer. 
  JvmtiThreadState::periodic_clean_up();

  // Unlink all invalid environments from the list of environments
  // and deallocate them
  JvmtiEnvIterator it;
  JvmtiEnvBase* previous_env = NULL;
  JvmtiEnvBase* env = it.first();
  while (env != NULL) {
    if (env->is_valid()) {
      previous_env = env;
      env = it.next(env);
    } else {
      // This one isn't valid, remove it from the list and deallocate it
      JvmtiEnvBase* defunct_env = env;
      env = it.next(env);
      if (previous_env == NULL) {
        _head_environment = env;
      } else {
        previous_env->set_next_environment(env);
      }
      delete defunct_env;
    }
  }

}


void
JvmtiEnvBase::check_for_periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  class ThreadInsideIterationClosure: public ThreadClosure {
   private:
    bool _inside;
   public:
    ThreadInsideIterationClosure() : _inside(false) {};

    void do_thread(Thread* thread) {
      _inside |= thread->is_inside_jvmti_env_iteration();
    }

    bool is_inside_jvmti_env_iteration() {
      return _inside;
    }
  };

  if (_needs_clean_up) {
    // Check if we are currently iterating environments;
    // deallocation should not occur if we are
    ThreadInsideIterationClosure tiic;
    Threads::threads_do(&tiic);
    if (!tiic.is_inside_jvmti_env_iteration() &&
        !is_inside_dying_thread_env_iteration()) {
      _needs_clean_up = false;
      JvmtiEnvBase::periodic_clean_up();
    }
  }
}


void
JvmtiEnvBase::record_first_time_class_file_load_hook_enabled() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
         "sanity check");

  if (!_class_file_load_hook_ever_enabled) {
    _class_file_load_hook_ever_enabled = true;

    if (get_capabilities()->can_retransform_classes) {
      _is_retransformable = true;
    } else {
      _is_retransformable = false;

      // cannot add retransform capability after ClassFileLoadHook has been enabled
      get_prohibited_capabilities()->can_retransform_classes = 1;
    }
  }
}


void
JvmtiEnvBase::record_class_file_load_hook_enabled() {
  if (!_class_file_load_hook_ever_enabled) {
    if (Threads::number_of_threads() == 0) {
      record_first_time_class_file_load_hook_enabled();
    } else {
      MutexLocker mu(JvmtiThreadState_lock);
      record_first_time_class_file_load_hook_enabled();
    }
  }
}


jvmtiError
JvmtiEnvBase::set_native_method_prefixes(jint prefix_count, char** prefixes) {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
         "sanity check");

  int old_prefix_count = get_native_method_prefix_count();
  char **old_prefixes = get_native_method_prefixes();

  // allocate and install the new prefixes
  if (prefix_count == 0 || !is_valid()) {
    _native_method_prefix_count = 0;
    _native_method_prefixes = NULL;
  } else {
    // there are prefixes, allocate an array to hold them, and fill it
    char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal);
    if (new_prefixes == NULL) {
      return JVMTI_ERROR_OUT_OF_MEMORY;
    }
    for (int i = 0; i < prefix_count; i++) {
      char* prefix = prefixes[i];
      if (prefix == NULL) {
        // free the prefixes duplicated so far (0 .. i-1) before bailing out
        for (int j = 0; j < i; j++) {
          os::free(new_prefixes[j]);
        }
        os::free(new_prefixes);
        return JVMTI_ERROR_NULL_POINTER;
      }
      prefix = os::strdup(prefixes[i]);
      if (prefix == NULL) {
        for (int j = 0; j < i; j++) {
          os::free(new_prefixes[j]);
        }
        os::free(new_prefixes);
        return JVMTI_ERROR_OUT_OF_MEMORY;
      }
      new_prefixes[i] = prefix;
    }
    _native_method_prefix_count = prefix_count;
    _native_method_prefixes = new_prefixes;
  }

  // now that we know the new prefixes have been successfully installed we can
  // safely remove the old ones
  if (old_prefix_count != 0) {
    for (int i = 0; i < old_prefix_count; i++) {
      os::free(old_prefixes[i]);
    }
    os::free(old_prefixes);
  }

  return JVMTI_ERROR_NONE;
}


// Collect all the prefixes which have been set in any JVM TI environments
// by the SetNativeMethodPrefix(es) functions. Be sure to maintain the
// order of environments and the order of prefixes within each environment.
// Return in a resource allocated array.
char**
JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) {
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JvmtiThreadState_lock->is_locked(),
         "sanity check");

  int total_count = 0;
  GrowableArray<char*>* prefix_array = new GrowableArray<char*>(5);

  JvmtiEnvIterator it;
  for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
    int prefix_count = env->get_native_method_prefix_count();
    char** prefixes = env->get_native_method_prefixes();
    for (int j = 0; j < prefix_count; j++) {
      // retrieve a prefix and, so that it is safe against asynchronous changes,
      // copy it into the resource area
      char* prefix = prefixes[j];
      char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1);
      strcpy(prefix_copy, prefix);
      prefix_array->at_put_grow(total_count++, prefix_copy);
    }
  }

  char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count);
  char** p = all_prefixes;
  for (int i = 0; i < total_count; ++i) {
    *p++ = prefix_array->at(i);
  }
  *count_ptr = total_count;
  return all_prefixes;
}

void
JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks,
                                  jint size_of_callbacks) {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  size_t byte_cnt = sizeof(jvmtiEventCallbacks);

  // clear in either case to be sure any gap between the old and new sizes is zeroed
  memset(&_event_callbacks, 0, byte_cnt);

  // Now that JvmtiThreadState_lock is held, prevent a possible race condition where events
  // are re-enabled by a call to set event callbacks where the DisposeEnvironment
  // occurs after the boiler-plate environment check and before the lock is acquired.
  if (callbacks != NULL && is_valid()) {
    if (size_of_callbacks < (jint)byte_cnt) {
      byte_cnt = size_of_callbacks;
    }
    memcpy(&_event_callbacks, callbacks, byte_cnt);
  }
}


// In the fullness of time, all users of the method should instead
// directly use allocate; besides being cleaner and faster, this will
// mean much better out-of-memory handling
unsigned char *
JvmtiEnvBase::jvmtiMalloc(jlong size) {
  unsigned char* mem = NULL;
  jvmtiError result = allocate(size, &mem);
  assert(result == JVMTI_ERROR_NONE, "Allocate failed");
  return mem;
}


// Handle management

jobject JvmtiEnvBase::jni_reference(Handle hndl) {
  return JNIHandles::make_local(hndl());
}

jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) {
  return JNIHandles::make_local(thread, hndl());
}

void JvmtiEnvBase::destroy_jni_reference(jobject jobj) {
  JNIHandles::destroy_local(jobj);
}

void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) {
  JNIHandles::destroy_local(jobj); // thread is unused.
}

//
// Threads
//

jobject *
JvmtiEnvBase::new_jobjectArray(int length, Handle *handles) {
  if (length == 0) {
    return NULL;
  }

  jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length);
  NULL_CHECK(objArray, NULL);

  for (int i=0; i<length; i++) {
    objArray[i] = jni_reference(handles[i]);
  }
  return objArray;
}

jthread *
JvmtiEnvBase::new_jthreadArray(int length, Handle *handles) {
  return (jthread *) new_jobjectArray(length, handles);
}

jthreadGroup *
JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
  return (jthreadGroup *) new_jobjectArray(length, handles);
}

// return the vframe on the specified thread and depth, NULL if no such frame
// The thread and the oops in the returned vframe might not have been processed.
vframe*
JvmtiEnvBase::vframeForNoProcess(JavaThread* java_thread, jint depth) {
  if (!java_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(java_thread, true /* update_map */, false /* process_frames */);
  vframe *vf = java_thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}


//
// utilities: JNI objects
//


jclass
JvmtiEnvBase::get_jni_class_non_null(Klass* k) {
  assert(k != NULL, "k != NULL");
  Thread *thread = Thread::current();
  return (jclass)jni_reference(Handle(thread, k->java_mirror()));
}

//
// Field Information
//

bool
JvmtiEnvBase::get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd) {
  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
    return false;
  }
  bool found = false;
  if (jfieldIDWorkaround::is_static_jfieldID(field)) {
    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
    found = id->find_local_field(fd);
  } else {
    // Non-static field. The fieldID is really the offset of the field within the object.
    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
    found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
  }
  return found;
}

//
// Object Monitor Information
//

//
// Count the number of objects for a lightweight monitor. The hobj
// parameter is the object that owns the monitor, so this routine will
// count the number of times the same object was locked by frames
// in java_thread.
//
jint
JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
  jint ret = 0;
  if (!java_thread->has_last_Java_frame()) {
    return ret;  // no Java frames so no monitors
  }

  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);
  RegisterMap reg_map(java_thread);

  for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
       jvf = jvf->java_sender()) {
    GrowableArray<MonitorInfo*>* mons = jvf->monitors();
    if (!mons->is_empty()) {
      for (int i = 0; i < mons->length(); i++) {
        MonitorInfo *mi = mons->at(i);
        if (mi->owner_is_scalar_replaced()) continue;

        // see if owner of the monitor is our object
        if (mi->owner() != NULL && mi->owner() == hobj()) {
          ret++;
        }
      }
    }
  }
  return ret;
}



jvmtiError
JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread, jobject *monitor_ptr) {
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");
  oop obj = NULL;
  // The ObjectMonitor* can't be async deflated since we are either
  // at a safepoint or the calling thread is operating on itself so
  // it cannot leave the underlying wait()/enter() call.
  ObjectMonitor *mon = java_thread->current_waiting_monitor();
  if (mon == NULL) {
    // thread is not doing an Object.wait() call
    mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      // The thread is trying to enter() an ObjectMonitor.
      obj = mon->object();
      assert(obj != NULL, "ObjectMonitor should have a valid object!");
    }
    // implied else: no contended ObjectMonitor
  } else {
    // thread is doing an Object.wait() call
    obj = mon->object();
    assert(obj != NULL, "Object.wait() should have an object");
  }

  if (obj == NULL) {
    *monitor_ptr = NULL;
  } else {
    HandleMark hm(current_thread);
    Handle hobj(current_thread, obj);
    *monitor_ptr = jni_reference(calling_thread, hobj);
  }
  return JVMTI_ERROR_NONE;
}


jvmtiError
JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
                                 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
  // Note:
  //   calling_thread is the thread that requested the list of monitors for java_thread.
  //   java_thread is the thread owning the monitors.
  //   current_thread is the thread executing this code, can be a non-JavaThread (e.g. VM Thread).
  //   And they all may be different threads.
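  //
  // Agent-side view (illustrative; assumes a jvmtiEnv* named jvmti and a
  // jthread named thread obtained by the agent) -- this is the backend for
  // calls such as the one handled by GetOwnedMonitorInfoClosure below:
  //   jint count;
  //   jobject* monitors;
  //   (*jvmti)->GetOwnedMonitorInfo(jvmti, thread, &count, &monitors);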
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");

  if (java_thread->has_last_Java_frame()) {
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    RegisterMap reg_map(java_thread);

    int depth = 0;
    for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
         jvf = jvf->java_sender()) {
      if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
        // add locked objects for this frame into list
        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
        if (err != JVMTI_ERROR_NONE) {
          return err;
        }
      }
    }
  }

  // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
  JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
  err = jmc.error();

  return err;
}

// Save JNI local handles for any objects that this frame owns.
jvmtiError
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
                                 javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, jint stack_depth) {
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);

  GrowableArray<MonitorInfo*>* mons = jvf->monitors();
  if (mons->is_empty()) {
    return err;  // this javaVFrame holds no monitors
  }

  oop wait_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying wait() call.
    // Save object of current wait() call (if any) for later comparison.
    ObjectMonitor *mon = java_thread->current_waiting_monitor();
    if (mon != NULL) {
      wait_obj = mon->object();
    }
  }
  oop pending_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying enter() call.
    // Save object of current enter() call (if any) for later comparison.
    ObjectMonitor *mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      pending_obj = mon->object();
    }
  }

  for (int i = 0; i < mons->length(); i++) {
    MonitorInfo *mi = mons->at(i);

    if (mi->owner_is_scalar_replaced()) continue;

    oop obj = mi->owner();
    if (obj == NULL) {
      // this monitor doesn't have an owning object so skip it
      continue;
    }

    if (wait_obj == obj) {
      // the thread is waiting on this monitor so it isn't really owned
      continue;
    }

    if (pending_obj == obj) {
      // the thread is pending on this monitor so it isn't really owned
      continue;
    }

    if (owned_monitors_list->length() > 0) {
      // Our list has at least one object on it so we have to check
      // for recursive object locking
      bool found = false;
      for (int j = 0; j < owned_monitors_list->length(); j++) {
        jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor;
        oop check = JNIHandles::resolve(jobj);
        if (check == obj) {
          found = true;  // we found the object
          break;
        }
      }

      if (found) {
        // already have this object so don't include it
        continue;
      }
    }

    // add the owning object to our list
    jvmtiMonitorStackDepthInfo *jmsdi;
    err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
    if (err != JVMTI_ERROR_NONE) {
      return err;
    }
    Handle hobj(Thread::current(), obj);
    jmsdi->monitor = jni_reference(calling_thread, hobj);
    jmsdi->stack_depth = stack_depth;
    owned_monitors_list->append(jmsdi);
  }

  return err;
}

jvmtiError
JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
                              jint start_depth, jint max_count,
                              jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  Thread *current_thread = Thread::current();
  assert(SafepointSynchronize::is_at_safepoint() ||
         java_thread->is_handshake_safe_for(current_thread),
         "call by myself / at safepoint / at handshake");
  int count = 0;
  if (java_thread->has_last_Java_frame()) {
    RegisterMap reg_map(java_thread, false /* update_map */, false /* process_frames */);
    ResourceMark rm(current_thread);
    javaVFrame *jvf = java_thread->last_java_vframe(&reg_map);
    HandleMark hm(current_thread);
    if (start_depth != 0) {
      if (start_depth > 0) {
        for (int j = 0; j < start_depth && jvf != NULL; j++) {
          jvf = jvf->java_sender();
        }
        if (jvf == NULL) {
          // start_depth is deeper than the stack depth
          return JVMTI_ERROR_ILLEGAL_ARGUMENT;
        }
      } else { // start_depth < 0
        // we are referencing the starting depth based on the oldest
        // part of the stack.
        // optimize to limit the number of times that java_sender() is called
        javaVFrame *jvf_cursor = jvf;
        javaVFrame *jvf_prev = NULL;
        javaVFrame *jvf_prev_prev = NULL;
        int j = 0;
        while (jvf_cursor != NULL) {
          jvf_prev_prev = jvf_prev;
          jvf_prev = jvf_cursor;
          for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
            jvf_cursor = jvf_cursor->java_sender();
          }
        }
        if (j == start_depth) {
          // previous pointer is exactly where we want to start
          jvf = jvf_prev;
        } else {
          // we need to back up further to get to the right place
          if (jvf_prev_prev == NULL) {
            // the -start_depth is greater than the stack depth
            return JVMTI_ERROR_ILLEGAL_ARGUMENT;
          }
          // j now is the number of frames on the stack starting with
          // jvf_prev, we start from jvf_prev_prev and move older on
          // the stack that many, the result is -start_depth frames
          // remaining.
          jvf = jvf_prev_prev;
          for (; j < 0; j++) {
            jvf = jvf->java_sender();
          }
        }
      }
    }
    for (; count < max_count && jvf != NULL; count++) {
      frame_buffer[count].method = jvf->method()->jmethod_id();
      frame_buffer[count].location = (jvf->method()->is_native() ? -1 : jvf->bci());
      jvf = jvf->java_sender();
    }
  } else {
    if (start_depth != 0) {
      // no frames and there is a starting depth
      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
    }
  }
  *count_ptr = count;
  return JVMTI_ERROR_NONE;
}

jvmtiError
JvmtiEnvBase::get_frame_count(JvmtiThreadState *state, jint *count_ptr) {
  assert((state != NULL),
         "JavaThread should create JvmtiThreadState before calling this method");
  *count_ptr = state->count_frames();
  return JVMTI_ERROR_NONE;
}

jvmtiError
JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
                                 jmethodID* method_ptr, jlocation* location_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  Thread* current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread),
         "call by myself or at handshake");
  ResourceMark rm(current_thread);

  vframe *vf = vframeForNoProcess(java_thread, depth);
  if (vf == NULL) {
    return JVMTI_ERROR_NO_MORE_FRAMES;
  }

  // vframeFor should return a java frame. If it doesn't
  // it means we've got an internal error and we return the
  // error in product mode. In debug mode we will instead
  // attempt to cast the vframe to a javaVFrame and will
  // cause an assertion/crash to allow further diagnosis.
#ifdef PRODUCT
  if (!vf->is_java_frame()) {
    return JVMTI_ERROR_INTERNAL;
  }
#endif

  HandleMark hm(current_thread);
  javaVFrame *jvf = javaVFrame::cast(vf);
  Method* method = jvf->method();
  if (method->is_native()) {
    *location_ptr = -1;
  } else {
    *location_ptr = jvf->bci();
  }
  *method_ptr = method->jmethod_id();

  return JVMTI_ERROR_NONE;
}


jvmtiError
JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject object, jvmtiMonitorUsage* info_ptr) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  Thread* current_thread = VMThread::vm_thread();
  assert(current_thread == Thread::current(), "must be");

  HandleMark hm(current_thread);
  Handle hobj;

  // Check arguments
  {
    oop mirror = JNIHandles::resolve_external_guard(object);
    NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
    NULL_CHECK(info_ptr, JVMTI_ERROR_NULL_POINTER);

    hobj = Handle(current_thread, mirror);
  }

  ThreadsListHandle tlh(current_thread);
  JavaThread *owning_thread = NULL;
  ObjectMonitor *mon = NULL;
  jvmtiMonitorUsage ret = {
      NULL, 0, 0, NULL, 0, NULL
  };

  uint32_t debug_bits = 0;
  // first derive the object's owner and entry_count (if any)
  {
    // Revoke any biases before querying the mark word
    BiasedLocking::revoke_at_safepoint(hobj);

    address owner = NULL;
    {
      markWord mark = hobj()->mark();

      if (!mark.has_monitor()) {
        // this object has a lightweight monitor

        if (mark.has_locker()) {
          owner = (address)mark.locker(); // save the address of the Lock word
        }
        // implied else: no owner
      } else {
        // this object has a heavyweight monitor
        mon = mark.monitor();

        // The owner field of a heavyweight monitor may be NULL for no
        // owner, a JavaThread * or it may still be the address of the
        // Lock word in a JavaThread's stack. A monitor can be inflated
        // by a non-owning JavaThread, but only the owning JavaThread
        // can change the owner field from the Lock word to the
        // JavaThread * and it may not have done that yet.
        owner = (address)mon->owner();
      }
    }

    if (owner != NULL) {
      // This monitor is owned so we have to find the owning JavaThread.
      owning_thread = Threads::owning_thread_from_monitor_owner(tlh.list(), owner);
      assert(owning_thread != NULL, "owning JavaThread must not be NULL");
      Handle th(current_thread, owning_thread->threadObj());
      ret.owner = (jthread)jni_reference(calling_thread, th);
    }

    if (owning_thread != NULL) {  // monitor is owned
      // The recursions field of a monitor does not reflect recursions
      // as lightweight locks before inflating the monitor are not included.
      // We have to count the number of recursive monitor entries the hard way.
      // We pass a handle to survive any GCs along the way.
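      // Agent-side view (illustrative; assumes a jvmtiEnv* named jvmti):
      //   jvmtiMonitorUsage usage;
      //   (*jvmti)->GetObjectMonitorUsage(jvmti, object, &usage);
      // usage.entry_count is the value computed here: the number of times
      // the owning thread has entered the monitor.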
      ret.entry_count = count_locked_objects(owning_thread, hobj);
    }
    // implied else: entry_count == 0
  }

  jint nWant = 0, nWait = 0;
  if (mon != NULL) {
    // this object has a heavyweight monitor
    nWant = mon->contentions(); // # of threads contending for monitor
    nWait = mon->waiters();     // # of threads in Object.wait()
    ret.waiter_count = nWant + nWait;
    ret.notify_waiter_count = nWait;
  } else {
    // this object has a lightweight monitor
    ret.waiter_count = 0;
    ret.notify_waiter_count = 0;
  }

  // Allocate memory for heavyweight and lightweight monitor.
  jvmtiError err;
  err = allocate(ret.waiter_count * sizeof(jthread *), (unsigned char**)&ret.waiters);
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  err = allocate(ret.notify_waiter_count * sizeof(jthread *),
                 (unsigned char**)&ret.notify_waiters);
  if (err != JVMTI_ERROR_NONE) {
    deallocate((unsigned char*)ret.waiters);
    return err;
  }

  // now derive the rest of the fields
  if (mon != NULL) {
    // this object has a heavyweight monitor

    // Number of waiters may actually be less than the waiter count.
    // So NULL out memory so that unused memory will be NULL.
    memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *));
    memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *));

    if (ret.waiter_count > 0) {
      // we have contending and/or waiting threads
      if (nWant > 0) {
        // we have contending threads
        ResourceMark rm(current_thread);
        // get_pending_threads returns only Java threads so we do not need to
        // check for non-Java threads.
        GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon);
        if (wantList->length() < nWant) {
          // robustness: the pending list has gotten smaller
          nWant = wantList->length();
        }
        for (int i = 0; i < nWant; i++) {
          JavaThread *pending_thread = wantList->at(i);
          Handle th(current_thread, pending_thread->threadObj());
          ret.waiters[i] = (jthread)jni_reference(calling_thread, th);
        }
      }
      if (nWait > 0) {
        // we have threads in Object.wait()
        int offset = nWant;  // add after any contending threads
        ObjectWaiter *waiter = mon->first_waiter();
        for (int i = 0, j = 0; i < nWait; i++) {
          if (waiter == NULL) {
            // robustness: the waiting list has gotten smaller
            nWait = j;
            break;
          }
          JavaThread *w = mon->thread_of_waiter(waiter);
          if (w != NULL) {
            // If the thread was found on the ObjectWaiter list, then
            // it has not been notified. This thread can't change the
            // state of the monitor so it doesn't need to be suspended.
            Handle th(current_thread, w->threadObj());
            ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th);
            ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th);
          }
          waiter = mon->next_waiter(waiter);
        }
      }
    } // ThreadsListHandle is destroyed here.

    // Adjust count. nWant and nWait count values may be less than original.
    ret.waiter_count = nWant + nWait;
    ret.notify_waiter_count = nWait;
  } else {
    // this object has a lightweight monitor and we have nothing more
    // to do here because the defaults are just fine.
  }

  // we don't update return parameter unless everything worked
  *info_ptr = ret;

  return JVMTI_ERROR_NONE;
}

ResourceTracker::ResourceTracker(JvmtiEnv* env) {
  _env = env;
  _allocations = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<unsigned char*>(20, mtServiceability);
  _failed = false;
}
ResourceTracker::~ResourceTracker() {
  if (_failed) {
    for (int i=0; i<_allocations->length(); i++) {
      _env->deallocate(_allocations->at(i));
    }
  }
  delete _allocations;
}

jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) {
  unsigned char *ptr;
  jvmtiError err = _env->allocate(size, &ptr);
  if (err == JVMTI_ERROR_NONE) {
    _allocations->append(ptr);
    *mem_ptr = ptr;
  } else {
    *mem_ptr = NULL;
    _failed = true;
  }
  return err;
}

unsigned char* ResourceTracker::allocate(jlong size) {
  unsigned char* ptr;
  allocate(size, &ptr);
  return ptr;
}

char* ResourceTracker::strdup(const char* str) {
  char *dup_str = (char*)allocate(strlen(str)+1);
  if (dup_str != NULL) {
    strcpy(dup_str, str);
  }
  return dup_str;
}

struct StackInfoNode {
  struct StackInfoNode *next;
  jvmtiStackInfo info;
};

// Create a jvmtiStackInfo inside a linked list node and create a
// buffer for the frame information, both allocated as resource objects.
// Fill in both the jvmtiStackInfo and the jvmtiFrameInfo.
// Note that either or both of thr and thread_oop
// may be null if the thread is new or has exited.
void
MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
#ifdef ASSERT
  Thread *current_thread = Thread::current();
  assert(SafepointSynchronize::is_at_safepoint() ||
         thr->is_handshake_safe_for(current_thread),
         "call by myself / at safepoint / at handshake");
#endif

  jint state = 0;
  struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode);
  jvmtiStackInfo *infop = &(node->info);
  node->next = head();
  set_head(node);
  infop->frame_count = 0;
  infop->thread = jt;

  if (thread_oop != NULL) {
    // get most state bits
    state = (jint)java_lang_Thread::get_thread_status(thread_oop);
  }

  if (thr != NULL) {    // add more state bits if there is a JavaThread to query
    if (thr->is_suspended()) {
      state |= JVMTI_THREAD_STATE_SUSPENDED;
    }
    JavaThreadState jts = thr->thread_state();
    if (jts == _thread_in_native) {
      state |= JVMTI_THREAD_STATE_IN_NATIVE;
    }
    if (thr->is_interrupted(false)) {
      state |= JVMTI_THREAD_STATE_INTERRUPTED;
    }
  }
  infop->state = state;

  if (thr != NULL && (state & JVMTI_THREAD_STATE_ALIVE) != 0) {
    infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count());
    env()->get_stack_trace(thr, 0, max_frame_count(),
                           infop->frame_buffer, &(infop->frame_count));
  } else {
    infop->frame_buffer = NULL;
    infop->frame_count = 0;
  }
  _frame_count_total += infop->frame_count;
}

// Based on the stack information in the linked list, allocate memory
// block to return and fill it from the info in the linked list.
void
MultipleStackTracesCollector::allocate_and_fill_stacks(jint thread_count) {
  // do I need to worry about alignment issues?
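  // Sketch of the single allocation filled in below:
  //   [ jvmtiStackInfo x thread_count ][ jvmtiFrameInfo x _frame_count_total ]
  // Stack infos are written backwards from the region boundary (the linked
  // list is in reverse order) while frame infos are written forwards from
  // that boundary, which is what the asserts at the end of this method check.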
  jlong alloc_size = thread_count * sizeof(jvmtiStackInfo)
                   + _frame_count_total * sizeof(jvmtiFrameInfo);
  env()->allocate(alloc_size, (unsigned char **)&_stack_info);

  // pointers to move through the newly allocated space as it is filled in
  jvmtiStackInfo *si = _stack_info + thread_count;  // bottom of stack info
  jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si;        // is the top of frame info

  // copy information in resource area into allocated buffer
  // insert stack info backwards since linked list is backwards
  // insert frame info forwards
  // walk the StackInfoNodes
  for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
    jint frame_count = sin->info.frame_count;
    size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
    --si;
    memcpy(si, &(sin->info), sizeof(jvmtiStackInfo));
    if (frames_size == 0) {
      si->frame_buffer = NULL;
    } else {
      memcpy(fi, sin->info.frame_buffer, frames_size);
      si->frame_buffer = fi;  // point to the newly allocated copy of the frames
      fi += frame_count;
    }
  }
  assert(si == _stack_info, "the last copied stack info must be the first record");
  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
         "the last copied frame info must be the last record");
}


void
VM_GetThreadListStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  ThreadsListHandle tlh;
  for (int i = 0; i < _thread_count; ++i) {
    jthread jt = _thread_list[i];
    JavaThread* java_thread = NULL;
    oop thread_oop = NULL;
    jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop);
    if (err != JVMTI_ERROR_NONE) {
      // We got an error code so we don't have a JavaThread *, but
      // only return an error from here if we didn't get a valid
      // thread_oop.
      if (thread_oop == NULL) {
        _collector.set_result(err);
        return;
      }
      // We have a valid thread_oop.
    }
    _collector.fill_frames(jt, java_thread, thread_oop);
  }
  _collector.allocate_and_fill_stacks(_thread_count);
}

void
GetSingleStackTraceClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  oop thread_oop = jt->threadObj();

  if (!jt->is_exiting() && thread_oop != NULL) {
    ResourceMark rm;
    _collector.fill_frames(_jthread, jt, thread_oop);
    _collector.allocate_and_fill_stacks(1);
  }
}

void
VM_GetAllStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  _final_thread_count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    oop thread_oop = jt->threadObj();
    if (thread_oop != NULL &&
        !jt->is_exiting() &&
        java_lang_Thread::is_alive(thread_oop) &&
        !jt->is_hidden_from_external_view()) {
      ++_final_thread_count;
      // Handle block of the calling thread is used to create local refs.
      _collector.fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
                             jt, thread_oop);
    }
  }
  _collector.allocate_and_fill_stacks(_final_thread_count);
}

// Verifies that the top frame is a java frame in an expected state.
// Deoptimizes frame if needed.
// Checks that the frame method signature matches the return type (tos).
// HandleMark must be defined in the caller only.
// It is to keep a ret_ob_h handle alive after return to the caller.
jvmtiError
JvmtiEnvBase::check_top_frame(Thread* current_thread, JavaThread* java_thread,
                              jvalue value, TosState tos, Handle* ret_ob_h) {
  ResourceMark rm(current_thread);

  vframe *vf = vframeForNoProcess(java_thread, 0);
  NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES);

  javaVFrame *jvf = (javaVFrame*) vf;
  if (!vf->is_java_frame() || jvf->method()->is_native()) {
    return JVMTI_ERROR_OPAQUE_FRAME;
  }

  // If the frame is a compiled one, need to deoptimize it.
  if (vf->is_compiled_frame()) {
    if (!vf->fr().can_be_deoptimized()) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
    Deoptimization::deoptimize_frame(java_thread, jvf->fr().id());
  }

  // Get information about method return type
  Symbol* signature = jvf->method()->signature();

  ResultTypeFinder rtf(signature);
  TosState fr_tos = as_TosState(rtf.type());
  if (fr_tos != tos) {
    if (tos != itos || (fr_tos != btos && fr_tos != ztos && fr_tos != ctos && fr_tos != stos)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
  }

  // Check that the jobject class matches the return type signature.
  jobject jobj = value.l;
  if (tos == atos && jobj != NULL) { // NULL reference is allowed
    Handle ob_h(current_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT);
    Klass* ob_k = ob_h()->klass();
    NULL_CHECK(ob_k, JVMTI_ERROR_INVALID_OBJECT);

    // Method return type signature.
    char* ty_sign = 1 + strchr(signature->as_C_string(), JVM_SIGNATURE_ENDFUNC);

    if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
    *ret_ob_h = ob_h;
  }
  return JVMTI_ERROR_NONE;
} /* end check_top_frame */


// ForceEarlyReturn<type> follows the PopFrame approach in many aspects.
// Main difference is on the last stage in the interpreter.
// The PopFrame stops method execution to continue execution
// from the same method call instruction.
// The ForceEarlyReturn forces return from method so the execution
// continues at the bytecode following the method call.

// java_thread - protected by ThreadsListHandle and pre-checked

jvmtiError
JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
  // retrieve or create the state
  JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
  if (state == NULL) {
    return JVMTI_ERROR_THREAD_NOT_ALIVE;
  }

  // Eagerly reallocate scalar replaced objects.
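  // Compiled frames of the target may contain objects that were scalar
  // replaced by escape analysis; they have to be rematerialized before the
  // top frame can be forced to return. Agent-side view (illustrative;
  // assumes a jvmtiEnv* named jvmti with can_force_early_return granted):
  //   (*jvmti)->ForceEarlyReturnInt(jvmti, thread, 42);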
  JavaThread* current_thread = JavaThread::current();
  EscapeBarrier eb(true, current_thread, java_thread);
  if (!eb.deoptimize_objects(0)) {
    // Reallocation of scalar replaced objects failed -> return with error
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }

  SetForceEarlyReturn op(state, value, tos);
  if (java_thread == current_thread) {
    op.doit(java_thread, true /* self */);
  } else {
    Handshake::execute(&op, java_thread);
  }
  return op.result();
}

void
SetForceEarlyReturn::doit(Thread *target, bool self) {
  JavaThread* java_thread = target->as_Java_thread();
  Thread* current_thread = Thread::current();
  HandleMark hm(current_thread);

  if (java_thread->is_exiting()) {
    return; /* JVMTI_ERROR_THREAD_NOT_ALIVE (default) */
  }
  if (!self) {
    if (!java_thread->is_suspended()) {
      _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
      return;
    }
  }

  // Check to see if a ForceEarlyReturn was already in progress
  if (_state->is_earlyret_pending()) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    _result = JVMTI_ERROR_INTERNAL;
    return;
  }
  {
    // The same as for PopFrame. Workaround bug:
    //   4812902: popFrame hangs if the method is waiting at a synchronize
    // Catch this condition and return an error to avoid hanging.
    // Now JVMTI spec allows an implementation to bail out with an opaque
    // frame error.
    OSThread* osThread = java_thread->osthread();
    if (osThread->get_state() == MONITOR_WAIT) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
  }

  Handle ret_ob_h;
  _result = JvmtiEnvBase::check_top_frame(current_thread, java_thread, _value, _tos, &ret_ob_h);
  if (_result != JVMTI_ERROR_NONE) {
    return;
  }
  assert(_tos != atos || _value.l == NULL || ret_ob_h() != NULL,
         "return object oop must not be NULL if jobject is not NULL");

  // Update the thread state to reflect that the top frame must be
  // forced to return.
  // The current frame will be returned later when the suspended
  // thread is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  _state->set_earlyret_pending();
  _state->set_earlyret_oop(ret_ob_h());
  _state->set_earlyret_value(_value, _tos);

  // Set pending step flag for this early return.
  // It is cleared when next step event is posted.
  _state->set_pending_step_for_earlyret();
}

void
JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
  if (_error != JVMTI_ERROR_NONE) {
    // Error occurred in previous iteration so no need to add
    // to the list.
    return;
  }
  // Filter out on stack monitors collected during stack walk.
  oop obj = mon->object();
  bool found = false;
  for (int j = 0; j < _owned_monitors_list->length(); j++) {
    jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
    oop check = JNIHandles::resolve(jobj);
    if (check == obj) {
      // On stack monitor already collected during the stack walk.
      found = true;
      break;
    }
  }
  if (found == false) {
    // This is an off-stack monitor (e.g. acquired via jni MonitorEnter).
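    // Per the JVMTI spec, monitors that were not acquired in a Java frame
    // (e.g. via JNI MonitorEnter) are reported with a stack depth of -1,
    // which is what is filled in below.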
    jvmtiError err;
    jvmtiMonitorStackDepthInfo *jmsdi;
    err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
    if (err != JVMTI_ERROR_NONE) {
      _error = err;
      return;
    }
    Handle hobj(Thread::current(), obj);
    jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
    // stack depth is unknown for this monitor.
    jmsdi->stack_depth = -1;
    _owned_monitors_list->append(jmsdi);
  }
}

GrowableArray<OopHandle>* JvmtiModuleClosure::_tbl = NULL;

void JvmtiModuleClosure::do_module(ModuleEntry* entry) {
  assert_locked_or_safepoint(Module_lock);
  OopHandle module = entry->module_handle();
  guarantee(module.resolve() != NULL, "module object is NULL");
  _tbl->push(module);
}

jvmtiError
JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobject** modules_ptr) {
  ResourceMark rm;
  MutexLocker mcld(ClassLoaderDataGraph_lock);
  MutexLocker ml(Module_lock);

  _tbl = new GrowableArray<OopHandle>(77);
  if (_tbl == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }

  // Iterate over all the modules loaded into the system.
  ClassLoaderDataGraph::modules_do(&do_module);

  jint len = _tbl->length();
  guarantee(len > 0, "at least one module must be present");

  jobject* array = (jobject*)env->jvmtiMalloc((jlong)(len * sizeof(jobject)));
  if (array == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }
  for (jint idx = 0; idx < len; idx++) {
    array[idx] = JNIHandles::make_local(Thread::current(), _tbl->at(idx).resolve());
  }
  _tbl = NULL;
  *modules_ptr = array;
  *module_count_ptr = len;
  return JVMTI_ERROR_NONE;
}

void
UpdateForPopTopFrameClosure::doit(Thread *target, bool self) {
  Thread* current_thread = Thread::current();
  HandleMark hm(current_thread);
  JavaThread* java_thread = target->as_Java_thread();

  if (java_thread->is_exiting()) {
    return; /* JVMTI_ERROR_THREAD_NOT_ALIVE (default) */
  }
  assert(java_thread == _state->get_thread(), "Must be");

  if (!self && !java_thread->is_suspended()) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return;
  }

  // Check to see if a PopFrame was already in progress
  if (java_thread->popframe_condition() != JavaThread::popframe_inactive) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    _result = JVMTI_ERROR_INTERNAL;
    return;
  }

  // Was workaround bug
  //   4812902: popFrame hangs if the method is waiting at a synchronize
  // Catch this condition and return an error to avoid hanging.
  // Now JVMTI spec allows an implementation to bail out with an opaque frame error.
  OSThread* osThread = java_thread->osthread();
  if (osThread->get_state() == MONITOR_WAIT) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return;
  }

  ResourceMark rm(current_thread);
  // Check if there is more than one Java frame in this thread, that the top two frames
  // are Java (not native) frames, and that there is no intervening VM frame
  int frame_count = 0;
  bool is_interpreted[2];
  intptr_t *frame_sp[2];
  // The 2-nd arg of constructor is needed to stop iterating at java entry frame.
  for (vframeStream vfs(java_thread, true, false /* process_frames */); !vfs.at_end(); vfs.next()) {
    methodHandle mh(current_thread, vfs.method());
    if (mh->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
    is_interpreted[frame_count] = vfs.is_interpreted_frame();
    frame_sp[frame_count] = vfs.frame_id();
    if (++frame_count > 1) break;
  }
  if (frame_count < 2) {
    // We haven't found two adjacent non-native Java frames on the top.
    // There can be two situations here:
    //  1. There are no more java frames
    //  2. Two top java frames are separated by non-java native frames
    if (JvmtiEnvBase::vframeForNoProcess(java_thread, 1) == NULL) {
      _result = JVMTI_ERROR_NO_MORE_FRAMES;
      return;
    } else {
      // Intervening non-java native or VM frames separate java frames.
      // Current implementation does not support this. See bug #5031735.
      // In theory it is possible to pop frames in such cases.
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }
  }

  // If any of the top 2 frames is a compiled one, need to deoptimize it
  for (int i = 0; i < 2; i++) {
    if (!is_interpreted[i]) {
      Deoptimization::deoptimize_frame(java_thread, frame_sp[i]);
    }
  }

  // Update the thread state to reflect that the top frame is popped
  // so that cur_stack_depth is maintained properly and all frameIDs
  // are invalidated.
  // The current frame will be popped later when the suspended thread
  // is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  // It's fine to update the thread state here because no JVMTI events
  // shall be posted for this PopFrame.

  _state->update_for_pop_top_frame();
  java_thread->set_popframe_condition(JavaThread::popframe_pending_bit);
  // Set pending step flag for this popframe and it is cleared when next
  // step event is posted.
  _state->set_pending_step_for_popframe();
  _result = JVMTI_ERROR_NONE;
}

void
SetFramePopClosure::doit(Thread *target, bool self) {
  ResourceMark rm;
  JavaThread* java_thread = target->as_Java_thread();

  if (java_thread->is_exiting()) {
    return; /* JVMTI_ERROR_THREAD_NOT_ALIVE (default) */
  }
  assert(_state->get_thread() == java_thread, "Must be");

  if (!self && !java_thread->is_suspended()) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return;
  }

  vframe *vf = JvmtiEnvBase::vframeForNoProcess(java_thread, _depth);
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return;
  }

  if (!vf->is_java_frame() || ((javaVFrame*) vf)->method()->is_native()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return;
  }

  assert(vf->frame_pointer() != NULL, "frame pointer mustn't be NULL");
  int frame_number = _state->count_frames() - _depth;
  _state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
  _result = JVMTI_ERROR_NONE;
}

void
GetOwnedMonitorInfoClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread,
                                                         jt,
                                                         _owned_monitors_list);
  }
}

void
GetCurrentContendedMonitorClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,
                                                                    jt,
                                                                    _owned_monitor_ptr);
  }
}

void
GetStackTraceClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase *)_env)->get_stack_trace(jt,
                                                      _start_depth, _max_count,
                                                      _frame_buffer, _count_ptr);
  }
}

void
GetFrameCountClosure::do_thread(Thread *target) {
  JavaThread* jt = _state->get_thread();
  assert(target == jt, "just checking");
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
  }
}

void
GetFrameLocationClosure::do_thread(Thread *target) {
  JavaThread *jt = target->as_Java_thread();
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_location(jt, _depth,
                                                        _method_ptr, _location_ptr);
  }
}