src/hotspot/share/runtime/sharedRuntime.cpp


 455 
 456 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
 457   // Note: This is called when we have unwound the frame of the callee that did
 458   // throw an exception. So far, no check has been performed by the StackWatermarkSet.
 459   // Notably, the stack is not walkable at this point, and hence the check must
 460   // be deferred until later. Specifically, any of the handlers returned by
 461   // this function will be dispatched to, and will perform the deferred checks to
 462   // StackWatermarkSet::after_unwind at a point where the stack is walkable.
 463   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 464   assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 465 
 466   // Reset method handle flag.
 467   current->set_is_method_handle_return(false);
 468 
 469 #if INCLUDE_JVMCI
 470   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 471   // and other exception handler continuations do not read it
 472   current->set_exception_pc(NULL);
 473 #endif // INCLUDE_JVMCI
 474 




 475   // The fastest case first
 476   CodeBlob* blob = CodeCache::find_blob(return_address);
 477   CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
 478   if (nm != NULL) {
 479     // Set flag if return address is a method handle call site.
 480     current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 481     // native nmethods don't have exception handlers
 482     assert(!nm->is_native_method(), "no exception handler");
 483     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 484     if (nm->is_deopt_pc(return_address)) {
 485       // If we come here because of a stack overflow, the stack may be
 486       // unguarded. Reguard the stack; otherwise, if we return to the
 487       // deopt blob and the stack bang causes a stack overflow, we
 488       // crash.
 489       StackOverflow* overflow_state = current->stack_overflow_state();
 490       bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
 491       if (overflow_state->reserved_stack_activation() != current->stack_base()) {
 492         overflow_state->set_reserved_stack_activation(current->stack_base());
 493       }
 494       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 495       // The deferred StackWatermarkSet::after_unwind check will be performed in
 496       // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
 497       return SharedRuntime::deopt_blob()->unpack_with_exception();
 498     } else {
 499       // The deferred StackWatermarkSet::after_unwind check will be performed in
 500       // * OptoRuntime::rethrow_C for C2 code
 501       // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
 502       return nm->exception_begin();

 508     // The deferred StackWatermarkSet::after_unwind check will be performed in
 509     // JavaCallWrapper::~JavaCallWrapper
 510     return StubRoutines::catch_exception_entry();
 511   }
 512   if (blob != NULL && blob->is_optimized_entry_blob()) {
 513     return ((OptimizedEntryBlob*)blob)->exception_handler();
 514   }
 515   // Interpreted code
 516   if (Interpreter::contains(return_address)) {
 517     // The deferred StackWatermarkSet::after_unwind check will be performed in
 518     // InterpreterRuntime::exception_handler_for_exception
 519     return Interpreter::rethrow_exception_entry();
 520   }
 521 
 522   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 523   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 524 
 525 #ifndef PRODUCT
 526   { ResourceMark rm;
 527     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));

 528     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
 529     tty->print_cr("b) other problem");
 530   }
 531 #endif // PRODUCT
 532 
 533   ShouldNotReachHere();
 534   return NULL;
 535 }
 536 
 537 
 538 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
 539   return raw_exception_handler_for_return_address(current, return_address);
 540 JRT_END
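
Taken together, raw_exception_handler_for_return_address is a chain of progressively slower classifications of the return address, each mapped to a dedicated continuation stub. A minimal self-contained sketch of that dispatch order, using hypothetical stand-in types rather than the HotSpot API:

    #include <cassert>

    // Hypothetical classification of a return address; the real code derives
    // this from CodeCache::find_blob() and Interpreter::contains().
    enum class PcKind { CompiledDeopt, Compiled, JavaCallStub, OptimizedEntry, Interpreted };

    // Stand-in for a code address; the real handlers are stub entry points.
    using Handler = const char*;

    Handler handler_for(PcKind kind) {
      switch (kind) {
        case PcKind::CompiledDeopt:  return "deopt_blob()->unpack_with_exception()";
        case PcKind::Compiled:       return "nm->exception_begin()";
        case PcKind::JavaCallStub:   return "StubRoutines::catch_exception_entry()";
        case PcKind::OptimizedEntry: return "entry_blob->exception_handler()";
        case PcKind::Interpreted:    return "Interpreter::rethrow_exception_entry()";
      }
      assert(false && "ShouldNotReachHere");
      return nullptr;
    }
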
 541 
 542 
 543 address SharedRuntime::get_poll_stub(address pc) {
 544   address stub;
 545   // Look up the code blob
 546   CodeBlob *cb = CodeCache::find_blob(pc);
 547 
 548   // Should be an nmethod
 549   guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
 550 
 551   // Look up the relocation information
 552   assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
 553     "safepoint polling: type must be poll");
 554 
 555 #ifdef ASSERT
 556   if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
 557     tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
 558     Disassembler::decode(cb);
 559     fatal("Only polling locations are used for safepoint");
 560   }
 561 #endif
 562 
 563   bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
 564   bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
 565   if (at_poll_return) {
 566     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 567            "polling page return stub not created yet");
 568     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 569   } else if (has_wide_vectors) {
 570     assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
 571            "polling page vectors safepoint stub not created yet");
 572     stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
 573   } else {
 574     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
 575            "polling page safepoint stub not created yet");
 576     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
 577   }
 578   log_debug(safepoint)("... found polling page %s exception at pc = "
 579                        INTPTR_FORMAT ", stub = " INTPTR_FORMAT,
 580                        at_poll_return ? "return" : "loop",
 581                        (intptr_t)pc, (intptr_t)stub);
 582   return stub;
 583 }
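
The stub selection above reduces to two flags taken in priority order: a poll-return site needs the return handler (it preserves the method's return value), and a method using wide vector registers needs the variant that saves them. A compilable sketch of just that choice, with placeholder stub names standing in for the real blob entry points:

    // Placeholder stub identifiers; the real code returns blob entry points.
    const char* kPollReturnStub  = "polling_page_return_handler_blob";
    const char* kWideVectorsStub = "polling_page_vectors_safepoint_handler_blob";
    const char* kPollLoopStub    = "polling_page_safepoint_handler_blob";

    const char* select_poll_stub(bool at_poll_return, bool has_wide_vectors) {
      if (at_poll_return)   return kPollReturnStub;   // must preserve return values
      if (has_wide_vectors) return kWideVectorsStub;  // must save full vector state
      return kPollLoopStub;                           // ordinary loop/backedge poll
    }
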
 584 
 585 
 586 oop SharedRuntime::retrieve_receiver( Symbol* sig, frame caller ) {
 587   assert(caller.is_interpreted_frame(), "");
 588   int args_size = ArgumentSizeComputer(sig).size() + 1;
 589   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
 590   oop result = cast_to_oop(*caller.interpreter_frame_tos_at(args_size - 1));


 591   assert(Universe::heap()->is_in(result) && oopDesc::is_oop(result), "receiver must be an oop");
 592   return result;
 593 }
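
The slot arithmetic in retrieve_receiver is worth a worked example: the receiver sits just below the arguments on the interpreter expression stack, so its distance from top-of-stack is the argument size in slots. A simplified, self-contained slot counter (it ignores array types, unlike the real ArgumentSizeComputer):

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Count JVM argument slots in a method descriptor: long and double take
    // two slots, everything else one. Simplified: assumes no array types.
    int arg_slots(const std::string& sig) {
      int slots = 0;
      for (std::size_t i = 1; i < sig.size() && sig[i] != ')'; ++i) {
        if (sig[i] == 'J' || sig[i] == 'D') slots += 2;
        else if (sig[i] == 'L') { slots += 1; i = sig.find(';', i); }
        else slots += 1;
      }
      return slots;
    }

    int main() {
      // For "(JLjava/lang/String;)V": 2 + 1 = 3 argument slots, plus 1 for
      // the receiver, so retrieve_receiver reads interpreter_frame_tos_at(4 - 1).
      int args_size = arg_slots("(JLjava/lang/String;)V") + 1;
      assert(args_size == 4);
      return 0;
    }
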
 594 
 595 
 596 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
 597   if (JvmtiExport::can_post_on_exceptions()) {
 598     vframeStream vfst(current, true);
 599     methodHandle method = methodHandle(current, vfst.method());
 600     address bcp = method()->bcp_from(vfst.bci());
 601     JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
 602   }
 603 
 604 #if INCLUDE_JVMCI
 605   if (EnableJVMCI && UseJVMCICompiler) {
 606     vframeStream vfst(current, true);
 607     methodHandle method = methodHandle(current, vfst.method());
 608     int bci = vfst.bci();
 609     MethodData* trap_mdo = method->method_data();
 610     if (trap_mdo != NULL) {

 722   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 723   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 724     // Allow abbreviated catch tables.  The idea is to allow a method
 725     // to materialize its exceptions without committing to the exact
 726     // routing of exceptions.  In particular this is needed for adding
 727     // a synthetic handler to unlock monitors when inlining
 728     // synchronized methods since the unlock path isn't represented in
 729     // the bytecodes.
 730     t = table.entry_for(catch_pco, -1, 0);
 731   }
 732 
 733 #ifdef COMPILER1
 734   if (t == NULL && nm->is_compiled_by_c1()) {
 735     assert(nm->unwind_handler_begin() != NULL, "");
 736     return nm->unwind_handler_begin();
 737   }
 738 #endif
 739 
 740   if (t == NULL) {
 741     ttyLocker ttyl;
 742     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", p2i(ret_pc), handler_bci);
 743     tty->print_cr("   Exception:");
 744     exception->print();
 745     tty->cr();
 746     tty->print_cr(" Compiled exception table:");
 747     table.print();
 748     nm->print_code();

 749     guarantee(false, "missing exception handler");
 750     return NULL;
 751   }
 752 
 753   return nm->code_begin() + t->pco();
 754 }
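
The fallback above simply re-queries the handler table with a wildcard bci of -1, which is how abbreviated entries such as the synthetic monitor-unlock handler are found. A self-contained model of that two-step lookup, with a hypothetical entry layout:

    #include <vector>

    struct HandlerEntry { int pco; int bci; int scope_depth; int handler_pco; };

    // Exact (pco, bci, depth) match first; then accept an abbreviated entry
    // registered with bci == -1 at depth 0, e.g. a synthetic unlock handler.
    const HandlerEntry* find_handler(const std::vector<HandlerEntry>& table,
                                     int catch_pco, int handler_bci, int depth) {
      for (const HandlerEntry& e : table)
        if (e.pco == catch_pco && e.bci == handler_bci && e.scope_depth == depth)
          return &e;
      for (const HandlerEntry& e : table)
        if (e.pco == catch_pco && e.bci == -1 && e.scope_depth == 0)
          return &e;
      return nullptr;
    }
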
 755 
 756 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
 757   // These errors occur only at call sites
 758   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
 759 JRT_END
 760 
 761 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
 762   // These errors occur only at call sites
 763   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
 764 JRT_END
 765 
 766 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
 767   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 768 JRT_END

1069     return caller->attached_method_before_pc(pc);
1070   }
1071   return NULL;
1072 }
1073 
1074 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
1075 // for a call currently in progress, i.e., arguments have been pushed on the stack
1076 // but the callee has not been invoked yet.  Caller frame must be compiled.
1077 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1078                                               CallInfo& callinfo, TRAPS) {
1079   Handle receiver;
1080   Handle nullHandle;  // create a handy null handle for exception returns
1081   JavaThread* current = THREAD;
1082 
1083   assert(!vfst.at_end(), "Java frame must exist");
1084 
1085   // Find caller and bci from vframe
1086   methodHandle caller(current, vfst.method());
1087   int          bci   = vfst.bci();
1088 






1089   Bytecode_invoke bytecode(caller, bci);
1090   int bytecode_index = bytecode.index();
1091   bc = bytecode.invoke_code();
1092 
1093   methodHandle attached_method(current, extract_attached_method(vfst));
1094   if (attached_method.not_null()) {
1095     Method* callee = bytecode.static_target(CHECK_NH);
1096     vmIntrinsics::ID id = callee->intrinsic_id();
1097     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1098     // it attaches statically resolved method to the call site.
1099     if (MethodHandles::is_signature_polymorphic(id) &&
1100         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1101       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1102 
1103       // Adjust invocation mode according to the attached method.
1104       switch (bc) {
1105         case Bytecodes::_invokevirtual:
1106           if (attached_method->method_holder()->is_interface()) {
1107             bc = Bytecodes::_invokeinterface;
1108           }

1131                       bc != Bytecodes::_invokehandle;
1132 
1133   // Find receiver for non-static call
1134   if (has_receiver) {
1135     // This register map must be updated since we need to find the receiver for
1136     // compiled frames. The receiver might be in a register.
1137     RegisterMap reg_map2(current);
1138     frame stubFrame   = current->last_frame();
1139     // Caller-frame is a compiled frame
1140     frame callerFrame = stubFrame.sender(&reg_map2);
1141 
1142     if (attached_method.is_null()) {
1143       Method* callee = bytecode.static_target(CHECK_NH);
1144       if (callee == NULL) {
1145         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1146       }
1147     }
1148 
1149     // Retrieve from a compiled argument list
1150     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));

1151 
1152     if (receiver.is_null()) {
1153       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1154     }
1155   }
1156 
1157   // Resolve method
1158   if (attached_method.not_null()) {
1159     // Parameterized by attached method.
1160     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1161   } else {
1162     // Parameterized by bytecode.
1163     constantPoolHandle constants(current, caller->constants());
1164     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1165   }
1166 
1167 #ifdef ASSERT
1168   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1169   if (has_receiver) {
1170     assert(receiver.not_null(), "should have thrown exception");

2109     // Only try quick_enter() if we're not trying to reach a safepoint
2110     // so that the calling thread reaches the safepoint more quickly.
2111     if (ObjectSynchronizer::quick_enter(obj, current, lock)) return;
2112   }
2113   // NO_ASYNC required because an async exception on the state transition destructor
2114   // would leave you with the lock held and it would never be released.
2115   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2116   // and the model is that an exception implies the method failed.
2117   JRT_BLOCK_NO_ASYNC
2118   Handle h_obj(THREAD, obj);
2119   ObjectSynchronizer::enter(h_obj, lock, current);
2120   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2121   JRT_BLOCK_END
2122 }
2123 
2124 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2125 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2126   SharedRuntime::monitor_enter_helper(obj, lock, current);
2127 JRT_END
2128 





2129 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2130   assert(JavaThread::current() == current, "invariant");
2131   // Exit must be non-blocking, and therefore no exceptions can be thrown.
2132   ExceptionMark em(current);
2133   // The object could become unlocked through a JNI call, which we have no other checks for.
2134   // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2135   if (obj->is_unlocked()) {
2136     if (CheckJNICalls) {
2137       fatal("Object has been unlocked by JNI");
2138     }
2139     return;
2140   }
2141   ObjectSynchronizer::exit(obj, lock, current);
2142 }
2143 
2144 // Handles the uncommon cases of monitor unlocking in compiled code
2145 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2146   SharedRuntime::monitor_exit_helper(obj, lock, current);
2147 JRT_END
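
The enter path above has the classic try-fast-then-block shape: attempt a non-blocking acquire, but skip even that when a safepoint is pending so the thread parks sooner, then fall back to a blocking slow path. The same shape expressed with standard C++ primitives (purely illustrative; quick_enter works on object mark words, not mutexes):

    #include <atomic>
    #include <mutex>

    std::atomic<bool> safepoint_pending{false};  // stand-in for safepoint state

    void monitor_enter(std::mutex& m) {
      // Fast path: only when no safepoint is requested, so the calling thread
      // reaches a pending safepoint quickly instead of contending on the lock.
      if (!safepoint_pending.load(std::memory_order_relaxed) && m.try_lock()) {
        return;
      }
      m.lock();  // slow path: may block (the JRT_BLOCK_NO_ASYNC section above)
    }
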
2148 

2902   }
2903 #endif
2904 
2905   new_adapter = AdapterBlob::create(&buffer);
2906   NOT_PRODUCT(int insts_size = buffer.insts_size());
2907   if (new_adapter == NULL) {
2908     // CodeCache is full, disable compilation
 2909     // Ought to log this, but the compile log is only per compile thread
 2910     // and we're some nondescript Java thread.
2911     return NULL;
2912   }
2913   entry->relocate(new_adapter->content_begin());
2914 #ifndef PRODUCT
 2915   // debugging support
2916   if (PrintAdapterHandlers || PrintStubCode) {
2917     ttyLocker ttyl;
2918     entry->print_adapter_on(tty);
2919     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2920                   _adapters->number_of_entries(), fingerprint->as_basic_args_string(),
2921                   fingerprint->as_string(), insts_size);
2922     tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
2923     if (Verbose || PrintStubCode) {
2924       address first_pc = entry->base_address();
2925       if (first_pc != NULL) {
2926         Disassembler::decode(first_pc, first_pc + insts_size, tty
2927                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2928         tty->cr();
2929       }
2930     }
2931   }
2932 #endif
2933 
2934   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2935   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2936   if (contains_all_checks || !VerifyAdapterCalls) {
2937     _adapters->add(entry);
2938   }
2939   return entry;
2940 }
2941 
2942 address AdapterHandlerEntry::base_address() {

2990     return false;
2991   }
2992 
2993   return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
2994 }
2995 #endif
2996 
2997 
2998 /**
2999  * Create a native wrapper for this native method.  The wrapper converts the
3000  * Java-compiled calling convention to the native convention, handles
 3001  * arguments, and transitions to native.  On return from the native code we
 3002  * transition back to Java, blocking if a safepoint is in progress.
3003  */
3004 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3005   ResourceMark rm;
3006   nmethod* nm = NULL;
3007   address critical_entry = NULL;
3008 
3009   assert(method->is_native(), "must be native");
3010   assert(method->is_method_handle_intrinsic() ||
3011          method->has_native_function(), "must have something valid to call!");
3012 
3013   if (CriticalJNINatives && !method->is_method_handle_intrinsic()) {
3014     // We perform the I/O with transition to native before acquiring AdapterHandlerLibrary_lock.
3015     critical_entry = NativeLookup::lookup_critical_entry(method);
3016   }
3017 
3018   {
3019     // Perform the work while holding the lock, but perform any printing outside the lock
3020     MutexLocker mu(AdapterHandlerLibrary_lock);
3021     // See if somebody beat us to it
3022     if (method->code() != NULL) {
3023       return;
3024     }
3025 
3026     const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3027     assert(compile_id > 0, "Must generate native wrapper");
3028 
3029 
3030     ResourceMark rm;
3031     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
3032     if (buf != NULL) {
3033       CodeBuffer buffer(buf);





3034       struct { double data[20]; } locs_buf;

3035       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3036 #if defined(AARCH64)
3037       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3038       // in the constant pool to ensure ordering between the barrier and oops
3039       // accesses. For native_wrappers we need a constant.
3040       buffer.initialize_consts_size(8);
3041 #endif

3042       MacroAssembler _masm(&buffer);
3043 
3044       // Fill in the signature array, for the calling-convention call.
3045       const int total_args_passed = method->size_of_parameters();
3046 
3047       VMRegPair stack_regs[16];
3048       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3049 
3050       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3051                               method->is_static(), total_args_passed);
3052       BasicType* sig_bt = si.basic_types();
3053       assert(si.slots() == total_args_passed, "");
3054       BasicType ret_type = si.return_type();
3055 
3056       // Now get the compiled-Java arguments layout.
3057       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3058 
3059       // Generate the compiled-to-native wrapper code
3060       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type, critical_entry);
3061 

3218   for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3219        kptr2 < fr.interpreter_frame_monitor_begin();
3220        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3221     if (kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
3222       BasicLock *lock = kptr2->lock();
3223       // Inflate so the object's header no longer refers to the BasicLock.
3224       if (lock->displaced_header().is_unlocked()) {
3225         // The object is locked and the resulting ObjectMonitor* will also be
3226         // locked so it can't be async deflated until ownership is dropped.
3227         // See the big comment in basicLock.cpp: BasicLock::move_to().
3228         ObjectSynchronizer::inflate_helper(kptr2->obj());
3229       }
3230       // Now the displaced header is free to move because the
3231       // object's header no longer refers to it.
3232       buf[i++] = (intptr_t)lock->displaced_header().value();
3233       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3234     }
3235   }
3236   assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3237 






3238   return buf;
3239 JRT_END
3240 
3241 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3242   FREE_C_HEAP_ARRAY(intptr_t, buf);
3243 JRT_END
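
The OSR migration buffer filled above has a fixed layout, locals first and then one (displaced header, object) pair per held monitor, which is exactly what the assert i - max_locals == active_monitor_count*2 checks. A compilable model of that packing:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct MonitorSnapshot { std::intptr_t displaced_header; std::intptr_t obj; };

    // Mirror of the buffer layout built by OSR_migration_begin: max_locals
    // values, then two words per locked monitor.
    std::vector<std::intptr_t> pack(const std::vector<std::intptr_t>& locals,
                                    const std::vector<MonitorSnapshot>& monitors) {
      std::vector<std::intptr_t> buf(locals);
      for (const MonitorSnapshot& m : monitors) {
        buf.push_back(m.displaced_header);
        buf.push_back(m.obj);
      }
      assert(buf.size() - locals.size() == monitors.size() * 2);
      return buf;
    }
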
3244 
3245 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3246   AdapterHandlerTableIterator iter(_adapters);
3247   while (iter.has_next()) {
3248     AdapterHandlerEntry* a = iter.next();
3249     if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
3250   }
3251   return false;
3252 }
3253 
3254 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3255   AdapterHandlerTableIterator iter(_adapters);
3256   while (iter.has_next()) {
3257     AdapterHandlerEntry* a = iter.next();

3286 void AdapterHandlerLibrary::print_statistics() {
3287   _adapters->print_statistics();
3288 }
3289 
3290 #endif /* PRODUCT */
3291 
3292 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3293   StackOverflow* overflow_state = current->stack_overflow_state();
3294   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3295   overflow_state->set_reserved_stack_activation(current->stack_base());
3296 JRT_END
3297 
3298 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3299   ResourceMark rm(current);
3300   frame activation;
3301   CompiledMethod* nm = NULL;
3302   int count = 1;
3303 
3304   assert(fr.is_java_frame(), "Must start on Java frame");
3305 
3306   while (true) {




3307     Method* method = NULL;
3308     bool found = false;
3309     if (fr.is_interpreted_frame()) {
3310       method = fr.interpreter_frame_method();
3311       if (method != NULL && method->has_reserved_stack_access()) {
3312         found = true;
3313       }
3314     } else {
3315       CodeBlob* cb = fr.cb();
3316       if (cb != NULL && cb->is_compiled()) {
3317         nm = cb->as_compiled_method();
3318         method = nm->method();
 3319         // scope_desc_near() must be used instead of scope_desc_at() because, on
 3320         // SPARC, the pcDesc can be on the delay slot after the call instruction.
3321         for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) {
3322           method = sd->method();
3323           if (method != NULL && method->has_reserved_stack_access()) {
3324             found = true;
 3325           }
 3326         }
 3327       }
 3328     }
3329     if (found) {
3330       activation = fr;
3331       warning("Potentially dangerous stack overflow in "
3332               "ReservedStackAccess annotated method %s [%d]",
3333               method->name_and_sig_as_C_string(), count++);
3334       EventReservedStackActivation event;
3335       if (event.should_commit()) {
3336         event.set_method(method);
3337         event.commit();
3338       }
3339     }
3340     if (fr.is_first_java_frame()) {
3341       break;
3342     } else {
3343       fr = fr.java_sender();
3344     }
3345   }
3346   return activation;
3347 }
3348 
3349 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3350   // After any safepoint, just before going back to compiled code,
3351   // we inform the GC that we will be doing initializing writes to
3352   // this object in the future without emitting card-marks, so
 3353   // the GC may take any compensating steps.
3354 
3355   oop new_obj = current->vm_result();
3356   if (new_obj == NULL) return;
3357 
3358   BarrierSet *bs = BarrierSet::barrier_set();
3359   bs->on_slowpath_allocation_exit(current, new_obj);
3360 }

 455 
 456 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
 457   // Note: This is called when we have unwound the frame of the callee that did
 458   // throw an exception. So far, no check has been performed by the StackWatermarkSet.
 459   // Notably, the stack is not walkable at this point, and hence the check must
 460   // be deferred until later. Specifically, any of the handlers returned by
 461   // this function will be dispatched to, and will perform the deferred checks to
 462   // StackWatermarkSet::after_unwind at a point where the stack is walkable.
 463   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 464   assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 465 
 466   // Reset method handle flag.
 467   current->set_is_method_handle_return(false);
 468 
 469 #if INCLUDE_JVMCI
 470   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 471   // and other exception handler continuations do not read it
 472   current->set_exception_pc(NULL);
 473 #endif // INCLUDE_JVMCI
 474 
 475   if (Continuation::is_return_barrier_entry(return_address)) {
 476     return StubRoutines::cont_returnBarrierExc();
 477   }
 478 
 479   // The fastest case first
 480   CodeBlob* blob = CodeCache::find_blob(return_address);
 481   CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
 482   if (nm != NULL) {
 483     // Set flag if return address is a method handle call site.
 484     current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 485     // native nmethods don't have exception handlers
 486     assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
 487     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 488     if (nm->is_deopt_pc(return_address)) {
 489       // If we come here because of a stack overflow, the stack may be
 490       // unguarded. Reguard the stack; otherwise, if we return to the
 491       // deopt blob and the stack bang causes a stack overflow, we
 492       // crash.
 493       StackOverflow* overflow_state = current->stack_overflow_state();
 494       bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
 495       if (overflow_state->reserved_stack_activation() != current->stack_base()) {
 496         overflow_state->set_reserved_stack_activation(current->stack_base());
 497       }
 498       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 499       // The deferred StackWatermarkSet::after_unwind check will be performed in
 500       // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
 501       return SharedRuntime::deopt_blob()->unpack_with_exception();
 502     } else {
 503       // The deferred StackWatermarkSet::after_unwind check will be performed in
 504       // * OptoRuntime::rethrow_C for C2 code
 505       // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
 506       return nm->exception_begin();

 512     // The deferred StackWatermarkSet::after_unwind check will be performed in
 513     // JavaCallWrapper::~JavaCallWrapper
 514     return StubRoutines::catch_exception_entry();
 515   }
 516   if (blob != NULL && blob->is_optimized_entry_blob()) {
 517     return ((OptimizedEntryBlob*)blob)->exception_handler();
 518   }
 519   // Interpreted code
 520   if (Interpreter::contains(return_address)) {
 521     // The deferred StackWatermarkSet::after_unwind check will be performed in
 522     // InterpreterRuntime::exception_handler_for_exception
 523     return Interpreter::rethrow_exception_entry();
 524   }
 525 
 526   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 527   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 528 
 529 #ifndef PRODUCT
 530   { ResourceMark rm;
 531     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
 532     os::print_location(tty, (intptr_t)return_address);
 533     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
 534     tty->print_cr("b) other problem");
 535   }
 536 #endif // PRODUCT
 537 
 538   ShouldNotReachHere();
 539   return NULL;
 540 }
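
The one behavioral change in this version of the function is the new early-out for continuation return barriers, taken before any code-cache lookup: a return address inside the barrier stub is routed straight to the barrier's dedicated exception entry. A minimal model of that check, with a hypothetical PC set standing in for Continuation::is_return_barrier_entry:

    #include <cstdint>
    #include <set>

    using Address = std::uintptr_t;

    std::set<Address> return_barrier_pcs;            // hypothetical: PCs inside the barrier stub
    const Address cont_return_barrier_exc = 0x1000;  // hypothetical stub entry address

    // Returns the barrier's exception entry for barrier PCs, or 0 meaning
    // "not a barrier, fall through to the normal classification chain".
    Address check_return_barrier(Address return_address) {
      if (return_barrier_pcs.count(return_address) != 0) {
        return cont_return_barrier_exc;
      }
      return 0;
    }
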
 541 
 542 
 543 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
 544   return raw_exception_handler_for_return_address(current, return_address);
 545 JRT_END
 546 
 547 
 548 address SharedRuntime::get_poll_stub(address pc) {
 549   address stub;
 550   // Look up the code blob
 551   CodeBlob *cb = CodeCache::find_blob(pc);
 552 
 553   // Should be an nmethod
 554   guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
 555 
 556   // Look up the relocation information
 557   assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
 558       "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
 559 
 560 #ifdef ASSERT
 561   if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
 562     tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
 563     Disassembler::decode(cb);
 564     fatal("Only polling locations are used for safepoint");
 565   }
 566 #endif
 567 
 568   bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
 569   bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
 570   if (at_poll_return) {
 571     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 572            "polling page return stub not created yet");
 573     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 574   } else if (has_wide_vectors) {
 575     assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
 576            "polling page vectors safepoint stub not created yet");
 577     stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
 578   } else {
 579     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
 580            "polling page safepoint stub not created yet");
 581     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
 582   }
 583   log_debug(safepoint)("... found polling page %s exception at pc = "
 584                        INTPTR_FORMAT ", stub = " INTPTR_FORMAT,
 585                        at_poll_return ? "return" : "loop",
 586                        (intptr_t)pc, (intptr_t)stub);
 587   return stub;
 588 }
 589 
 590 
 591 oop SharedRuntime::retrieve_receiver( Symbol* sig, frame caller ) {
 592   assert(caller.is_interpreted_frame(), "");
 593   int args_size = ArgumentSizeComputer(sig).size() + 1;
 594   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
 595   oop result = cast_to_oop(*caller.interpreter_frame_tos_at(args_size - 1));
 596   // TODO: Erik: remove after integration with concurrent stack scanning
 597   result = NativeAccess<>::oop_load(&result);
 598   assert(Universe::heap()->is_in(result) && oopDesc::is_oop(result), "receiver must be an oop");
 599   return result;
 600 }
 601 
 602 
 603 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
 604   if (JvmtiExport::can_post_on_exceptions()) {
 605     vframeStream vfst(current, true);
 606     methodHandle method = methodHandle(current, vfst.method());
 607     address bcp = method()->bcp_from(vfst.bci());
 608     JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
 609   }
 610 
 611 #if INCLUDE_JVMCI
 612   if (EnableJVMCI && UseJVMCICompiler) {
 613     vframeStream vfst(current, true);
 614     methodHandle method = methodHandle(current, vfst.method());
 615     int bci = vfst.bci();
 616     MethodData* trap_mdo = method->method_data();
 617     if (trap_mdo != NULL) {

 729   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 730   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 731     // Allow abbreviated catch tables.  The idea is to allow a method
 732     // to materialize its exceptions without committing to the exact
 733     // routing of exceptions.  In particular this is needed for adding
 734     // a synthetic handler to unlock monitors when inlining
 735     // synchronized methods since the unlock path isn't represented in
 736     // the bytecodes.
 737     t = table.entry_for(catch_pco, -1, 0);
 738   }
 739 
 740 #ifdef COMPILER1
 741   if (t == NULL && nm->is_compiled_by_c1()) {
 742     assert(nm->unwind_handler_begin() != NULL, "");
 743     return nm->unwind_handler_begin();
 744   }
 745 #endif
 746 
 747   if (t == NULL) {
 748     ttyLocker ttyl;
 749     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
 750     tty->print_cr("   Exception:");
 751     exception->print();
 752     tty->cr();
 753     tty->print_cr(" Compiled exception table:");
 754     table.print();
 755     nm->print();
 756     // nm->print_code();
 757     guarantee(false, "missing exception handler");
 758     return NULL;
 759   }
 760 
 761   return nm->code_begin() + t->pco();
 762 }
 763 
 764 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
 765   // These errors occur only at call sites
 766   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
 767 JRT_END
 768 
 769 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
 770   // These errors occur only at call sites
 771   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
 772 JRT_END
 773 
 774 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
 775   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 776 JRT_END

1077     return caller->attached_method_before_pc(pc);
1078   }
1079   return NULL;
1080 }
1081 
1082 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
1083 // for a call currently in progress, i.e., arguments have been pushed on the stack
1084 // but the callee has not been invoked yet.  Caller frame must be compiled.
1085 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1086                                               CallInfo& callinfo, TRAPS) {
1087   Handle receiver;
1088   Handle nullHandle;  // create a handy null handle for exception returns
1089   JavaThread* current = THREAD;
1090 
1091   assert(!vfst.at_end(), "Java frame must exist");
1092 
1093   // Find caller and bci from vframe
1094   methodHandle caller(current, vfst.method());
1095   int          bci   = vfst.bci();
1096 
1097   if (caller->is_continuation_enter_intrinsic()) {
1098     bc = Bytecodes::_invokestatic;
1099     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1100     return receiver;
1101   }
1102 
1103   Bytecode_invoke bytecode(caller, bci);
1104   int bytecode_index = bytecode.index();
1105   bc = bytecode.invoke_code();
1106 
1107   methodHandle attached_method(current, extract_attached_method(vfst));
1108   if (attached_method.not_null()) {
1109     Method* callee = bytecode.static_target(CHECK_NH);
1110     vmIntrinsics::ID id = callee->intrinsic_id();
1111     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1112     // it attaches statically resolved method to the call site.
1113     if (MethodHandles::is_signature_polymorphic(id) &&
1114         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1115       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1116 
1117       // Adjust invocation mode according to the attached method.
1118       switch (bc) {
1119         case Bytecodes::_invokevirtual:
1120           if (attached_method->method_holder()->is_interface()) {
1121             bc = Bytecodes::_invokeinterface;
1122           }

1145                       bc != Bytecodes::_invokehandle;
1146 
1147   // Find receiver for non-static call
1148   if (has_receiver) {
1149     // This register map must be updated since we need to find the receiver for
1150     // compiled frames. The receiver might be in a register.
1151     RegisterMap reg_map2(current);
1152     frame stubFrame   = current->last_frame();
1153     // Caller-frame is a compiled frame
1154     frame callerFrame = stubFrame.sender(&reg_map2);
1155 
1156     if (attached_method.is_null()) {
1157       Method* callee = bytecode.static_target(CHECK_NH);
1158       if (callee == NULL) {
1159         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1160       }
1161     }
1162 
1163     // Retrieve from a compiled argument list
1164     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1165     assert(oopDesc::is_oop_or_null(receiver()), "");  // if (receiver() != NULL) oopDesc::verify(receiver());
1166 
1167     if (receiver.is_null()) {
1168       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1169     }
1170   }
1171 
1172   // Resolve method
1173   if (attached_method.not_null()) {
1174     // Parameterized by attached method.
1175     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1176   } else {
1177     // Parameterized by bytecode.
1178     constantPoolHandle constants(current, caller->constants());
1179     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1180   }
1181 
1182 #ifdef ASSERT
1183   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1184   if (has_receiver) {
1185     assert(receiver.not_null(), "should have thrown exception");

2124     // Only try quick_enter() if we're not trying to reach a safepoint
2125     // so that the calling thread reaches the safepoint more quickly.
2126     if (ObjectSynchronizer::quick_enter(obj, current, lock)) return;
2127   }
2128   // NO_ASYNC required because an async exception on the state transition destructor
2129   // would leave you with the lock held and it would never be released.
2130   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2131   // and the model is that an exception implies the method failed.
2132   JRT_BLOCK_NO_ASYNC
2133   Handle h_obj(THREAD, obj);
2134   ObjectSynchronizer::enter(h_obj, lock, current);
2135   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2136   JRT_BLOCK_END
2137 }
2138 
2139 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2140 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2141   SharedRuntime::monitor_enter_helper(obj, lock, current);
2142 JRT_END
2143 
2144 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C_inc_held_monitor_count(oopDesc* obj, BasicLock* lock, JavaThread* current))
2145   SharedRuntime::monitor_enter_helper(obj, lock, current);
2146   current->inc_held_monitor_count();
2147 JRT_END
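
The new entry point above differs from complete_monitor_locking_C only in bumping a per-thread count of held monitors; presumably the count lets the runtime decide whether a continuation is pinned by monitors and may not yield. A sketch of that bookkeeping, with a hypothetical thread type rather than the JavaThread API:

    struct ThreadModel {
      int held_monitor_count = 0;

      void inc_held_monitor_count() { ++held_monitor_count; }
      void dec_held_monitor_count() { --held_monitor_count; }

      // A continuation that holds monitors is treated as pinned and must not yield.
      bool pinned_by_monitors() const { return held_monitor_count > 0; }
    };
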
2148 
2149 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2150   assert(JavaThread::current() == current, "invariant");
2151   // Exit must be non-blocking, and therefore no exceptions can be thrown.
2152   ExceptionMark em(current);
2153   // The object could become unlocked through a JNI call, which we have no other checks for.
2154   // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2155   if (obj->is_unlocked()) {
2156     if (CheckJNICalls) {
2157       fatal("Object has been unlocked by JNI");
2158     }
2159     return;
2160   }
2161   ObjectSynchronizer::exit(obj, lock, current);
2162 }
2163 
2164 // Handles the uncommon cases of monitor unlocking in compiled code
2165 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2166   SharedRuntime::monitor_exit_helper(obj, lock, current);
2167 JRT_END
2168 

2922   }
2923 #endif
2924 
2925   new_adapter = AdapterBlob::create(&buffer);
2926   NOT_PRODUCT(int insts_size = buffer.insts_size());
2927   if (new_adapter == NULL) {
2928     // CodeCache is full, disable compilation
 2929     // Ought to log this, but the compile log is only per compile thread
 2930     // and we're some nondescript Java thread.
2931     return NULL;
2932   }
2933   entry->relocate(new_adapter->content_begin());
2934 #ifndef PRODUCT
 2935   // debugging support
2936   if (PrintAdapterHandlers || PrintStubCode) {
2937     ttyLocker ttyl;
2938     entry->print_adapter_on(tty);
2939     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2940                   _adapters->number_of_entries(), fingerprint->as_basic_args_string(),
2941                   fingerprint->as_string(), insts_size);
2942     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2943     if (Verbose || PrintStubCode) {
2944       address first_pc = entry->base_address();
2945       if (first_pc != NULL) {
2946         Disassembler::decode(first_pc, first_pc + insts_size, tty
2947                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2948         tty->cr();
2949       }
2950     }
2951   }
2952 #endif
2953 
2954   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2955   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2956   if (contains_all_checks || !VerifyAdapterCalls) {
2957     _adapters->add(entry);
2958   }
2959   return entry;
2960 }
2961 
2962 address AdapterHandlerEntry::base_address() {

3010     return false;
3011   }
3012 
3013   return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3014 }
3015 #endif
3016 
3017 
3018 /**
3019  * Create a native wrapper for this native method.  The wrapper converts the
3020  * Java-compiled calling convention to the native convention, handles
 3021  * arguments, and transitions to native.  On return from the native code we
 3022  * transition back to Java, blocking if a safepoint is in progress.
3023  */
3024 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3025   ResourceMark rm;
3026   nmethod* nm = NULL;
3027   address critical_entry = NULL;
3028 
3029   assert(method->is_native(), "must be native");
3030   assert(method->is_special_native_intrinsic() ||
3031          method->has_native_function(), "must have something valid to call!");
3032 
3033   if (CriticalJNINatives && !method->is_special_native_intrinsic()) {
3034     // We perform the I/O with transition to native before acquiring AdapterHandlerLibrary_lock.
3035     critical_entry = NativeLookup::lookup_critical_entry(method);
3036   }
3037 
3038   {
3039     // Perform the work while holding the lock, but perform any printing outside the lock
3040     MutexLocker mu(AdapterHandlerLibrary_lock);
3041     // See if somebody beat us to it
3042     if (method->code() != NULL) {
3043       return;
3044     }
3045 
3046     const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3047     assert(compile_id > 0, "Must generate native wrapper");
3048 
3049 
3050     ResourceMark rm;
3051     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
3052     if (buf != NULL) {
3053       CodeBuffer buffer(buf);
3054 
3055       if (method->is_continuation_enter_intrinsic()) {
3056         buffer.initialize_stubs_size(64);
3057       }
3058 
3059       struct { double data[20]; } locs_buf;
3060       struct { double data[20]; } stubs_locs_buf;
3061       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3062 #if defined(AARCH64)
3063       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3064       // in the constant pool to ensure ordering between the barrier and oops
3065       // accesses. For native_wrappers we need a constant.
3066       buffer.initialize_consts_size(8);
3067 #endif
3068       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3069       MacroAssembler _masm(&buffer);
3070 
3071       // Fill in the signature array, for the calling-convention call.
3072       const int total_args_passed = method->size_of_parameters();
3073 
3074       VMRegPair stack_regs[16];
3075       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3076 
3077       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3078                               method->is_static(), total_args_passed);
3079       BasicType* sig_bt = si.basic_types();
3080       assert(si.slots() == total_args_passed, "");
3081       BasicType ret_type = si.return_type();
3082 
3083       // Now get the compiled-Java arguments layout.
3084       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3085 
3086       // Generate the compiled-to-native wrapper code
3087       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type, critical_entry);
3088 

3245   for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3246        kptr2 < fr.interpreter_frame_monitor_begin();
3247        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3248     if (kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
3249       BasicLock *lock = kptr2->lock();
3250       // Inflate so the object's header no longer refers to the BasicLock.
3251       if (lock->displaced_header().is_unlocked()) {
3252         // The object is locked and the resulting ObjectMonitor* will also be
3253         // locked so it can't be async deflated until ownership is dropped.
3254         // See the big comment in basicLock.cpp: BasicLock::move_to().
3255         ObjectSynchronizer::inflate_helper(kptr2->obj());
3256       }
3257       // Now the displaced header is free to move because the
3258       // object's header no longer refers to it.
3259       buf[i++] = (intptr_t)lock->displaced_header().value();
3260       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3261     }
3262   }
3263   assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3264 
3265   RegisterMap map(current, false);
3266   frame sender = fr.sender(&map);
3267   if (sender.is_interpreted_frame()) {
3268     current->push_cont_fastpath(sender.sp());
3269   }
3270 
3271   return buf;
3272 JRT_END
3273 
3274 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3275   FREE_C_HEAP_ARRAY(intptr_t, buf);
3276 JRT_END
3277 
3278 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3279   AdapterHandlerTableIterator iter(_adapters);
3280   while (iter.has_next()) {
3281     AdapterHandlerEntry* a = iter.next();
3282     if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
3283   }
3284   return false;
3285 }
3286 
3287 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3288   AdapterHandlerTableIterator iter(_adapters);
3289   while (iter.has_next()) {
3290     AdapterHandlerEntry* a = iter.next();

3319 void AdapterHandlerLibrary::print_statistics() {
3320   _adapters->print_statistics();
3321 }
3322 
3323 #endif /* PRODUCT */
3324 
3325 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3326   StackOverflow* overflow_state = current->stack_overflow_state();
3327   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3328   overflow_state->set_reserved_stack_activation(current->stack_base());
3329 JRT_END
3330 
3331 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3332   ResourceMark rm(current);
3333   frame activation;
3334   CompiledMethod* nm = NULL;
3335   int count = 1;
3336 
3337   assert(fr.is_java_frame(), "Must start on Java frame");
3338 
3339   RegisterMap map(JavaThread::current(), false, false); // don't walk continuations
3340   for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3341     if (!fr.is_java_frame())
3342       continue;
3343 
3344     Method* method = NULL;
3345     bool found = false;
3346     if (fr.is_interpreted_frame()) {
3347       method = fr.interpreter_frame_method();
3348       if (method != NULL && method->has_reserved_stack_access()) {
3349         found = true;
3350       }
3351     } else {
3352       CodeBlob* cb = fr.cb();
3353       if (cb != NULL && cb->is_compiled()) {
3354         nm = cb->as_compiled_method();
3355         method = nm->method();
 3356         // scope_desc_near() must be used instead of scope_desc_at() because, on
 3357         // SPARC, the pcDesc can be on the delay slot after the call instruction.
3358         for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) {
3359           method = sd->method();
3360           if (method != NULL && method->has_reserved_stack_access()) {
3361             found = true;
 3362           }
 3363         }
 3364       }
 3365     }
3366     if (found) {
3367       activation = fr;
3368       warning("Potentially dangerous stack overflow in "
3369               "ReservedStackAccess annotated method %s [%d]",
3370               method->name_and_sig_as_C_string(), count++);
3371       EventReservedStackActivation event;
3372       if (event.should_commit()) {
3373         event.set_method(method);
3374         event.commit();
3375       }
3376     }





3377   }
3378   return activation;
3379 }
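
Compared with the earlier while(true)/java_sender() walk, the rewritten loop above iterates with frame::sender() under a RegisterMap that does not descend into continuations, skips non-Java frames explicitly, and remembers the last (oldest) flagged activation. The same shape on a toy frame list (illustrative only, not the frame API):

    #include <vector>

    struct FrameModel { bool is_java; bool reserved_access; };

    // Walk caller-ward (index 0 = youngest), visiting only Java frames and
    // keeping the last match, as the loop above keeps overwriting 'activation'.
    const FrameModel* find_activation(const std::vector<FrameModel>& stack) {
      const FrameModel* activation = nullptr;
      for (const FrameModel& fr : stack) {
        if (!fr.is_java) continue;
        if (fr.reserved_access) activation = &fr;
      }
      return activation;
    }
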
3380 
3381 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3382   // After any safepoint, just before going back to compiled code,
3383   // we inform the GC that we will be doing initializing writes to
3384   // this object in the future without emitting card-marks, so
 3385   // the GC may take any compensating steps.
3386 
3387   oop new_obj = current->vm_result();
3388   if (new_obj == NULL) return;
3389 
3390   BarrierSet *bs = BarrierSet::barrier_set();
3391   bs->on_slowpath_allocation_exit(current, new_obj);
3392 }