< prev index next >

src/hotspot/share/runtime/sharedRuntime.cpp

Print this page




 449 
 450 // Exception handling across interpreter/compiler boundaries
 451 //
 452 // exception_handler_for_return_address(...) returns the continuation address.
 453 // The continuation address is the entry point of the exception handler of the
 454 // previous frame depending on the return address.
 455 
 456 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 457   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 458   assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 459 
 460   // Reset method handle flag.
 461   thread->set_is_method_handle_return(false);
 462 
 463 #if INCLUDE_JVMCI
 464   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 465   // and other exception handler continuations do not read it
 466   thread->set_exception_pc(NULL);
 467 #endif // INCLUDE_JVMCI
 468 




 469   // The fastest case first
 470   CodeBlob* blob = CodeCache::find_blob(return_address);
 471   CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
 472   if (nm != NULL) {
 473     // Set flag if return address is a method handle call site.
 474     thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 475     // native nmethods don't have exception handlers
 476     assert(!nm->is_native_method(), "no exception handler");
 477     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 478     if (nm->is_deopt_pc(return_address)) {
 479       // If we come here because of a stack overflow, the stack may be
 480       // unguarded. Reguard the stack otherwise if we return to the
 481       // deopt blob and the stack bang causes a stack overflow we
 482       // crash.
 483       bool guard_pages_enabled = thread->stack_guards_enabled();
 484       if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 485       if (thread->reserved_stack_activation() != thread->stack_base()) {
 486         thread->set_reserved_stack_activation(thread->stack_base());
 487       }
 488       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");


 490     } else {
 491       return nm->exception_begin();
 492     }
 493   }
 494 
 495   // Entry code
 496   if (StubRoutines::returns_to_call_stub(return_address)) {
 497     return StubRoutines::catch_exception_entry();
 498   }
 499   // Interpreted code
 500   if (Interpreter::contains(return_address)) {
 501     return Interpreter::rethrow_exception_entry();
 502   }
 503 
 504   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 505   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 506 
 507 #ifndef PRODUCT
 508   { ResourceMark rm;
 509     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));

 510     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
 511     tty->print_cr("b) other problem");
 512   }
 513 #endif // PRODUCT
 514 
 515   ShouldNotReachHere();
 516   return NULL;
 517 }
 518 
 519 
// JRT wrapper around raw_exception_handler_for_return_address(); entered from
// generated code, so it must stay a leaf call (no safepoint, no GC).
JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
  return raw_exception_handler_for_return_address(thread, return_address);
JRT_END
 523 
 524 
 525 address SharedRuntime::get_poll_stub(address pc) {
 526   address stub;
 527   // Look up the code blob
 528   CodeBlob *cb = CodeCache::find_blob(pc);
 529 
 530   // Should be an nmethod
 531   guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
 532 
 533   // Look up the relocation information
 534   assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
 535     "safepoint polling: type must be poll");
 536 
 537 #ifdef ASSERT
 538   if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
 539     tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
 540     Disassembler::decode(cb);
 541     fatal("Only polling locations are used for safepoint");
 542   }
 543 #endif
 544 
 545   bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
 546   bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
 547   if (at_poll_return) {
 548     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 549            "polling page return stub not created yet");
 550     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 551   } else if (has_wide_vectors) {
 552     assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
 553            "polling page vectors safepoint stub not created yet");
 554     stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
 555   } else {


 682   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 683   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 684     // Allow abbreviated catch tables.  The idea is to allow a method
 685     // to materialize its exceptions without committing to the exact
 686     // routing of exceptions.  In particular this is needed for adding
 687     // a synthetic handler to unlock monitors when inlining
 688     // synchronized methods since the unlock path isn't represented in
 689     // the bytecodes.
 690     t = table.entry_for(catch_pco, -1, 0);
 691   }
 692 
 693 #ifdef COMPILER1
 694   if (t == NULL && nm->is_compiled_by_c1()) {
 695     assert(nm->unwind_handler_begin() != NULL, "");
 696     return nm->unwind_handler_begin();
 697   }
 698 #endif
 699 
 700   if (t == NULL) {
 701     ttyLocker ttyl;
 702     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", p2i(ret_pc), handler_bci);
 703     tty->print_cr("   Exception:");
 704     exception->print();
 705     tty->cr();
 706     tty->print_cr(" Compiled exception table :");
 707     table.print();
 708     nm->print_code();

 709     guarantee(false, "missing exception handler");
 710     return NULL;
 711   }
 712 
 713   return nm->code_begin() + t->pco();
 714 }
 715 
// Throws java.lang.AbstractMethodError in the given thread and posts the
// corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END
 720 
// Throws java.lang.IncompatibleClassChangeError (detail message "vtable stub")
// in the given thread and posts the corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub")
JRT_END
 725 
// Throws java.lang.ArithmeticException (detail message "/ by zero") in the
// given thread and posts the corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


2735 #endif
2736 
2737       new_adapter = AdapterBlob::create(&buffer);
2738       NOT_PRODUCT(insts_size = buffer.insts_size());
2739     }
2740     if (new_adapter == NULL) {
2741       // CodeCache is full, disable compilation
2742       // Ought to log this but compile log is only per compile thread
2743       // and we're some non descript Java thread.
2744       return NULL; // Out of CodeCache space
2745     }
2746     entry->relocate(new_adapter->content_begin());
2747 #ifndef PRODUCT
 2748     // debugging support
2749     if (PrintAdapterHandlers || PrintStubCode) {
2750       ttyLocker ttyl;
2751       entry->print_adapter_on(tty);
2752       tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
2753                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2754                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
2755       tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
2756       if (Verbose || PrintStubCode) {
2757         address first_pc = entry->base_address();
2758         if (first_pc != NULL) {
2759           Disassembler::decode(first_pc, first_pc + insts_size);
2760           tty->cr();
2761         }
2762       }
2763     }
2764 #endif
2765     // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2766     // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2767     if (contains_all_checks || !VerifyAdapterCalls) {
2768       _adapters->add(entry);
2769     }
2770   }
2771   // Outside of the lock
2772   if (new_adapter != NULL) {
2773     char blob_id[256];
2774     jio_snprintf(blob_id,
2775                  sizeof(blob_id),


3035     for (int i = 0; i < cnt; i++) {
3036       VMReg reg1 = regs[i].first();
3037       if (reg1->is_stack()) {
3038         // Yuck
3039         reg1 = reg1->bias(out_preserve_stack_slots());
3040       }
3041       VMReg reg2 = regs[i].second();
3042       if (reg2->is_stack()) {
3043         // Yuck
3044         reg2 = reg2->bias(out_preserve_stack_slots());
3045       }
3046       regs[i].set_pair(reg2, reg1);
3047     }
3048   }
3049 
3050   // results
3051   *arg_size = cnt;
3052   return regs;
3053 }
3054 









3055 // OSR Migration Code
3056 //
 3057 // This code is used to convert interpreter frames into compiled frames.  It is
3058 // called from very start of a compiled OSR nmethod.  A temp array is
3059 // allocated to hold the interesting bits of the interpreter frame.  All
3060 // active locks are inflated to allow them to move.  The displaced headers and
3061 // active interpreter locals are copied into the temp buffer.  Then we return
3062 // back to the compiled code.  The compiled code then pops the current
3063 // interpreter frame off the stack and pushes a new compiled frame.  Then it
3064 // copies the interpreter locals and displaced headers where it wants.
3065 // Finally it calls back to free the temp buffer.
3066 //
3067 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3068 
3069 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
3070 
3071   //
3072   // This code is dependent on the memory layout of the interpreter local
3073   // array and the monitors. On all of our platforms the layout is identical
 3074   // so this code is shared. If some platform lays their arrays out


3185 }
3186 
3187 #endif /* PRODUCT */
3188 
3189 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
3190   assert(thread->is_Java_thread(), "Only Java threads have a stack reserved zone");
3191   if (thread->stack_reserved_zone_disabled()) {
3192   thread->enable_stack_reserved_zone();
3193   }
3194   thread->set_reserved_stack_activation(thread->stack_base());
3195 JRT_END
3196 
// Walks the Java frames starting at 'fr' toward its callers, looking for
// methods that carry the @ReservedStackAccess annotation
// (method->has_reserved_stack_access()). Every match is reported with a
// warning() and a JFR ReservedStackActivation event; 'activation' is
// overwritten on each match, so the frame returned is the last match seen
// in the walk (the one closest to the first Java frame). If no annotated
// method is found, a default-constructed (empty) frame is returned.
frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
  ResourceMark rm(thread);
  frame activation;                 // last annotated frame seen; empty if none
  CompiledMethod* nm = NULL;
  int count = 1;                    // running match counter used in the warning text

  assert(fr.is_java_frame(), "Must start on Java frame");

  while (true) {
    Method* method = NULL;
    bool found = false;
    if (fr.is_interpreted_frame()) {
      // Interpreted frame: a single method, read straight from the frame.
      method = fr.interpreter_frame_method();
      if (method != NULL && method->has_reserved_stack_access()) {
        found = true;
      }
    } else {
      CodeBlob* cb = fr.cb();
      if (cb != NULL && cb->is_compiled()) {
        nm = cb->as_compiled_method();
        method = nm->method();
        // Compiled frame: check the whole inlining chain at this pc, since any
        // method in the scope stack may carry the annotation.
        // scope_desc_near() must be used, instead of scope_desc_at() because on
        // SPARC, the pcDesc can be on the delay slot after the call instruction.
        for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) {
          method = sd->method();
          if (method != NULL && method->has_reserved_stack_access()) {
            found = true;
          }
        }
      }
    }
    if (found) {
      activation = fr;
      warning("Potentially dangerous stack overflow in "
              "ReservedStackAccess annotated method %s [%d]",
              method->name_and_sig_as_C_string(), count++);
      EventReservedStackActivation event;
      if (event.should_commit()) {
        event.set_method(method);
        event.commit();
      }
    }
    if (fr.is_first_java_frame()) {
      break;
    } else {
      fr = fr.java_sender();
    }
  }
  return activation;
}
3247 
3248 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
3249   // After any safepoint, just before going back to compiled code,
3250   // we inform the GC that we will be doing initializing writes to
3251   // this object in the future without emitting card-marks, so
3252   // GC may take any compensating steps.
3253 
3254   oop new_obj = thread->vm_result();
3255   if (new_obj == NULL) return;
3256 
3257   BarrierSet *bs = BarrierSet::barrier_set();
3258   bs->on_slowpath_allocation_exit(thread, new_obj);
3259 }


 449 
 450 // Exception handling across interpreter/compiler boundaries
 451 //
 452 // exception_handler_for_return_address(...) returns the continuation address.
 453 // The continuation address is the entry point of the exception handler of the
 454 // previous frame depending on the return address.
 455 
 456 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 457   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 458   assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 459 
 460   // Reset method handle flag.
 461   thread->set_is_method_handle_return(false);
 462 
 463 #if INCLUDE_JVMCI
 464   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 465   // and other exception handler continuations do not read it
 466   thread->set_exception_pc(NULL);
 467 #endif // INCLUDE_JVMCI
 468 
 469   if (Continuation::is_return_barrier_entry(return_address)) {
 470     return StubRoutines::cont_returnBarrierExc();
 471   }
 472   
 473   // The fastest case first
 474   CodeBlob* blob = CodeCache::find_blob(return_address);
 475   CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
 476   if (nm != NULL) {
 477     // Set flag if return address is a method handle call site.
 478     thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 479     // native nmethods don't have exception handlers
 480     assert(!nm->is_native_method(), "no exception handler");
 481     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 482     if (nm->is_deopt_pc(return_address)) {
 483       // If we come here because of a stack overflow, the stack may be
 484       // unguarded. Reguard the stack otherwise if we return to the
 485       // deopt blob and the stack bang causes a stack overflow we
 486       // crash.
 487       bool guard_pages_enabled = thread->stack_guards_enabled();
 488       if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 489       if (thread->reserved_stack_activation() != thread->stack_base()) {
 490         thread->set_reserved_stack_activation(thread->stack_base());
 491       }
 492       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");


 494     } else {
 495       return nm->exception_begin();
 496     }
 497   }
 498 
 499   // Entry code
 500   if (StubRoutines::returns_to_call_stub(return_address)) {
 501     return StubRoutines::catch_exception_entry();
 502   }
 503   // Interpreted code
 504   if (Interpreter::contains(return_address)) {
 505     return Interpreter::rethrow_exception_entry();
 506   }
 507 
 508   guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
 509   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 510 
 511 #ifndef PRODUCT
 512   { ResourceMark rm;
 513     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
 514     os::print_location(tty, (intptr_t)return_address);
 515     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
 516     tty->print_cr("b) other problem");
 517   }
 518 #endif // PRODUCT
 519 
 520   ShouldNotReachHere();
 521   return NULL;
 522 }
 523 
 524 
// JRT wrapper around raw_exception_handler_for_return_address(); entered from
// generated code, so it must stay a leaf call (no safepoint, no GC).
JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
  return raw_exception_handler_for_return_address(thread, return_address);
JRT_END
 528 
 529 
 530 address SharedRuntime::get_poll_stub(address pc) {
 531   address stub;
 532   // Look up the code blob
 533   CodeBlob *cb = CodeCache::find_blob(pc);
 534 
 535   // Should be an nmethod
 536   guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
 537 
 538   // Look up the relocation information
 539   assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
 540       "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
 541 
 542 #ifdef ASSERT
 543   if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
 544     tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
 545     Disassembler::decode(cb);
 546     fatal("Only polling locations are used for safepoint");
 547   }
 548 #endif
 549 
 550   bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
 551   bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
 552   if (at_poll_return) {
 553     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 554            "polling page return stub not created yet");
 555     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 556   } else if (has_wide_vectors) {
 557     assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
 558            "polling page vectors safepoint stub not created yet");
 559     stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
 560   } else {


 687   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 688   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 689     // Allow abbreviated catch tables.  The idea is to allow a method
 690     // to materialize its exceptions without committing to the exact
 691     // routing of exceptions.  In particular this is needed for adding
 692     // a synthetic handler to unlock monitors when inlining
 693     // synchronized methods since the unlock path isn't represented in
 694     // the bytecodes.
 695     t = table.entry_for(catch_pco, -1, 0);
 696   }
 697 
 698 #ifdef COMPILER1
 699   if (t == NULL && nm->is_compiled_by_c1()) {
 700     assert(nm->unwind_handler_begin() != NULL, "");
 701     return nm->unwind_handler_begin();
 702   }
 703 #endif
 704 
 705   if (t == NULL) {
 706     ttyLocker ttyl;
 707     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
 708     tty->print_cr("   Exception:");
 709     exception->print();
 710     tty->cr();
 711     tty->print_cr(" Compiled exception table :");
 712     table.print();
 713     nm->print();
 714     // nm->print_code();
 715     guarantee(false, "missing exception handler");
 716     return NULL;
 717   }
 718 
 719   return nm->code_begin() + t->pco();
 720 }
 721 
// Throws java.lang.AbstractMethodError in the given thread and posts the
// corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END
 726 
// Throws java.lang.IncompatibleClassChangeError (detail message "vtable stub")
// in the given thread and posts the corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END
 731 
// Throws java.lang.ArithmeticException (detail message "/ by zero") in the
// given thread and posts the corresponding JVMTI exception event.
JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


2741 #endif
2742 
2743       new_adapter = AdapterBlob::create(&buffer);
2744       NOT_PRODUCT(insts_size = buffer.insts_size());
2745     }
2746     if (new_adapter == NULL) {
2747       // CodeCache is full, disable compilation
2748       // Ought to log this but compile log is only per compile thread
2749       // and we're some non descript Java thread.
2750       return NULL; // Out of CodeCache space
2751     }
2752     entry->relocate(new_adapter->content_begin());
2753 #ifndef PRODUCT
 2754     // debugging support
2755     if (PrintAdapterHandlers || PrintStubCode) {
2756       ttyLocker ttyl;
2757       entry->print_adapter_on(tty);
2758       tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
2759                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2760                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
2761       tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2762       if (Verbose || PrintStubCode) {
2763         address first_pc = entry->base_address();
2764         if (first_pc != NULL) {
2765           Disassembler::decode(first_pc, first_pc + insts_size);
2766           tty->cr();
2767         }
2768       }
2769     }
2770 #endif
2771     // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2772     // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2773     if (contains_all_checks || !VerifyAdapterCalls) {
2774       _adapters->add(entry);
2775     }
2776   }
2777   // Outside of the lock
2778   if (new_adapter != NULL) {
2779     char blob_id[256];
2780     jio_snprintf(blob_id,
2781                  sizeof(blob_id),


3041     for (int i = 0; i < cnt; i++) {
3042       VMReg reg1 = regs[i].first();
3043       if (reg1->is_stack()) {
3044         // Yuck
3045         reg1 = reg1->bias(out_preserve_stack_slots());
3046       }
3047       VMReg reg2 = regs[i].second();
3048       if (reg2->is_stack()) {
3049         // Yuck
3050         reg2 = reg2->bias(out_preserve_stack_slots());
3051       }
3052       regs[i].set_pair(reg2, reg1);
3053     }
3054   }
3055 
3056   // results
3057   *arg_size = cnt;
3058   return regs;
3059 }
3060 
3061 JRT_LEAF(jlong, SharedRuntime::continuation_getFP(JavaThread* thread) )
3062   RegisterMap reg_map2(thread);
3063   assert(false, "");
3064   frame stubFrame   = thread->last_frame();
3065     // Caller-frame is a compiled frame
3066   frame callerFrame = stubFrame.sender(&reg_map2);
3067   return (jlong) callerFrame.real_fp();
3068 JRT_END
3069 
3070 // OSR Migration Code
3071 //
 3072 // This code is used to convert interpreter frames into compiled frames.  It is
3073 // called from very start of a compiled OSR nmethod.  A temp array is
3074 // allocated to hold the interesting bits of the interpreter frame.  All
3075 // active locks are inflated to allow them to move.  The displaced headers and
3076 // active interpreter locals are copied into the temp buffer.  Then we return
3077 // back to the compiled code.  The compiled code then pops the current
3078 // interpreter frame off the stack and pushes a new compiled frame.  Then it
3079 // copies the interpreter locals and displaced headers where it wants.
3080 // Finally it calls back to free the temp buffer.
3081 //
3082 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3083 
3084 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
3085 
3086   //
3087   // This code is dependent on the memory layout of the interpreter local
3088   // array and the monitors. On all of our platforms the layout is identical
 3089   // so this code is shared. If some platform lays their arrays out


3200 }
3201 
3202 #endif /* PRODUCT */
3203 
3204 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
3205   assert(thread->is_Java_thread(), "Only Java threads have a stack reserved zone");
3206   if (thread->stack_reserved_zone_disabled()) {
3207   thread->enable_stack_reserved_zone();
3208   }
3209   thread->set_reserved_stack_activation(thread->stack_base());
3210 JRT_END
3211 
// Walks the frames starting at 'fr' toward their callers (including frames
// of mounted continuations — see the RegisterMap constructed below),
// looking for methods that carry the @ReservedStackAccess annotation
// (method->has_reserved_stack_access()). Every match is reported with a
// warning() and a JFR ReservedStackActivation event; 'activation' is
// overwritten on each match, so the frame returned is the last match seen
// in the walk. If no annotated method is found, a default-constructed
// (empty) frame is returned.
frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
  ResourceMark rm(thread);
  frame activation;                 // last annotated frame seen; empty if none
  CompiledMethod* nm = NULL;
  int count = 1;                    // running match counter used in the warning text

  assert(fr.is_java_frame(), "Must start on Java frame");

  RegisterMap map(JavaThread::current(), false, true); // don't update; walk continuations
  for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
    // Non-Java frames (stubs, native wrappers) carry no method to inspect.
    if (!fr.is_java_frame())
      continue;

    Method* method = NULL;
    bool found = false;
    if (fr.is_interpreted_frame()) {
      // Interpreted frame: a single method, read straight from the frame.
      method = fr.interpreter_frame_method();
      if (method != NULL && method->has_reserved_stack_access()) {
        found = true;
      }
    } else {
      CodeBlob* cb = fr.cb();
      if (cb != NULL && cb->is_compiled()) {
        nm = cb->as_compiled_method();
        method = nm->method();
        // Compiled frame: check the whole inlining chain at this pc, since any
        // method in the scope stack may carry the annotation.
        // scope_desc_near() must be used, instead of scope_desc_at() because on
        // SPARC, the pcDesc can be on the delay slot after the call instruction.
        for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) {
          method = sd->method();
          if (method != NULL && method->has_reserved_stack_access()) {
            found = true;
          }
        }
      }
    }
    if (found) {
      activation = fr;
      warning("Potentially dangerous stack overflow in "
              "ReservedStackAccess annotated method %s [%d]",
              method->name_and_sig_as_C_string(), count++);
      EventReservedStackActivation event;
      if (event.should_commit()) {
        event.set_method(method);
        event.commit();
      }
    }
  }
  return activation;
}
3261 
// Called after a slow-path allocation, just before returning to compiled
// code: tells the GC barrier set that the thread will perform initializing
// writes to the new object without emitting card-marks, so the GC can take
// any compensating steps it needs.
void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
  // After any safepoint, just before going back to compiled code,
  // we inform the GC that we will be doing initializing writes to
  // this object in the future without emitting card-marks, so
  // GC may take any compensating steps.

  // vm_result() holds the freshly allocated object; nothing to do if the
  // slow path did not produce one.
  oop new_obj = thread->vm_result();
  if (new_obj == NULL) return;

  BarrierSet *bs = BarrierSet::barrier_set();
  bs->on_slowpath_allocation_exit(thread, new_obj);
}
< prev index next >