src/hotspot/cpu/x86/x86_64.ad

  711     st->print("cmpl    [r15_thread + #disarmed_guard_value_offset], #disarmed_guard_value\t");
  712     st->print("\n\t");
  713     st->print("je      fast_entry\t");
  714     st->print("\n\t");
  715     st->print("call    #nmethod_entry_barrier_stub\t");
  716     st->print("\n\tfast_entry:");
  717   }
  718   st->cr();
  719 }
  720 #endif
  721 
  722 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  723   Compile* C = ra_->C;
  724   C2_MacroAssembler _masm(&cbuf);
  725 
  726   int framesize = C->output()->frame_size_in_bytes();
  727   int bangsize = C->output()->bang_size_in_bytes();
  728 
  729   if (C->clinit_barrier_on_entry()) {
  730     assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  731     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
  732 
  733     Label L_skip_barrier;
  734     Register klass = rscratch1;
  735 
  736     __ mov_metadata(klass, C->method()->holder()->constant_encoding());
  737     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
  738 
  739     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
  740 
  741     __ bind(L_skip_barrier);
  742   }
  743 
  744   __ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != nullptr);
  745 
  746   C->output()->set_frame_complete(cbuf.insts_size());
  747 
  748   if (C->has_mach_constant_base_node()) {
  749     // NOTE: We set the table base offset here because users might be
  750     // emitted before MachConstantBaseNode.
  751     ConstantTable& constant_table = C->output()->constant_table();

 1770     }
 1771     __ bind(miss);
 1772   %}
 1773 
 1774   enc_class clear_avx %{
 1775     debug_only(int off0 = cbuf.insts_size());
 1776     if (generate_vzeroupper(Compile::current())) {
 1777       // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
 1778       // Clear upper bits of YMM registers when current compiled code uses
 1779       // wide vectors to avoid AVX <-> SSE transition penalty during call.
 1780       MacroAssembler _masm(&cbuf);
 1781       __ vzeroupper();
 1782     }
 1783     debug_only(int off1 = cbuf.insts_size());
 1784     assert(off1 - off0 == clear_avx_size(), "correct size prediction");
 1785   %}
 1786 
 1787   enc_class Java_To_Runtime(method meth) %{
 1788     // No relocation needed
 1789     MacroAssembler _masm(&cbuf);
 1790     __ mov64(r10, (int64_t) $meth$$method);
 1791     __ call(r10);
 1792     __ post_call_nop();
 1793   %}
 1794 
 1795   enc_class Java_Static_Call(method meth)
 1796   %{
 1797     // JAVA STATIC CALL
 1798     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
 1799     // determine who we intended to call.
 1800     MacroAssembler _masm(&cbuf);
 1801 
 1802     if (!_method) {
 1803       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, $meth$$method)));
 1804     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 1805       // The NOP here is purely to ensure that eliding a call to
 1806       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 1807       __ addr_nop_5();
 1808       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 1809     } else {
 1810       int method_index = resolved_method_index(cbuf);

  711     st->print("cmpl    [r15_thread + #disarmed_guard_value_offset], #disarmed_guard_value\t");
  712     st->print("\n\t");
  713     st->print("je      fast_entry\t");
  714     st->print("\n\t");
  715     st->print("call    #nmethod_entry_barrier_stub\t");
  716     st->print("\n\tfast_entry:");
  717   }
  718   st->cr();
  719 }
  720 #endif
  721 
  722 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  723   Compile* C = ra_->C;
  724   C2_MacroAssembler _masm(&cbuf);
  725 
  726   int framesize = C->output()->frame_size_in_bytes();
  727   int bangsize = C->output()->bang_size_in_bytes();
  728 
  729   if (C->clinit_barrier_on_entry()) {
  730     assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  731     assert(!C->method()->holder()->is_not_initialized() || C->do_clinit_barriers(), "initialization should have been started");
  732 
  733     Label L_skip_barrier;
  734     Register klass = rscratch1;
  735 
  736     __ mov_metadata(klass, C->method()->holder()->constant_encoding());
  737     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
  738 
  739     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
  740 
  741     __ bind(L_skip_barrier);
  742   }
  743 
  744   __ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != nullptr);
  745 
  746   C->output()->set_frame_complete(cbuf.insts_size());
  747 
  748   if (C->has_mach_constant_base_node()) {
  749     // NOTE: We set the table base offset here because users might be
  750     // emitted before MachConstantBaseNode.
  751     ConstantTable& constant_table = C->output()->constant_table();

 1770     }
 1771     __ bind(miss);
 1772   %}
 1773 
 1774   enc_class clear_avx %{
 1775     debug_only(int off0 = cbuf.insts_size());
 1776     if (generate_vzeroupper(Compile::current())) {
 1777       // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
 1778       // Clear upper bits of YMM registers when current compiled code uses
 1779       // wide vectors to avoid AVX <-> SSE transition penalty during call.
 1780       MacroAssembler _masm(&cbuf);
 1781       __ vzeroupper();
 1782     }
 1783     debug_only(int off1 = cbuf.insts_size());
 1784     assert(off1 - off0 == clear_avx_size(), "correct size prediction");
 1785   %}
 1786 
 1787   enc_class Java_To_Runtime(method meth) %{
 1788     // No relocation needed
 1789     MacroAssembler _masm(&cbuf);
 1790     if (SCCache::is_on_for_write()) {
 1791       // Create runtime_call_type relocation when caching code
 1792       __ lea(r10, RuntimeAddress((address)$meth$$method));
 1793     } else {
 1794       __ mov64(r10, (int64_t) $meth$$method);
 1795     }
 1796     __ call(r10);
 1797     __ post_call_nop();
 1798   %}
 1799 
 1800   enc_class Java_Static_Call(method meth)
 1801   %{
 1802     // JAVA STATIC CALL
 1803     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
 1804     // determine who we intended to call.
 1805     MacroAssembler _masm(&cbuf);
 1806 
 1807     if (!_method) {
 1808       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, $meth$$method)));
 1809     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 1810       // The NOP here is purely to ensure that eliding a call to
 1811       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 1812       __ addr_nop_5();
 1813       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 1814     } else {
 1815       int method_index = resolved_method_index(cbuf);