< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

Print this page




  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/vtableStubs.hpp"


  32 #include "gc/shared/gcLocker.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "logging/log.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "oops/compiledICHolder.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "runtime/safepointMechanism.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/vframeArray.hpp"
  41 #include "utilities/align.hpp"
  42 #include "vmreg_x86.inline.hpp"
  43 #ifdef COMPILER1
  44 #include "c1/c1_Runtime1.hpp"
  45 #endif
  46 #ifdef COMPILER2
  47 #include "opto/runtime.hpp"
  48 #endif
  49 #include "vm_version_x86.hpp"
  50 
  51 #define __ masm->


 958 
 959   {
 960 
 961     Label missed;
 962     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 963     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 964     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
 965     __ jcc(Assembler::notEqual, missed);
 966     // Method might have been compiled since the call site was patched to
 967 // interpreted; if that is the case, treat it as a miss so we can get
 968     // the call site corrected.
 969     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 970     __ jcc(Assembler::equal, skip_fixup);
 971 
 972     __ bind(missed);
 973     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 974   }
 975 
 976   address c2i_entry = __ pc();
 977 



 978   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 979 
 980   __ flush();
 981   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 982 }
 983 
 984 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 985                                          VMRegPair *regs,
 986                                          VMRegPair *regs2,
 987                                          int total_args_passed) {
 988   assert(regs2 == NULL, "not needed on x86");
 989 // We return the amount of VMRegImpl stack slots we need to reserve for all
 990 // the arguments NOT counting out_preserve_stack_slots.
 991 
 992   uint    stack = 0;        // All arguments on stack
 993 
 994   for( int i = 0; i < total_args_passed; i++) {
 995     // From the type and the argument number (count) compute the location
 996     switch( sig_bt[i] ) {
 997     case T_BOOLEAN:


1869 #endif // COMPILER1
1870 
1871   // The instruction at the verified entry point must be 5 bytes or longer
1872   // because it can be patched on the fly by make_non_entrant. The stack bang
1873   // instruction fits that requirement.
1874 
1875   // Generate stack overflow check
1876 
1877   if (UseStackBanging) {
1878     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
1879   } else {
1880     // need a 5 byte instruction to allow MT safe patching to non-entrant
1881     __ fat_nop();
1882   }
1883 
1884   // Generate a new frame for the wrapper.
1885   __ enter();
1886   // -2 because return address is already present and so is saved rbp
1887   __ subptr(rsp, stack_size - 2*wordSize);
1888 



1889   // Frame is now completed as far as size and linkage.
1890   int frame_complete = ((intptr_t)__ pc()) - start;
1891 
1892   if (UseRTMLocking) {
1893     // Abort RTM transaction before calling JNI
1894     // because critical section will be large and will be
1895     // aborted anyway. Also nmethod could be deoptimized.
1896     __ xabort(0);
1897   }
1898 
1899   // Calculate the difference between rsp and rbp,. We need to know it
1900   // after the native call because on windows Java Natives will pop
1901   // the arguments and it is painful to do rsp relative addressing
1902   // in a platform independent way. So after the call we switch to
1903   // rbp, relative addressing.
1904 
1905   int fp_adjustment = stack_size - 2*wordSize;
1906 
1907 #ifdef COMPILER2
1908   // C2 may leave the stack dirty if not in SSE2+ mode
1909   if (UseSSE >= 2) {
1910     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1911   } else {
1912     __ empty_FPU_stack();
1913   }
1914 #endif /* COMPILER2 */
1915 
1916   // Compute the rbp, offset for any slots used after the jni call
1917 
1918   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1919 
1920   // We use rdi as a thread pointer because it is callee save and
1921   // if we load it once it is usable thru the entire wrapper
1922   const Register thread = rdi;
1923 
1924   // We use rsi as the oop handle for the receiver/klass
1925   // It is callee save so it survives the call to native
1926 
1927   const Register oop_handle_reg = rsi;
1928 
1929   __ get_thread(thread);
1930 
1931   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
1932     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1933                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1934   }
1935 
1936   //
1937   // We immediately shuffle the arguments so that any vm call we have to
1938   // make from here on out (sync slow path, jvmti, etc.) we will have
1939   // captured the oops from our caller and have a valid oopMap for
1940   // them.
1941 
1942   // -----------------
1943   // The Grand Shuffle
1944   //
1945   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1946   // and, if static, the class mirror instead of a receiver.  This pretty much
1947   // guarantees that register layout will not match (and x86 doesn't use reg
1948   // parms though amd does).  Since the native abi doesn't use register args
1948 // and the java conventions do, we don't have to worry about collisions.




  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "gc/shared/barrierSetAssembler.hpp"
  34 #include "gc/shared/gcLocker.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "oops/compiledICHolder.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/vframeArray.hpp"
  43 #include "utilities/align.hpp"
  44 #include "vmreg_x86.inline.hpp"
  45 #ifdef COMPILER1
  46 #include "c1/c1_Runtime1.hpp"
  47 #endif
  48 #ifdef COMPILER2
  49 #include "opto/runtime.hpp"
  50 #endif
  51 #include "vm_version_x86.hpp"
  52 
  53 #define __ masm->


 960 
 961   {
 962 
 963     Label missed;
 964     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 965     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 966     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
 967     __ jcc(Assembler::notEqual, missed);
 968     // Method might have been compiled since the call site was patched to
 969 // interpreted; if that is the case, treat it as a miss so we can get
 970     // the call site corrected.
 971     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 972     __ jcc(Assembler::equal, skip_fixup);
 973 
 974     __ bind(missed);
 975     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 976   }
 977 
 978   address c2i_entry = __ pc();
 979 
 980   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 981   bs->c2i_entry_barrier(masm);
 982 
 983   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 984 
 985   __ flush();
 986   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 987 }
 988 
 989 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 990                                          VMRegPair *regs,
 991                                          VMRegPair *regs2,
 992                                          int total_args_passed) {
 993   assert(regs2 == NULL, "not needed on x86");
 994 // We return the amount of VMRegImpl stack slots we need to reserve for all
 995 // the arguments NOT counting out_preserve_stack_slots.
 996 
 997   uint    stack = 0;        // All arguments on stack
 998 
 999   for( int i = 0; i < total_args_passed; i++) {
1000     // From the type and the argument number (count) compute the location
1001     switch( sig_bt[i] ) {
1002     case T_BOOLEAN:


1874 #endif // COMPILER1
1875 
1876   // The instruction at the verified entry point must be 5 bytes or longer
1877   // because it can be patched on the fly by make_non_entrant. The stack bang
1878   // instruction fits that requirement.
1879 
1880   // Generate stack overflow check
1881 
1882   if (UseStackBanging) {
1883     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
1884   } else {
1885     // need a 5 byte instruction to allow MT safe patching to non-entrant
1886     __ fat_nop();
1887   }
1888 
1889   // Generate a new frame for the wrapper.
1890   __ enter();
1891   // -2 because return address is already present and so is saved rbp
1892   __ subptr(rsp, stack_size - 2*wordSize);
1893 
1894   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1895   bs->nmethod_entry_barrier(masm);
1896 
1897   // Frame is now completed as far as size and linkage.
1898   int frame_complete = ((intptr_t)__ pc()) - start;
1899 
1900   if (UseRTMLocking) {
1901     // Abort RTM transaction before calling JNI
1902     // because critical section will be large and will be
1903     // aborted anyway. Also nmethod could be deoptimized.
1904     __ xabort(0);
1905   }
1906 
1907   // Calculate the difference between rsp and rbp,. We need to know it
1908   // after the native call because on windows Java Natives will pop
1909   // the arguments and it is painful to do rsp relative addressing
1910   // in a platform independent way. So after the call we switch to
1911   // rbp, relative addressing.
1912 
1913   int fp_adjustment = stack_size - 2*wordSize;
1914 
1915 #ifdef COMPILER2
1916   // C2 may leave the stack dirty if not in SSE2+ mode
1917   if (UseSSE >= 2) {
1918     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1919   } else {
1920     __ empty_FPU_stack();
1921   }
1922 #endif /* COMPILER2 */
1923 
1924   // Compute the rbp, offset for any slots used after the jni call
1925 
1926   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1927 
1928   // We use rdi as a thread pointer because it is callee save and
1929   // if we load it once it is usable thru the entire wrapper
1930   const Register thread = rdi;
1931 
1932    // We use rsi as the oop handle for the receiver/klass
1933    // It is callee save so it survives the call to native
1934 
1935    const Register oop_handle_reg = rsi;
1936 
1937    __ get_thread(thread);
1938 
1939   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
1940     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1941                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1942   }
1943 
1944   //
1945   // We immediately shuffle the arguments so that any vm call we have to
1946   // make from here on out (sync slow path, jvmti, etc.) we will have
1947   // captured the oops from our caller and have a valid oopMap for
1948   // them.
1949 
1950   // -----------------
1951   // The Grand Shuffle
1952   //
1953   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1954   // and, if static, the class mirror instead of a receiver.  This pretty much
1955   // guarantees that register layout will not match (and x86 doesn't use reg
1956   // parms though amd does).  Since the native abi doesn't use register args
1956 // and the java conventions do, we don't have to worry about collisions.


< prev index next >