src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -40,10 +40,11 @@
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
 #include "utilities/formatBuffer.hpp"
+#include "utilities/macros.hpp"
 #include "vm_version_x86.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif

@@ -51,10 +52,14 @@
 #include "opto/runtime.hpp"
 #endif
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciJavaClasses.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#endif
 
 #define __ masm->
 
 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
 

@@ -2128,11 +2133,11 @@
   // We use r14 as the oop handle for the receiver/klass
   // It is callee save so it survives the call to native
 
   const Register oop_handle_reg = r14;
 
-  if (is_critical_native) {
+  if (is_critical_native SHENANDOAHGC_ONLY(&& !UseShenandoahGC)) {
     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
 
   //
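
In a build without Shenandoah support the new condition collapses back to plain if (is_critical_native): SHENANDOAHGC_ONLY comes from the newly included utilities/macros.hpp and, following the usual *_ONLY pattern there, compiles its argument only when that GC is built in. A minimal sketch of the pattern (see macros.hpp for the real definition):

    #if INCLUDE_SHENANDOAHGC
    #define SHENANDOAHGC_ONLY(code) code
    #else
    #define SHENANDOAHGC_ONLY(code)
    #endif

When Shenandoah is selected at runtime, the GCLocker-based check_needs_gc_for_critical_native path is skipped entirely; instead, the hunks below pin each array argument individually around the native call.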

@@ -2185,10 +2190,16 @@
   // This may iterate in two different directions depending on the
   // kind of native it is.  The reason is that for regular JNI natives
   // the incoming and outgoing registers are offset upwards and for
   // critical natives they are offset down.
   GrowableArray<int> arg_order(2 * total_in_args);
+#if INCLUDE_SHENANDOAHGC
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing a register-based array argument
+  int pinned_slot = oop_handle_offset;
+#endif
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(rbx->as_VMReg());
 
   if (!is_critical_native) {
     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {

@@ -2232,10 +2243,18 @@
     }
 #endif /* ASSERT */
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
+#if INCLUDE_SHENANDOAHGC
+          // Pin the array before unpacking it into a raw pointer below
+          if (UseShenandoahGC) {
+            assert(pinned_slot <= stack_slots, "overflow");
+            ShenandoahBarrierSet::assembler()->pin_critical_native_array(masm, in_regs[i], pinned_slot);
+            pinned_args.append(i);
+          }
+#endif
           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
 #ifdef ASSERT
           if (out_regs[c_arg].first()->is_Register()) {
             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
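
unpack_array_argument rewrites an array argument into the raw (length, body pointer) pair that a critical native consumes. Under a concurrently moving collector that raw pointer would go stale if the array were evacuated mid-call, so the array is pinned first. For context, a critical native receives arrays like this (hypothetical example; the JavaCritical_ naming is the established critical-natives convention):

    #include <jni.h>
    // Hypothetical Java declaration: static native int sum(int[] data);
    // The critical entry point receives the array as (length, raw pointer),
    // valid only while the array cannot move; the wrapper keeps it valid
    // under Shenandoah by pinning the oop before the call and unpinning after.
    extern "C" JNIEXPORT jint JNICALL JavaCritical_Demo_sum(jint len, jint* data) {
      jint s = 0;
      for (jint i = 0; i < len; i++) {
        s += data[i];
      }
      return s;
    }

pinned_slot is evidently passed by reference and advanced per array, spilling each pinned oop into a caller-frame stack slot; the unpin pass after the call re-derives the same slots by replaying pinned_slot from oop_handle_offset.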

@@ -2448,10 +2467,26 @@
   case T_VOID: break;
   case T_LONG: break;
   default       : ShouldNotReachHere();
   }
 
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    // Unpin the arguments pinned before the call, replaying the same slot order
+    pinned_slot = oop_handle_offset;
+    if (pinned_args.length() > 0) {
+      // Save the native result; the unpin runtime calls below may clobber it.
+      save_native_result(masm, ret_type, stack_slots);
+      for (int index = 0; index < pinned_args.length(); index++) {
+        int i = pinned_args.at(index);
+        assert(pinned_slot <= stack_slots, "overflow");
+        ShenandoahBarrierSet::assembler()->unpin_critical_native_array(masm, in_regs[i], pinned_slot);
+      }
+      restore_native_result(masm, ret_type, stack_slots);
+    }
+  }
+#endif
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
   //     VM thread changes sync state to synchronizing and suspends threads for GC.
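
The unpin loop above resets pinned_slot to oop_handle_offset and walks pinned_args in pin order, so each array is read back from exactly the slot its pin spilled it to. The native result is saved and restored around the loop because unpinning makes runtime calls that clobber the return registers. A rough sketch of what the unpin hook plausibly emits per array (assumed, not copied from the ShenandoahBarrierSetAssembler sources; SharedRuntime::unpin_object is assumed to be the leaf entry matching SharedRuntime::pin_object):

    Register tmp = rbx;
    Label is_null;
    // Reload the oop spilled by the pin pass and advance to the next slot.
    __ movptr(tmp, Address(rsp, pinned_slot * VMRegImpl::stack_slot_size));
    pinned_slot += VMRegImpl::slots_per_word;
    // A null array was never pinned, so skip the runtime call.
    __ testptr(tmp, tmp);
    __ jcc(Assembler::equal, is_null);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
                    r15_thread, tmp);
    __ bind(is_null);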