
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp



   1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "gc/shared/gcLocker.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/compiledICHolder.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/vframeArray.hpp"
  43 #include "utilities/align.hpp"
  44 #include "utilities/formatBuffer.hpp"
  45 #include "vm_version_x86.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 #ifdef COMPILER1
  48 #include "c1/c1_Runtime1.hpp"
  49 #endif
  50 #ifdef COMPILER2
  51 #include "opto/runtime.hpp"
  52 #endif
  53 #if INCLUDE_JVMCI
  54 #include "jvmci/jvmciJavaClasses.hpp"
  55 #endif


1417     } else if (in_regs[i].first()->is_XMMRegister()) {
1418       if (in_sig_bt[i] == T_FLOAT) {
1419         int offset = slot * VMRegImpl::stack_slot_size;
1420         slot++;
1421         assert(slot <= stack_slots, "overflow");
1422         if (map != NULL) {
1423           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1424         } else {
1425           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1426         }
1427       }
1428     } else if (in_regs[i].first()->is_stack()) {
1429       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1430         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1431         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1432       }
1433     }
1434   }
1435 }
1436 
1437 
1438 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1439 // keeps a new JNI critical region from starting until a GC has been
1440 // forced.  Save down any oops in registers and describe them in an
1441 // OopMap.
1442 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1443                                                int stack_slots,
1444                                                int total_c_args,
1445                                                int total_in_args,
1446                                                int arg_save_area,
1447                                                OopMapSet* oop_maps,
1448                                                VMRegPair* in_regs,
1449                                                BasicType* in_sig_bt) {
1450   __ block_comment("check GCLocker::needs_gc");
1451   Label cont;
1452   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1453   __ jcc(Assembler::equal, cont);
1454 
1455   // Save down any incoming oops and call into the runtime to halt for a GC
1456 


2113     }
2114 
2115 #ifdef ASSERT
2116     {
2117       Label L;
2118       __ mov(rax, rsp);
2119       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2120       __ cmpptr(rax, rsp);
2121       __ jcc(Assembler::equal, L);
2122       __ stop("improperly aligned stack");
2123       __ bind(L);
2124     }
2125 #endif /* ASSERT */
2126 
2127 
2128   // We use r14 as the oop handle for the receiver/klass
2129   // It is callee save so it survives the call to native
2130 
2131   const Register oop_handle_reg = r14;
2132 
2133   if (is_critical_native) {
2134     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2135                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2136   }
2137 
2138   //
2139   // We immediately shuffle the arguments so that for any vm call we have to
2140   // make from here on out (sync slow path, jvmti, etc.) we will have
2141   // captured the oops from our caller and have a valid oopMap for
2142   // them.
2143 
2144   // -----------------
2145   // The Grand Shuffle
2146 
2147   // The Java calling convention is either equal to (linux) or denser than (win64) the
2148   // C calling convention. However, because of the jni_env argument, the C calling
2149   // convention always has at least one more argument (and two for static) than Java.
2150   // Therefore, if we move the args from java -> c backwards, we will never have
2151   // a register->register conflict and we don't have to build a dependency graph
2152   // and figure out how to break any cycles.
2153   //


2170   // All inbound args are referenced based on rbp and all outbound args via rsp.
2171 
2172 
2173 #ifdef ASSERT
2174   bool reg_destroyed[RegisterImpl::number_of_registers];
2175   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2176   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2177     reg_destroyed[r] = false;
2178   }
2179   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2180     freg_destroyed[f] = false;
2181   }
2182 
2183 #endif /* ASSERT */
2184 
2185   // This may iterate in two different directions depending on the
2186   // kind of native it is.  The reason is that for regular JNI natives
2187   // the incoming and outgoing registers are offset upwards and for
2188   // critical natives they are offset down.
2189   GrowableArray<int> arg_order(2 * total_in_args);
2190   VMRegPair tmp_vmreg;
2191   tmp_vmreg.set2(rbx->as_VMReg());
2192 
2193   if (!is_critical_native) {
2194     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2195       arg_order.push(i);
2196       arg_order.push(c_arg);
2197     }
2198   } else {
2199     // Compute a valid move order, using tmp_vmreg to break any cycles
2200     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2201   }
2202 
2203   int temploc = -1;
2204   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2205     int i = arg_order.at(ai);
2206     int c_arg = arg_order.at(ai + 1);
2207     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2208     if (c_arg == -1) {
2209       assert(is_critical_native, "should only be required for critical natives");


2217       // Read from the temporary location
2218       assert(temploc != -1, "must be valid");
2219       i = temploc;
2220       temploc = -1;
2221     }
2222 #ifdef ASSERT
2223     if (in_regs[i].first()->is_Register()) {
2224       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2225     } else if (in_regs[i].first()->is_XMMRegister()) {
2226       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2227     }
2228     if (out_regs[c_arg].first()->is_Register()) {
2229       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2230     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2231       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2232     }
2233 #endif /* ASSERT */
2234     switch (in_sig_bt[i]) {
2235       case T_ARRAY:
2236         if (is_critical_native) {
2237           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2238           c_arg++;
2239 #ifdef ASSERT
2240           if (out_regs[c_arg].first()->is_Register()) {
2241             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2242           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2243             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2244           }
2245 #endif
2246           break;
2247         }
2248       case T_OBJECT:
2249         assert(!is_critical_native, "no oop arguments");
2250         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2251                     ((i == 0) && (!is_static)),
2252                     &receiver_offset);
2253         break;
2254       case T_VOID:
2255         break;
2256 


2431   // Verify or restore cpu control state after JNI call
2432   __ restore_cpu_control_state_after_jni();
2433 
2434   // Unpack native results.
2435   switch (ret_type) {
2436   case T_BOOLEAN: __ c2bool(rax);            break;
2437   case T_CHAR   : __ movzwl(rax, rax);      break;
2438   case T_BYTE   : __ sign_extend_byte (rax); break;
2439   case T_SHORT  : __ sign_extend_short(rax); break;
2440   case T_INT    : /* nothing to do */        break;
2441   case T_DOUBLE :
2442   case T_FLOAT  :
2443     // Result is in xmm0; we'll save as needed
2444     break;
2445   case T_ARRAY:                 // Really a handle
2446   case T_OBJECT:                // Really a handle
2447       break; // can't de-handlize until after safepoint check
2448   case T_VOID: break;
2449   case T_LONG: break;
2450   default       : ShouldNotReachHere();
2451   }
2452 
2453   // Switch thread to "native transition" state before reading the synchronization state.
2454   // This additional state is necessary because reading and testing the synchronization
2455   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2456   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2457   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2458   //     Thread A is resumed to finish this native method, but doesn't block here since it
2459   //     didn't see any synchronization in progress, and escapes.
2460   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2461 
2462   if(os::is_MP()) {
2463     if (UseMembar) {
2464       // Force this write out before the read below
2465       __ membar(Assembler::Membar_mask_bits(
2466            Assembler::LoadLoad | Assembler::LoadStore |
2467            Assembler::StoreLoad | Assembler::StoreStore));
2468     } else {
2469       // Write serialization page so VM thread can do a pseudo remote membar.
2470       // We use the current thread pointer to calculate a thread specific


   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "gc/shared/gcLocker.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "oops/compiledICHolder.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/vframeArray.hpp"
  44 #include "utilities/align.hpp"
  45 #include "utilities/formatBuffer.hpp"
  46 #include "vm_version_x86.hpp"
  47 #include "vmreg_x86.inline.hpp"
  48 #ifdef COMPILER1
  49 #include "c1/c1_Runtime1.hpp"
  50 #endif
  51 #ifdef COMPILER2
  52 #include "opto/runtime.hpp"
  53 #endif
  54 #if INCLUDE_JVMCI
  55 #include "jvmci/jvmciJavaClasses.hpp"
  56 #endif


1418     } else if (in_regs[i].first()->is_XMMRegister()) {
1419       if (in_sig_bt[i] == T_FLOAT) {
1420         int offset = slot * VMRegImpl::stack_slot_size;
1421         slot++;
1422         assert(slot <= stack_slots, "overflow");
1423         if (map != NULL) {
1424           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1425         } else {
1426           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1427         }
1428       }
1429     } else if (in_regs[i].first()->is_stack()) {
1430       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1431         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1432         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1433       }
1434     }
1435   }
1436 }
1437 
1438 // Pin incoming array argument of java critical method
1439 static void pin_critical_native_array(MacroAssembler* masm,
1440                                       VMRegPair reg,
1441                                       int& pinned_slot) {
1442   __ block_comment("pin_critical_native_array {");
1443   Register tmp_reg = rax;
1444 
1445   Label is_null;
1446   VMRegPair tmp;
1447   VMRegPair in_reg = reg;
1448   bool on_stack = false;
1449 
1450   tmp.set_ptr(tmp_reg->as_VMReg());
1451   if (reg.first()->is_stack()) {
1452     // Load the arg up from the stack
1453     move_ptr(masm, reg, tmp);
1454     reg = tmp;
1455     on_stack = true;
1456   } else {
1457     __ movptr(rax, reg.first()->as_Register());
1458   }
1459   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1460   __ jccb(Assembler::equal, is_null);
1461 
1462   __ push(c_rarg0);
1463   __ push(c_rarg1);
1464   __ push(c_rarg2);
1465   __ push(c_rarg3);
1466 #ifdef _WIN64
1467   // caller-saved registers on Windows
1468   __ push(r10);
1469   __ push(r11);
1470 #else
1471   __ push(c_rarg4);
1472   __ push(c_rarg5);
1473 #endif
1474 
1475   if (reg.first()->as_Register() != c_rarg1) {
1476     __ movptr(c_rarg1, reg.first()->as_Register());
1477   }
1478   __ movptr(c_rarg0, r15_thread);
1479   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object)));
1480 
1481 #ifdef _WIN64
1482   __ pop(r11);
1483   __ pop(r10);
1484 #else
1485   __ pop(c_rarg5);
1486   __ pop(c_rarg4);
1487 #endif
1488   __ pop(c_rarg3);
1489   __ pop(c_rarg2);
1490   __ pop(c_rarg1);
1491   __ pop(c_rarg0);
1492 
1493   if (on_stack) {
1494     __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax);
1495     __ bind(is_null);
1496   } else {
1497     __ movptr(reg.first()->as_Register(), rax);
1498 
1499     // save on stack for unpinning later
1500     __ bind(is_null);
1501     assert(reg.first()->is_Register(), "Must be a register");
1502     int offset = pinned_slot * VMRegImpl::stack_slot_size;
1503     pinned_slot += VMRegImpl::slots_per_word;
1504     __ movq(Address(rsp, offset), rax);
1505   }
1506   __ block_comment("} pin_critical_native_array");
1507 }
1508 
1509 // Unpin array argument of java critical method
1510 static void unpin_critical_native_array(MacroAssembler* masm,
1511                                         VMRegPair reg,
1512                                         int& pinned_slot) {
1513   __ block_comment("unpin_critical_native_array {");
1514   Label is_null;
1515 
1516   if (reg.first()->is_stack()) {
1517     __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1518   } else {
1519     int offset = pinned_slot * VMRegImpl::stack_slot_size;
1520     pinned_slot += VMRegImpl::slots_per_word;
1521     __ movq(c_rarg1, Address(rsp, offset));
1522   }
1523   __ testptr(c_rarg1, c_rarg1);
1524   __ jccb(Assembler::equal, is_null);
1525 
1526   __ movptr(c_rarg0, r15_thread);
1527   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object)));
1528 
1529   __ bind(is_null);
1530   __ block_comment("} unpin_critical_native_array");
1531 }
1532 
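
For context on the two new helpers above: they emit the pin/unpin bracketing that keeps array arguments from moving while a critical native runs on a collector that supports object pinning. A minimal stand-alone C++ sketch of that pattern (hypothetical Heap interface, not HotSpot code) follows; note how the pinned reference replaces the original, mirroring the movptr back from rax in pin_critical_native_array.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the collector's pinning interface.
struct Heap {
  void* pin(void* obj)   { std::printf("pin   %p\n", obj); return obj; }
  void  unpin(void* obj) { std::printf("unpin %p\n", obj); }
};

// Pin every non-null array argument, run the native body, then unpin.
void call_critical_native(Heap& heap, std::vector<void*>& array_args) {
  std::vector<void*> pinned;                 // plays the role of the pinned_slot spill area
  for (void*& a : array_args) {
    if (a != nullptr) {
      a = heap.pin(a);                       // pin before the array argument is unpacked
      pinned.push_back(a);                   // remember it so it can be unpinned later
    }
  }

  // ... the critical native body runs here; the pinned arrays cannot move ...

  for (void* a : pinned) {
    heap.unpin(a);                           // unpin after the call returns
  }
}

int main() {
  Heap heap;
  int a = 1, b = 2;
  std::vector<void*> args = { &a, nullptr, &b };
  call_critical_native(heap, args);
  return 0;
}
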
1533 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1534 // keeps a new JNI critical region from starting until a GC has been
1535 // forced.  Save down any oops in registers and describe them in an
1536 // OopMap.
1537 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1538                                                int stack_slots,
1539                                                int total_c_args,
1540                                                int total_in_args,
1541                                                int arg_save_area,
1542                                                OopMapSet* oop_maps,
1543                                                VMRegPair* in_regs,
1544                                                BasicType* in_sig_bt) {
1545   __ block_comment("check GCLocker::needs_gc");
1546   Label cont;
1547   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1548   __ jcc(Assembler::equal, cont);
1549 
1550   // Save down any incoming oops and call into the runtime to halt for a GC
1551 
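
The fast-path/slow-path shape this stub lays down can be modeled in ordinary C++ (hypothetical names, illustrative only): test the needs_gc flag and, only when it is set, call into the runtime so the thread blocks until the forced GC has completed.

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for GCLocker::needs_gc.
std::atomic<bool> needs_gc{false};

// Hypothetical stand-in for the runtime entry the stub calls; in the real
// stub the oop arguments have already been spilled and described in an OopMap.
void runtime_block_for_gc() {
  std::printf("blocking until the forced GC has run\n");
}

inline void enter_jni_critical_region() {
  if (needs_gc.load(std::memory_order_acquire)) {
    runtime_block_for_gc();                  // slow path: a GC is pending
  }
  // fast path: fall through and start the critical region
}

int main() {
  enter_jni_critical_region();               // flag clear: fast path
  needs_gc.store(true);
  enter_jni_critical_region();               // flag set: slow path taken
  return 0;
}
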


2208     }
2209 
2210 #ifdef ASSERT
2211     {
2212       Label L;
2213       __ mov(rax, rsp);
2214       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2215       __ cmpptr(rax, rsp);
2216       __ jcc(Assembler::equal, L);
2217       __ stop("improperly aligned stack");
2218       __ bind(L);
2219     }
2220 #endif /* ASSERT */
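
The ASSERT block above checks that rsp is 16-byte aligned, as the AMD64 ABI requires at the point of the native call, by masking off the low four bits and comparing. The same predicate in plain C++ (illustrative only):

#include <cassert>
#include <cstdint>

inline bool is_16_byte_aligned(const void* sp) {
  // Equivalent of andptr(rax, -16) followed by cmpptr(rax, rsp):
  // the pointer is aligned iff clearing the low four bits changes nothing.
  return (reinterpret_cast<std::uintptr_t>(sp) & 15) == 0;
}

int main() {
  alignas(16) char buf[32];
  assert(is_16_byte_aligned(buf));
  assert(!is_16_byte_aligned(buf + 8));
  return 0;
}
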
2221 
2222 
2223   // We use r14 as the oop handle for the receiver/klass
2224   // It is callee save so it survives the call to native
2225 
2226   const Register oop_handle_reg = r14;
2227 
2228   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2229     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2230                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2231   }
2232 
2233   //
2234   // We immediately shuffle the arguments so that for any vm call we have to
2235   // make from here on out (sync slow path, jvmti, etc.) we will have
2236   // captured the oops from our caller and have a valid oopMap for
2237   // them.
2238 
2239   // -----------------
2240   // The Grand Shuffle
2241 
2242   // The Java calling convention is either equal to (linux) or denser than (win64) the
2243   // C calling convention. However, because of the jni_env argument, the C calling
2244   // convention always has at least one more argument (and two for static) than Java.
2245   // Therefore, if we move the args from java -> c backwards, we will never have
2246   // a register->register conflict and we don't have to build a dependency graph
2247   // and figure out how to break any cycles.
2248   //
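
The claim in the comment above, that copying from the last argument backwards can never clobber an unread source when every destination slot sits at or above its source slot, is easy to see in a tiny stand-alone model (plain C++, illustrative only, not HotSpot code):

#include <cstdio>

int main() {
  // Four "Java" args in slots 0..3 must become "C" args in slots 1..4,
  // shifted up by one to make room for the JNIEnv* argument.
  int slots[5] = {10, 20, 30, 40, 0};

  // Moving backwards (3->4, 2->3, 1->2, 0->1), every source is read before
  // any move could overwrite it, so no temporary and no cycle-breaking is needed.
  for (int i = 3; i >= 0; i--) {
    slots[i + 1] = slots[i];
  }
  slots[0] = -1;                             // slot 0 is now free for JNIEnv*

  for (int i = 0; i < 5; i++) {
    std::printf("%d ", slots[i]);            // prints: -1 10 20 30 40
  }
  std::printf("\n");
  return 0;
}
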


2265   // All inbound args are referenced based on rbp and all outbound args via rsp.
2266 
2267 
2268 #ifdef ASSERT
2269   bool reg_destroyed[RegisterImpl::number_of_registers];
2270   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2271   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2272     reg_destroyed[r] = false;
2273   }
2274   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2275     freg_destroyed[f] = false;
2276   }
2277 
2278 #endif /* ASSERT */
2279 
2280   // This may iterate in two different directions depending on the
2281   // kind of native it is.  The reason is that for regular JNI natives
2282   // the incoming and outgoing registers are offset upwards and for
2283   // critical natives they are offset down.
2284   GrowableArray<int> arg_order(2 * total_in_args);
2285   // Inbound arguments that need to be pinned for critical natives
2286   GrowableArray<int> pinned_args(total_in_args);
2287   // Current stack slot for storing register based array argument
2288   int pinned_slot = oop_handle_offset;
2289 
2290   VMRegPair tmp_vmreg;
2291   tmp_vmreg.set2(rbx->as_VMReg());
2292 
2293   if (!is_critical_native) {
2294     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2295       arg_order.push(i);
2296       arg_order.push(c_arg);
2297     }
2298   } else {
2299     // Compute a valid move order, using tmp_vmreg to break any cycles
2300     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2301   }
2302 
2303   int temploc = -1;
2304   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2305     int i = arg_order.at(ai);
2306     int c_arg = arg_order.at(ai + 1);
2307     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2308     if (c_arg == -1) {
2309       assert(is_critical_native, "should only be required for critical natives");


2317       // Read from the temporary location
2318       assert(temploc != -1, "must be valid");
2319       i = temploc;
2320       temploc = -1;
2321     }
2322 #ifdef ASSERT
2323     if (in_regs[i].first()->is_Register()) {
2324       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2325     } else if (in_regs[i].first()->is_XMMRegister()) {
2326       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2327     }
2328     if (out_regs[c_arg].first()->is_Register()) {
2329       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2330     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2331       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2332     }
2333 #endif /* ASSERT */
2334     switch (in_sig_bt[i]) {
2335       case T_ARRAY:
2336         if (is_critical_native) {
2337           // pin before unpack
2338           if (Universe::heap()->supports_object_pinning()) {
2339             assert(pinned_slot <= stack_slots, "overflow");
2340             pin_critical_native_array(masm, in_regs[i], pinned_slot);
2341             pinned_args.append(i);
2342           }
2343           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2344           c_arg++;
2345 #ifdef ASSERT
2346           if (out_regs[c_arg].first()->is_Register()) {
2347             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2348           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2349             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2350           }
2351 #endif
2352           break;
2353         }
2354       case T_OBJECT:
2355         assert(!is_critical_native, "no oop arguments");
2356         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2357                     ((i == 0) && (!is_static)),
2358                     &receiver_offset);
2359         break;
2360       case T_VOID:
2361         break;
2362 


2537   // Verify or restore cpu control state after JNI call
2538   __ restore_cpu_control_state_after_jni();
2539 
2540   // Unpack native results.
2541   switch (ret_type) {
2542   case T_BOOLEAN: __ c2bool(rax);            break;
2543   case T_CHAR   : __ movzwl(rax, rax);      break;
2544   case T_BYTE   : __ sign_extend_byte (rax); break;
2545   case T_SHORT  : __ sign_extend_short(rax); break;
2546   case T_INT    : /* nothing to do */        break;
2547   case T_DOUBLE :
2548   case T_FLOAT  :
2549     // Result is in xmm0; we'll save as needed
2550     break;
2551   case T_ARRAY:                 // Really a handle
2552   case T_OBJECT:                // Really a handle
2553       break; // can't de-handlize until after safepoint check
2554   case T_VOID: break;
2555   case T_LONG: break;
2556   default       : ShouldNotReachHere();
2557   }
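
The switch above normalizes whatever the native call left in rax into the canonical form of the Java return type. A plain-C++ illustration of the same narrowing and extension rules (not HotSpot code):

#include <cstdint>
#include <cstdio>

int main() {
  std::uint64_t raw = 0xFFFFFFFFFFFFFF80ull;          // whatever the native call left in rax

  bool          as_boolean = (raw & 0xFF) != 0;        // c2bool: non-zero low byte -> 1
  std::uint16_t as_char    = (std::uint16_t) raw;      // movzwl: zero-extend the low 16 bits
  std::int8_t   as_byte    = (std::int8_t)   raw;      // sign_extend_byte
  std::int16_t  as_short   = (std::int16_t)  raw;      // sign_extend_short
  std::int32_t  as_int     = (std::int32_t)  raw;      // already canonical, nothing to do

  std::printf("%d %d %d %d %d\n", as_boolean, as_char, as_byte, as_short, as_int);
  return 0;
}
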
2558 
2559   // unpin pinned arguments
2560   pinned_slot = oop_handle_offset;
2561   if (pinned_args.length() > 0) {
2562     // save return value that may be overwritten otherwise.
2563     save_native_result(masm, ret_type, stack_slots);
2564     for (int index = 0; index < pinned_args.length(); index ++) {
2565       int i = pinned_args.at(index);
2566       assert(pinned_slot <= stack_slots, "overflow");
2567       unpin_critical_native_array(masm, in_regs[i], pinned_slot);
2568     }
2569     restore_native_result(masm, ret_type, stack_slots);
2570   }
2571 
2572   // Switch thread to "native transition" state before reading the synchronization state.
2573   // This additional state is necessary because reading and testing the synchronization
2574   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2575   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2576   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2577   //     Thread A is resumed to finish this native method, but doesn't block here since it
2578   //     didn't see any synchronization in progress, and escapes.
2579   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2580 
2581   if(os::is_MP()) {
2582     if (UseMembar) {
2583       // Force this write out before the read below
2584       __ membar(Assembler::Membar_mask_bits(
2585            Assembler::LoadLoad | Assembler::LoadStore |
2586            Assembler::StoreLoad | Assembler::StoreStore));
2587     } else {
2588       // Write serialization page so VM thread can do a pseudo remote membar.
2589       // We use the current thread pointer to calculate a thread specific

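
The race described in the comment above is why the stub publishes _thread_in_native_trans and then forces the store out (StoreLoad barrier or serialization-page write) before reading the synchronization state. A rough model with C++11 atomics (hypothetical field names, illustrative only):

#include <atomic>

// Hypothetical stand-ins for the thread-state field and the VM's sync state.
std::atomic<int> thread_state{0};
std::atomic<int> safepoint_state{0};
const int _thread_in_native_trans = 1;
const int _synchronizing          = 2;

bool must_block_for_safepoint() {
  // 1. Publish the transition state first.
  thread_state.store(_thread_in_native_trans, std::memory_order_relaxed);

  // 2. Force the store out before the read below; this is the job of the
  //    membar in the UseMembar branch (or the serialization-page write).
  std::atomic_thread_fence(std::memory_order_seq_cst);

  // 3. Only now read the synchronization state. Without the fence the load
  //    could be ordered ahead of the store and the thread could escape a
  //    pending safepoint, exactly the scenario the comment describes.
  return safepoint_state.load(std::memory_order_relaxed) == _synchronizing;
}

int main() {
  return must_block_for_safepoint() ? 1 : 0;
}
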
