src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

*** 1,7 ****
  /*
!  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 28,37 **** --- 28,38 ----
  #include "code/debugInfoRec.hpp"
  #include "code/icBuffer.hpp"
  #include "code/nativeInst.hpp"
  #include "code/vtableStubs.hpp"
  #include "gc/shared/gcLocker.hpp"
+ #include "gc/shared/collectedHeap.hpp"
  #include "interpreter/interpreter.hpp"
  #include "logging/log.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/compiledICHolder.hpp"
  #include "runtime/safepointMechanism.hpp"
*** 1301,1310 **** --- 1302,1402 ----
        }
      }
    }
  }
  
+ // Registers need to be saved for runtime call
+ static Register caller_saved_registers[] = {
+   rcx, rdx, rsi, rdi
+ };
+ 
+ // Save caller saved registers except r1 and r2
+ static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+   int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+   for (int index = 0; index < reg_len; index ++) {
+     Register this_reg = caller_saved_registers[index];
+     if (this_reg != r1 && this_reg != r2) {
+       __ push(this_reg);
+     }
+   }
+ }
+ 
+ // Restore caller saved registers except r1 and r2
+ static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+   int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+   for (int index = reg_len - 1; index >= 0; index --) {
+     Register this_reg = caller_saved_registers[index];
+     if (this_reg != r1 && this_reg != r2) {
+       __ pop(this_reg);
+     }
+   }
+ }
+ 
+ // Pin object, return pinned object or null in rax
+ static void gen_pin_object(MacroAssembler* masm,
+                            Register thread, VMRegPair reg) {
+   __ block_comment("gen_pin_object {");
+ 
+   Label is_null;
+   Register tmp_reg = rax;
+   VMRegPair tmp(tmp_reg->as_VMReg());
+   if (reg.first()->is_stack()) {
+     // Load the arg up from the stack
+     simple_move32(masm, reg, tmp);
+     reg = tmp;
+   } else {
+     __ movl(tmp_reg, reg.first()->as_Register());
+   }
+   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+   __ jccb(Assembler::equal, is_null);
+ 
+   // Save registers that may be used by runtime call
+   Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+   save_registers_except(masm, arg, thread);
+ 
+   __ call_VM_leaf(
+     CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+     thread, reg.first()->as_Register());
+ 
+   // Restore saved registers
+   restore_registers_except(masm, arg, thread);
+ 
+   __ bind(is_null);
+   __ block_comment("} gen_pin_object");
+ }
+ 
+ // Unpin object
+ static void gen_unpin_object(MacroAssembler* masm,
+                              Register thread, VMRegPair reg) {
+   __ block_comment("gen_unpin_object {");
+   Label is_null;
+ 
+   // temp register
+   __ push(rax);
+   Register tmp_reg = rax;
+   VMRegPair tmp(tmp_reg->as_VMReg());
+ 
+   simple_move32(masm, reg, tmp);
+ 
+   __ testptr(rax, rax);
+   __ jccb(Assembler::equal, is_null);
+ 
+   // Save registers that may be used by runtime call
+   Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+   save_registers_except(masm, arg, thread);
+ 
+   __ call_VM_leaf(
+     CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+     thread, rax);
+ 
+   // Restore saved registers
+   restore_registers_except(masm, arg, thread);
+   __ bind(is_null);
+   __ pop(rax);
+   __ block_comment("} gen_unpin_object");
+ }
+ 
  // Check GCLocker::needs_gc and enter the runtime if it's true. This
  // keeps a new JNI critical region from starting until a GC has been
  // forced. Save down any oops in registers and describe them in an
  // OopMap.
  static void check_needs_gc_for_critical_native(MacroAssembler* masm,
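Note: gen_pin_object and gen_unpin_object only emit the call sequence; the actual pinning happens in VM leaf routines. A minimal sketch of what the matching runtime entries look like (assuming a SharedRuntime::pin_object / unpin_object pair that forwards to CollectedHeap::pin_object / unpin_object; that code lives in sharedRuntime.cpp and is not part of this webrev):

    // sharedRuntime.cpp (sketch, not part of this diff)
    JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, oopDesc* obj))
      assert(Universe::heap()->supports_object_pinning(), "only called when heap supports pinning");
      oop o(obj);
      o = Universe::heap()->pin_object(thread, o);  // may return a different (now pinned) copy
      return o;
    JRT_END

    JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj))
      Universe::heap()->unpin_object(thread, oop(obj));
    JRT_END

Because these are leaf calls, the stub must spill any caller-saved registers that are still live across them, which is what save_registers_except / restore_registers_except above take care of.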
*** 1836,1846 ****
  
    const Register oop_handle_reg = rsi;
  
    __ get_thread(thread);
  
!   if (is_critical_native) {
      check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                         oop_handle_offset, oop_maps, in_regs, in_sig_bt);
    }
  
    //
--- 1928,1938 ----
  
    const Register oop_handle_reg = rsi;
  
    __ get_thread(thread);
  
!   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
      check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                         oop_handle_offset, oop_maps, in_regs, in_sig_bt);
    }
  
    //
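In effect this change makes the two critical-native strategies mutually exclusive. A sketch of the resulting decision (names as used in this file; illustrative only):

    // Pinning-capable heaps (e.g. Shenandoah) skip the GCLocker path entirely:
    if (is_critical_native) {
      if (Universe::heap()->supports_object_pinning()) {
        // pin each T_ARRAY argument before the call and unpin it afterwards;
        // GC can proceed while the critical native runs
      } else {
        // legacy path: check_needs_gc_for_critical_native() blocks here
        // if a GCLocker-induced GC is pending
      }
    }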
*** 1874,1883 **** --- 1966,1980 ----
    // sure we can capture all the incoming oop args from the
    // caller.
    //
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+   // Inbound arguments that need to be pinned for critical natives
+   GrowableArray<int> pinned_args(total_in_args);
+   // Current stack slot for storing register based array argument
+   int pinned_slot = oop_handle_offset;
+ 
    // Mark location of rbp,
    // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
  
    // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
    // Are free to temporaries if we have to do stack to steck moves.
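The pinned_slot counter doles out stack slots for parking pinned array oops so they can be found again at unpin time. A small accounting sketch (assuming a 32-bit VM, where VMRegImpl::stack_slot_size == 4 and VMRegImpl::slots_per_word == 1):

    // For each register-based T_ARRAY argument:
    //   save slot   = pinned_slot        (starts at oop_handle_offset)
    //   rsp offset  = pinned_slot * VMRegImpl::stack_slot_size
    //   pinned_slot += VMRegImpl::slots_per_word;  // advance by one word
    // Stack-based arguments are written back in place instead (see the next
    // hunk), so they consume no extra slots.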
*** 1885,1895 ****
    for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (is_critical_native) {
!           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
            c_arg++;
            break;
          }
        case T_OBJECT:
          assert(!is_critical_native, "no oop arguments");
--- 1982,2013 ----
    for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (is_critical_native) {
!           VMRegPair in_arg = in_regs[i];
!           if (Universe::heap()->supports_object_pinning()) {
!             // gen_pin_object handles save and restore
!             // of any clobbered registers
!             gen_pin_object(masm, thread, in_arg);
!             pinned_args.append(i);
! 
!             // rax has pinned array
!             VMRegPair result_reg(rax->as_VMReg());
!             if (!in_arg.first()->is_stack()) {
!               assert(pinned_slot <= stack_slots, "overflow");
!               simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
!               pinned_slot += VMRegImpl::slots_per_word;
!             } else {
!               // Write back pinned value, it will be used to unpin this argument
!               __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
!             }
!             // We have the array in register, use it
!             in_arg = result_reg;
!           }
! 
!           unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
            c_arg++;
            break;
          }
        case T_OBJECT:
          assert(!is_critical_native, "no oop arguments");
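For context, the arguments being pinned here feed a critical native, which receives array arguments as (length, body pointer) pairs with no JNIEnv*/jclass. A hypothetical example of the kind of function this marshalling serves (the JavaCritical_ naming follows the CriticalJNINatives convention; the class and method names are made up):

    #include <jni.h>

    // Critical variant of `static native int sum(int[] a)`. Touching the
    // array body directly is safe because the stub pinned the array before
    // this call and unpins it on return.
    extern "C" jint JavaCritical_com_example_Sums_sum(jint length, jint* body) {
      jint s = 0;
      for (jint i = 0; i < length; i++) {
        s += body[i];
      }
      return s;
    }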
*** 2081,2090 **** --- 2199,2228 ----
    case T_VOID: break;
    case T_LONG: break;
    default       : ShouldNotReachHere();
    }
  
+   // unpin pinned arguments
+   pinned_slot = oop_handle_offset;
+   if (pinned_args.length() > 0) {
+     // save return value that may be overwritten otherwise.
+     save_native_result(masm, ret_type, stack_slots);
+     for (int index = 0; index < pinned_args.length(); index ++) {
+       int i = pinned_args.at(index);
+       assert(pinned_slot <= stack_slots, "overflow");
+       if (!in_regs[i].first()->is_stack()) {
+         int offset = pinned_slot * VMRegImpl::stack_slot_size;
+         __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+         pinned_slot += VMRegImpl::slots_per_word;
+       }
+       // gen_unpin_object handles save and restore
+       // of any other clobbered registers
+       gen_unpin_object(masm, thread, in_regs[i]);
+     }
+     restore_native_result(masm, ret_type, stack_slots);
+   }
+ 
    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
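One subtlety worth noting: gen_unpin_object performs a VM leaf call per pinned argument, and those calls clobber the ABI return registers. The bracketing around the unpin loop preserves the native call's result across them; its shape (sketch, names as in this file):

    save_native_result(masm, ret_type, stack_slots);     // park rax/rdx/FPU result
    for (int index = 0; index < pinned_args.length(); index++) {
      int i = pinned_args.at(index);
      // reload the pinned oop (from its rsp save slot, or from the caller's
      // stack slot where it was written back) and call into the VM to unpin
      gen_unpin_object(masm, thread, in_regs[i]);
    }
    restore_native_result(masm, ret_type, stack_slots);  // bring the result back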