src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

*** 1,7 ****
  /*
!  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 36,53 ****
--- 36,58 ----
  #include "oops/compiledICHolder.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/vframeArray.hpp"
  #include "utilities/align.hpp"
+ #include "utilities/macros.hpp"
  #include "vmreg_x86.inline.hpp"
  #ifdef COMPILER1
  #include "c1/c1_Runtime1.hpp"
  #endif
  #ifdef COMPILER2
  #include "opto/runtime.hpp"
  #endif
  #include "vm_version_x86.hpp"
+ #if INCLUDE_SHENANDOAHGC
+ #include "gc/shenandoah/shenandoahBarrierSet.hpp"
+ #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+ #endif

  #define __ masm->

  const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
*** 1836,1846 ****

    const Register oop_handle_reg = rsi;

    __ get_thread(thread);

!   if (is_critical_native) {
      check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                          oop_handle_offset, oop_maps, in_regs, in_sig_bt);
    }

    //
--- 1841,1851 ----

    const Register oop_handle_reg = rsi;

    __ get_thread(thread);

!   if (is_critical_native SHENANDOAHGC_ONLY(&& !UseShenandoahGC)) {
      check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                          oop_handle_offset, oop_maps, in_regs, in_sig_bt);
    }

    //
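Note on the guard above: SHENANDOAHGC_ONLY comes from utilities/macros.hpp (newly included by this change) and keeps its argument only in builds where Shenandoah is compiled in. A minimal sketch of that macro pattern, assuming the usual JDK GC-macro shape rather than quoting macros.hpp verbatim:

// Sketch of the GC-only macro pattern (shape assumed, not copied verbatim):
// the argument survives only when Shenandoah is part of the build.
#if INCLUDE_SHENANDOAHGC
#define SHENANDOAHGC_ONLY(code) code
#else
#define SHENANDOAHGC_ONLY(code)
#endif

// With Shenandoah built in, the condition reads
//   if (is_critical_native && !UseShenandoahGC) { ... }
// so the critical-native GC check is skipped at runtime when Shenandoah is the
// active collector (the arrays are pinned instead, see the hunks below); in
// builds without Shenandoah the clause vanishes and the original check stands.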
*** 1874,1883 ****
--- 1879,1894 ----
    // sure we can capture all the incoming oop args from the
    // caller.
    //
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ #if INCLUDE_SHENANDOAHGC
+   // Inbound arguments that need to be pinned for critical natives
+   GrowableArray<int> pinned_args(total_in_args);
+   // Current stack slot for storing register based array argument
+   int pinned_slot = oop_handle_offset;
+ #endif

    // Mark location of rbp,
    // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());

    // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
    // Are free to temporaries if we have to do stack to steck moves.
*** 1885,1895 ****

    for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (is_critical_native) {
            unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
            c_arg++;
            break;
          }
        case T_OBJECT:
          assert(!is_critical_native, "no oop arguments");
--- 1896,1930 ----

    for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (is_critical_native) {
+ #if INCLUDE_SHENANDOAHGC
+           VMRegPair in_arg = in_regs[i];
+           if (UseShenandoahGC) {
+             // gen_pin_object handles save and restore
+             // of any clobbered registers
+             ShenandoahBarrierSet::assembler()->gen_pin_object(masm, thread, in_arg);
+             pinned_args.append(i);
+
+             // rax has pinned array
+             VMRegPair result_reg(rax->as_VMReg());
+             if (!in_arg.first()->is_stack()) {
+               assert(pinned_slot <= stack_slots, "overflow");
+               simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+               pinned_slot += VMRegImpl::slots_per_word;
+             } else {
+               // Write back pinned value, it will be used to unpin this argument
+               __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
+             }
+             // We have the array in register, use it
+             in_arg = result_reg;
+           }
+           unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+ #else
            unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+ #endif
            c_arg++;
            break;
          }
        case T_OBJECT:
          assert(!is_critical_native, "no oop arguments");
*** 2081,2090 ****
    case T_VOID: break;
    case T_LONG: break;
    default       : ShouldNotReachHere();
    }

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
--- 2116,2148 ----
    case T_VOID: break;
    case T_LONG: break;
    default       : ShouldNotReachHere();
    }

+ #if INCLUDE_SHENANDOAHGC
+   if (UseShenandoahGC) {
+     // unpin pinned arguments
+     pinned_slot = oop_handle_offset;
+     if (pinned_args.length() > 0) {
+       // save return value that may be overwritten otherwise.
+       save_native_result(masm, ret_type, stack_slots);
+       for (int index = 0; index < pinned_args.length(); index ++) {
+         int i = pinned_args.at(index);
+         assert(pinned_slot <= stack_slots, "overflow");
+         if (!in_regs[i].first()->is_stack()) {
+           int offset = pinned_slot * VMRegImpl::stack_slot_size;
+           __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+           pinned_slot += VMRegImpl::slots_per_word;
+         }
+         // gen_pin_object handles save and restore
+         // of any other clobbered registers
+         ShenandoahBarrierSet::assembler()->gen_unpin_object(masm, thread, in_regs[i]);
+       }
+       restore_native_result(masm, ret_type, stack_slots);
+     }
+   }
+ #endif
    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
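For readers following the pin/unpin pairing across the two hunks above: before the native call the generated code pins each incoming T_ARRAY argument, remembers its index in pinned_args and, for register arguments, spills the pinned oop into a dedicated stack slot starting at oop_handle_offset; after the call it walks the same list in the same order, reloads register arguments from those slots, and unpins them. A standalone, host-side sketch of just this bookkeeping, using placeholder values and stand-in types rather than HotSpot APIs (illustrative only, not the emitted assembly):

// Host-side sketch of the pinned-argument bookkeeping (placeholders, not HotSpot code).
#include <cassert>
#include <vector>

const int oop_handle_offset = 8;   // placeholder: first stack slot reserved for pinned oops
const int slots_per_word    = 1;   // 32-bit VM: one stack slot per word

struct Arg { bool in_register; };  // stand-in for VMRegPair

int main() {
  std::vector<Arg> in_args = { {true}, {true}, {false} };  // two register args, one stack arg
  std::vector<int> pinned_args;                            // indices of pinned array arguments
  int pinned_slot = oop_handle_offset;

  // Pin phase: record every array argument; register args get a spill slot for the pinned oop.
  for (int i = 0; i < (int)in_args.size(); i++) {
    pinned_args.push_back(i);
    if (in_args[i].in_register) {
      // emitted code would store the pinned oop at stack slot 'pinned_slot' here
      pinned_slot += slots_per_word;
    }
  }

  // Unpin phase: walk the same list in the same order; register args reload the pinned
  // oop from the slot recorded above, stack args read it back from the caller frame.
  pinned_slot = oop_handle_offset;
  for (int i : pinned_args) {
    if (in_args[i].in_register) {
      // emitted code would reload the pinned oop from stack slot 'pinned_slot' here
      pinned_slot += slots_per_word;
    }
    // emitted code would unpin the object here
  }

  assert(pinned_slot <= oop_handle_offset + (int)in_args.size() * slots_per_word);
  return 0;
}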