
src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp


 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

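// Out-of-line path taken when a safepoint poll fires. The address of the
// poll instruction is reconstructed from the start of the code buffer
// (pc() - offset()) plus the recorded safepoint_offset(), stored in the
// thread's saved_exception_pc slot, and control then jumps to the shared
// polling-page return handler, which brings the thread to the safepoint.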
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {

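// Out-of-line path for monitor exit: passes the BasicLock address to the
// Runtime1 monitorexit entry and arranges for it to return to _continuation
// by pointing lr there. The _nofpu variant is chosen when the method
// contains no FPU code, so the runtime stub can skip saving FPU registers.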
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

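// Slow path for loading a klass under compact object headers: taken when
// the mark word holds a tagged ObjectMonitor pointer instead of the header
// bits. The displaced header, which still contains the narrow klass, is
// reloaded from the ObjectMonitor; OM_OFFSET_NO_MONITOR_VALUE_TAG folds
// the removal of the monitor tag into the load's offset.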
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "Only use with compact object headers");
  __ bind(_entry);
  Register d = _result->as_register();
  __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ b(_continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
// - in runtime: after initializing the class, restore the original code and reexecute the instruction

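// Offset of the patch-info word relative to the patch site. AArch64 never
// emits PatchingStubs (emit_code below asserts false), but the static field
// must still be defined to satisfy the shared PatchingStub interface.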
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
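  // Nothing to do: AArch64 does not use C1 runtime patching, so there is
  // no patch site to align (see the assert in PatchingStub::emit_code below).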
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {