src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp

 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"

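// Conventional HotSpot shorthand: '__' expands to the stub's MacroAssembler.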
#define __ ce->masm()->

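// Out-of-line path taken when a safepoint poll fires: record the pc of the
// poll in the thread so the VM can resume execution there, then jump to the
// shared polling-page return handler.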
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
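  // pc() - offset() is the start of the code buffer, so adding
  // safepoint_offset() reconstructs the address of the poll instruction.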
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  // ... (body and intervening stubs elided in this view) ...

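// Slow-path monitor exit: pass the BasicLock address to the Runtime1 unlock
// stub; the _nofpu variant skips saving FPU registers for methods that
// contain no FPU code.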
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
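  // Point lr at the continuation so the runtime stub's return lands directly
  // back in compiled code instead of in this stub.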
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

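// Slow path for klass decoding with compact object headers: the narrow klass
// lives in the mark word, and when the object is monitor-locked the mark word
// is displaced into the ObjectMonitor, so reload it from the monitor's header
// field (the register initially holds the ObjectMonitor pointer).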
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "Only use with compact object headers");
  __ bind(_entry);
  Register d = _result->as_register();
  __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ b(_continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

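// C1 runtime patching is unused on AArch64 (emit_code below asserts), so
// there is nothing to align here.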
void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  // ... (remainder of file elided in this view) ...