    st->print("bhi #slow_path");
  }
}
#endif

void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
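
// For reference, safepoint_poll with at_return/in_nmethod as used above
// emits a sequence along these lines (a sketch of the current
// MacroAssembler implementation; compare the format string printed by
// this node, ending in "bhi #slow_path"):
//
//   ldr  rscratch1, [rthread, #polling_word_offset]
//   cmp  sp, rscratch1           // is SP above the poll watermark?
//   b.hi #slow_path              // armed: branch to the safepoint stub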

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

#endif
    __ ret(lr);
  %}

  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}

  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      __ load_klass(tmp, oop);
      __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      __ br(Assembler::NE, cont);
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If the result is zero the lock is recursive, and we can store 0 as
    // the displaced header in the box, which indicates a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result

    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ br(Assembler::EQ, cont); // CAS success means locking succeeded

    __ cmp(rscratch1, rthread);
    __ br(Assembler::NE, cont); // Check for recursive locking

    // Recursive lock case
    __ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1);
    // flag == EQ still from the cmp above, checking if this is a reentrant lock

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
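
  // In outline, the fast-lock sequence above implements the following
  // (a pseudocode sketch for orientation; names follow the registers used
  // above, and the flags drive the slow path):
  //
  //   mark = obj->mark();
  //   if (mark has monitor_value set)  goto object_has_monitor; // CAS m->owner: NULL -> rthread
  //   if (CAS(obj->mark: mark|unlocked_value -> box) succeeds)  // thin stack-lock
  //     succeed (EQ);
  //   if ((mark - sp) is a small offset into this thread's stack) {
  //     box->displaced_header = 0;                              // recursive stack-lock
  //     succeed (EQ);
  //   } else fail (NE);                                         // runtime slow path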

  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a lightweight lock: this is true if we
    // see the stack address of the BasicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));

    Label notRecursive;
    __ cbz(disp_hdr, notRecursive);

    // Recursive lock
    __ sub(disp_hdr, disp_hdr, 1u);
    __ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ cmp(disp_hdr, disp_hdr); // Sets flags for result
    __ b(cont);

    __ bind(notRecursive);
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
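
  // In outline, the fast-unlock sequence above implements the following
  // (a pseudocode sketch for orientation):
  //
  //   if (box->displaced_header == 0)  succeed (EQ);  // recursive stack-lock
  //   if (obj->mark has monitor_value) goto object_has_monitor;
  //   if (CAS(obj->mark: box -> displaced_header) succeeds) succeed (EQ);
  //   else fail (NE);                                 // runtime slow path
  //
  //   object_has_monitor: decrement m->recursions if recursive; otherwise
  //   release m->owner with a store-release once EntryList and cxq are
  //   both observed empty, else fail (NE) into the slow path.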

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
//  S T A C K   L A Y O U T    Allocators stack-slot number
//                             |   (to get allocators register number
//  G  Owned by    |        |  v    add OptoReg::stack0())
//  r   CALLER     |        |
//  o     |        +--------+      pad to even-align allocators stack-slot
//  w     V        |  pad0  |        numbers; owned by CALLER
//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
//  h     ^        |   in   |  5
//        |        |  args  |  4   Holes in incoming args owned by SELF
//        |        |        |  3
//        |        +--------+
//        V        | old out|      Empty on Intel, window on Sparc
//        |    old |preserve|      Must be even aligned.
//        |     SP-+--------+----> Matcher::_old_SP, even aligned
//        |        |   in   |  3   area for Intel ret address
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));


  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch far
// counted loop end branch far unsigned
// TODO: fixme

// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}


// ============================================================================
// Safepoint Instructions

// TODO
// provide a near and far version of this code

instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
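
// Note: read_polling_page emits a single "ldrw zr, [$poll]" plus the
// relocation recorded above. Arming a safepoint protects the polling
// page (or sets the per-thread polling word), so this load faults and
// the signal handler reroutes the thread to the safepoint blob. This is
// a sketch of the mechanism; the details live in
// MacroAssembler::read_polling_page and the safepoint machinery.
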
    st->print("bhi #slow_path");
  }
}
#endif

void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
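
// Unlike the dummy_label used during scratch size estimation, the real
// C2SafepointPollStub is emitted out of line after the method body. When
// the poll is armed, the b.hi in safepoint_poll lands in the stub, which
// records the poll pc and jumps to the shared return handler, roughly
// (a sketch of the current aarch64 stub emission, not a verbatim copy):
//
//   __ bind(stub->entry());
//   __ adr(rscratch1, safepoint_pc);   // pc for oop-map lookup on return
//   __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
//   __ far_jump(RuntimeAddress(SharedRuntime::polling_page_return_handler_blob()->entry_point()));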

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

#endif
    __ ret(lr);
  %}

  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
//  S T A C K   L A Y O U T    Allocators stack-slot number
//                             |   (to get allocators register number
//  G  Owned by    |        |  v    add OptoReg::stack0())
//  r   CALLER     |        |
//  o     |        +--------+      pad to even-align allocators stack-slot
//  w     V        |  pad0  |        numbers; owned by CALLER
//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
//  h     ^        |   in   |  5
//        |        |  args  |  4   Holes in incoming args owned by SELF
//        |        |        |  3
//        |        +--------+
//        V        | old out|      Empty on Intel, window on Sparc
//        |    old |preserve|      Must be even aligned.
//        |     SP-+--------+----> Matcher::_old_SP, even aligned
//        |        |   in   |  3   area for Intel ret address
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
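
// With compact object headers there is no separate narrow-klass field at
// a fixed offset: the narrow klass lives in the upper bits of the
// object's markWord. The plain ldrw form above is therefore excluded by
// its predicate, and the decoding variant below is matched instead.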

instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem, rFlagsReg cr)
%{
  match(Set dst (LoadNKlass mem));
  effect(KILL cr);
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed class ptr" %}
  ins_encode %{
    __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp);
  %}
  ins_pipe(pipe_slow);
%}
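
// A sketch of what load_nklass_compact is expected to do under the
// compact-headers (Lilliput) encoding, where the narrow klass occupies
// the upper bits of the markWord (treat the exact shift and operand
// resolution as assumptions):
//
//   // resolve base/index/scale/disp back to the object header, then:
//   ldr  dst, [obj, #oopDesc::mark_offset_in_bytes()]   // load markWord
//   lsr  dst, dst, #markWord::klass_shift               // extract narrow klass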

// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));


  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch far
// counted loop end branch far unsigned
// TODO: fixme

// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}

instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
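
// LM_LIGHTWEIGHT replaces stack-locking's displaced headers with a
// per-thread lock stack: roughly, fast_lock_lightweight CASes the mark's
// lock bits from unlocked to locked and pushes the oop onto the current
// thread's lock stack, and fast_unlock_lightweight pops it and CASes the
// bits back (a sketch of the scheme, not the exact code). The box is kept
// only for the common matcher interface; flags are EQ on success and NE
// on failure, as with the inflated-monitor paths.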

// ============================================================================
// Safepoint Instructions

// TODO
// provide a near and far version of this code

instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}