 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif
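  // Note: the 64-bit ABI requires rsp to be 16-byte aligned at calls, and an
  // exceptional return from a method handle call may leave it misaligned,
  // hence the realignment for this one stub.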

#ifdef _LP64
  mov(c_rarg0, thread);
  // ... (remainder of call_RT and the intervening stubs elided) ...
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory
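
        // SATB pre-write barrier slow path, shared by G1 and Shenandoah: the
        // value about to be overwritten (pre_val) is recorded in the
        // thread-local SATB queue so that concurrent marking sees a
        // snapshot-at-the-beginning of the heap.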

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));
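        // The 'active' flag above is set only while concurrent marking is in
        // progress, so the barrier body (elided below) can take an early exit
        // when marking is inactive.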

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
        // ... (marking-active check, null check, and queue handling elided) ...
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);

        restore_live_registers(sasm);

        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);
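
        // G1 post-write barrier (card marking) slow path: after a
        // cross-region oop store, the card covering the store address is
        // dirtied and logged so concurrent refinement can rescan it.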

        BarrierSet* bs = Universe::heap()->barrier_set();
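        // Shenandoah does not use a post-write barrier, so reaching this stub
        // under Shenandoah indicates a bug; bail out via unimplemented_entry.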
        if (bs->kind() == BarrierSet::ShenandoahBarrierSet) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and that the store
        // crosses regions, so we must check whether the card is already dirty.

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
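        // Dirtied cards are recorded in the thread-local dirty card queue;
        // full buffers are handed off to the concurrent refinement threads.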

        __ push(rax);
        __ push(rcx);

        const Register cardtable = rax;
        // ... (card address computation and dirty-card check elided) ...
#endif
        __ movptr(Address(buffer_addr, 0), card_addr);

        __ pop(rbx);
        __ jmp(done);
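
        // Slow path: the dirty card queue buffer is full, so call into the
        // runtime to enqueue the card.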
        __ bind(runtime);
        __ push(rdx);

        save_live_registers(sasm, 3);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);

        restore_live_registers(sasm);

        __ pop(rdx);
        __ bind(done);

        __ pop(rcx);
        __ pop(rax);
      }
      break;

    case shenandoah_lrb_slow_id:
      {
        StubFrame f(sasm, "shenandoah_load_reference_barrier", dont_gc_arguments);
        // arg0 : object to be resolved
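
        // Shenandoah load-reference barrier slow path: given the loaded oop
        // (arg0) and the address it was loaded from (arg1), return the
        // up-to-date to-space copy in rax, evacuating the object if needed.
        // The _narrow entry point handles compressed-oop load addresses.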

        save_live_registers(sasm, 1);
#ifdef _LP64
        f.load_argument(0, c_rarg0);
        f.load_argument(1, c_rarg1);
        if (UseCompressedOops) {
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1);
        } else {
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1);
        }
#else
        f.load_argument(0, rax);
        f.load_argument(1, rbx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx);
#endif
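        // The barrier's result is returned in rax; restore everything else.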
        restore_live_registers_except_rax(sasm, true);
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
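
        // A C1 loop predicate failed. Call into the runtime to record the
        // failure, then deoptimize and reexecute from the interpreter.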

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));