< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.cpp

Print this page




  36 #include "oops/accessDecorators.hpp"
  37 #include "oops/compressedOops.inline.hpp"
  38 #include "oops/klass.inline.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "runtime/biasedLocking.hpp"
  41 #include "runtime/flags/flagSetting.hpp"
  42 #include "runtime/interfaceSupport.inline.hpp"
  43 #include "runtime/objectMonitor.hpp"
  44 #include "runtime/os.hpp"
  45 #include "runtime/safepoint.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/thread.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "crc32c.h"
  52 #ifdef COMPILER2
  53 #include "opto/intrinsicnode.hpp"
  54 #endif
  55 


  56 #ifdef PRODUCT
  57 #define BLOCK_COMMENT(str) /* nothing */
  58 #define STOP(error) stop(error)
  59 #else
  60 #define BLOCK_COMMENT(str) block_comment(str)
  61 #define STOP(error) block_comment(error); stop(error)
  62 #endif
  63 
  64 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  65 
  66 #ifdef ASSERT
     // Debug builds: tell AbstractAssembler to verify instruction-mark bracketing.
  67 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
  68 #endif
  69 
  70 static Assembler::Condition reverse[] = {
  71     Assembler::noOverflow     /* overflow      = 0x0 */ ,
  72     Assembler::overflow       /* noOverflow    = 0x1 */ ,
  73     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
  74     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
  75     Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,


 533   Address array(rscratch1, index._index, index._scale, index._disp);
 534   return array;
 535 }
 536 
     // Call a C runtime leaf routine (no last_Java_frame bookkeeping),
     // aligning rsp to 16 bytes around the call when necessary.
 537 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
 538   Label L, E;
 539 
 540 #ifdef _WIN64
 541   // Windows always allocates space for its register args
 542   assert(num_args <= 4, "only register arguments supported");
 543   subq(rsp,  frame::arg_reg_save_area_bytes);
 544 #endif
 545 
 546   // Align stack if necessary
 547   testl(rsp, 15);
 548   jcc(Assembler::zero, L);
 549 
 550   subq(rsp, 8);  // assumes any misalignment is exactly 8 -- TODO confirm callers guarantee this
 551   {
 552     call(RuntimeAddress(entry_point));

 553   }
 554   addq(rsp, 8);
 555   jmp(E);
 556 
 557   bind(L);  // already-aligned path
 558   {
 559     call(RuntimeAddress(entry_point));

 560   }
 561 
 562   bind(E);
 563 
 564 #ifdef _WIN64
 565   // restore stack pointer
 566   addq(rsp, frame::arg_reg_save_area_bytes);
 567 #endif
 568 
 569 }
 570 
 571 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
 572   assert(!src2.is_lval(), "should use cmpptr");
 573 
 574   if (reachable(src2)) {
 575     cmpq(src1, as_Address(src2));
 576   } else {
 577     lea(rscratch1, src2);
 578     Assembler::cmpq(src1, Address(rscratch1, 0));
 579   }


     // Push an oop constant on the stack; materialized via rscratch1 (clobbered).
 748 void MacroAssembler::pushoop(jobject obj) {
 749   movoop(rscratch1, obj);
 750   push(rscratch1);
 751 }
 752 
     // Push a Metadata* constant on the stack; materialized via rscratch1 (clobbered).
 753 void MacroAssembler::pushklass(Metadata* obj) {
 754   mov_metadata(rscratch1, obj);
 755   push(rscratch1);
 756 }
 757 
     // Push an AddressLiteral: for an lval push the address itself; otherwise
     // push the pointer-sized value stored at that address. Clobbers rscratch1.
 758 void MacroAssembler::pushptr(AddressLiteral src) {
 759   lea(rscratch1, src);
 760   if (src.is_lval()) {
 761     push(rscratch1);
 762   } else {
 763     pushq(Address(rscratch1, 0));
 764   }
 765 }
 766 
     // Clear the thread's last_Java_sp/pc (and optionally fp) so the frame
     // anchor no longer advertises a walkable last Java frame.
 767 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {

 768   // we must set sp to zero to clear frame
 769   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
 770   // must clear fp, so that compiled frames are not confused; it is
 771   // possible that we need it only for debugging
 772   if (clear_fp) {
 773     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
 774   }
 775 
 776   // Always clear the pc because it could have been set by make_walkable()
 777   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 778   vzeroupper();
 779 }
 780 
 781 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 782                                          Register last_java_fp,
 783                                          address  last_java_pc) {
 784   vzeroupper();
 785   // determine last_java_sp register
 786   if (!last_java_sp->is_valid()) {
 787     last_java_sp = rsp;
 788   }
 789 
 790   // last_java_fp is optional
 791   if (last_java_fp->is_valid()) {
 792     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
 793            last_java_fp);
 794   }
 795 
 796   // last_java_pc is optional
 797   if (last_java_pc != NULL) {


 938     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
 939     os::print_location(tty, *dump_sp++);
 940   }
 941   for (int row = 0; row < 25; row++) {
 942     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
 943     for (int col = 0; col < 4; col++) {
 944       tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
 945     }
 946     tty->cr();
 947   }
 948   // Print some instructions around pc:
 949   Disassembler::decode((address)pc-64, (address)pc);
 950   tty->print_cr("--------");
 951   Disassembler::decode((address)pc, (address)pc+32);
 952 }
 953 
 954 #endif // _LP64
 955 
 956 // Now versions that are common to 32/64 bit
 957 





     // Pointer-width add of an immediate: addq on LP64, addl on 32-bit.
 958 void MacroAssembler::addptr(Register dst, int32_t imm32) {
 959   LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
 960 }
 961 
     // Pointer-width register-register add.
 962 void MacroAssembler::addptr(Register dst, Register src) {
 963   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 964 }
 965 
     // Pointer-width add of a register into a memory operand.
 966 void MacroAssembler::addptr(Address dst, Register src) {
 967   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 968 }
 969 
     // addsd with an AddressLiteral operand; far (non-RIP-reachable) targets
     // go indirectly through rscratch1, which is clobbered on that path.
 970 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
 971   if (reachable(src)) {
 972     Assembler::addsd(dst, as_Address(src));
 973   } else {
 974     lea(rscratch1, src);
 975     Assembler::addsd(dst, Address(rscratch1, 0));
 976   }
 977 }


 987 
     // addpd with an AddressLiteral operand; far targets go indirectly
     // through rscratch1, which is clobbered on that path.
 988 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
 989   if (reachable(src)) {
 990     Assembler::addpd(dst, as_Address(src));
 991   } else {
 992     lea(rscratch1, src);
 993     Assembler::addpd(dst, Address(rscratch1, 0));
 994   }
 995 }
 996 
     // Align the code stream to 'modulus' based on the current emit offset.
 997 void MacroAssembler::align(int modulus) {
 998   align(modulus, offset());
 999 }
1000 
     // Pad with nops so that 'target' becomes a multiple of 'modulus'.
1001 void MacroAssembler::align(int modulus, int target) {
1002   if (target % modulus != 0) {
1003     nop(modulus - (target % modulus));
1004   }
1005 }
1006 




















     // andpd with an AddressLiteral mask operand.
1007 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
1008   // Used in sign-masking with aligned address.
1009   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1010   if (reachable(src)) {
1011     Assembler::andpd(dst, as_Address(src));
1012   } else {
1013     lea(scratch_reg, src);  // scratch_reg clobbered only on the far-address path
1014     Assembler::andpd(dst, Address(scratch_reg, 0));
1015   }
1016 }
1017 
1018 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
1019   // Used in sign-masking with aligned address.
1020   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1021   if (reachable(src)) {
1022     Assembler::andps(dst, as_Address(src));
1023   } else {
1024     lea(scratch_reg, src);
1025     Assembler::andps(dst, Address(scratch_reg, 0));
1026   }


2743   }
2744 }
2745 
2746 // !defined(COMPILER2) is because of stupid core builds
2747 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI
     // Discard all x87 stack registers: EMMS clears the tag word in one go
     // when MMX is available, otherwise free each of the eight slots.
2748 void MacroAssembler::empty_FPU_stack() {
2749   if (VM_Version::supports_mmx()) {
2750     emms();
2751   } else {
2752     for (int i = 8; i-- > 0; ) ffree(i);
2753   }
2754 }
2755 #endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI
2756 
2757 
     // Standard frame prologue: save caller's rbp, establish new frame pointer.
2758 void MacroAssembler::enter() {
2759   push(rbp);
2760   mov(rbp, rsp);
2761 }
2762 















2763 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2764 void MacroAssembler::fat_nop() {
2765   if (UseAddressNop) {
2766     addr_nop_5();
2767   } else {
2768     emit_int8(0x26); // es:
2769     emit_int8(0x2e); // cs:
2770     emit_int8(0x64); // fs:
2771     emit_int8(0x65); // gs:
2772     emit_int8((unsigned char)0x90); // four segment prefixes + nop = one 5-byte instruction
2773   }
2774 }
2775 
     // Compare ST(0) against ST(1), popping both operands.
2776 void MacroAssembler::fcmp(Register tmp) {
2777   fcmp(tmp, 1, true, true);
2778 }
2779 
2780 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2781   assert(!pop_right || pop_left, "usage error");
2782   if (VM_Version::supports_cmov()) {
2783     assert(tmp == noreg, "unneeded temp");
2784     if (pop_left) {
2785       fucomip(index);
2786     } else {
2787       fucomi(index);
2788     }
2789     if (pop_right) {
2790       fpop();
2791     }
2792   } else {


3437   stop(buf);
3438 }
3439 
3440 #ifdef _LP64
3441 #define XSTATE_BV 0x200
3442 #endif
3443 
     // Restore FPU then integer state, mirroring push_CPU_state() in reverse.
3444 void MacroAssembler::pop_CPU_state() {
3445   pop_FPU_state();
3446   pop_IU_state();
3447 }
3448 
     // Restore FPU/SSE state saved by push_FPU_state() and release its stack area.
3449 void MacroAssembler::pop_FPU_state() {
3450 #ifndef _LP64
3451   frstor(Address(rsp, 0));
3452 #else
3453   fxrstor(Address(rsp, 0));  // presumably paired with an fxsave in push_FPU_state -- confirm
3454 #endif
3455   addptr(rsp, FPUStateSizeInWords * wordSize);
3456 }























3457 
     // Restore the integer registers and flags saved by push_IU_state().
3458 void MacroAssembler::pop_IU_state() {
3459   popa();
3460   LP64_ONLY(addq(rsp, 8));  // NOTE(review): presumably discards an alignment slot from push_IU_state -- confirm
3461   popf();
3462 }
3463 
3464 // Save Integer and Float state
3465 // Warning: Stack must be 16 byte aligned (64bit)
     // Restored in reverse order by pop_CPU_state().
3466 void MacroAssembler::push_CPU_state() {
3467   push_IU_state();
3468   push_FPU_state();
3469 }
3470 
3471 void MacroAssembler::push_FPU_state() {
3472   subptr(rsp, FPUStateSizeInWords * wordSize);
3473 #ifndef _LP64
3474   fnsave(Address(rsp, 0));
3475   fwait();
3476 #else




  36 #include "oops/accessDecorators.hpp"
  37 #include "oops/compressedOops.inline.hpp"
  38 #include "oops/klass.inline.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "runtime/biasedLocking.hpp"
  41 #include "runtime/flags/flagSetting.hpp"
  42 #include "runtime/interfaceSupport.inline.hpp"
  43 #include "runtime/objectMonitor.hpp"
  44 #include "runtime/os.hpp"
  45 #include "runtime/safepoint.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/thread.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "crc32c.h"
  52 #ifdef COMPILER2
  53 #include "opto/intrinsicnode.hpp"
  54 #endif
  55 
  56 #include "runtime/continuation.hpp" // TODO LOOM remove after testing CONT_DOUBLE_NOP
  57 
  58 #ifdef PRODUCT
  59 #define BLOCK_COMMENT(str) /* nothing */
  60 #define STOP(error) stop(error)
  61 #else
  62 #define BLOCK_COMMENT(str) block_comment(str)
  63 #define STOP(error) block_comment(error); stop(error)
  64 #endif
  65 
  66 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  67 
  68 #ifdef ASSERT
     // Debug builds: tell AbstractAssembler to verify instruction-mark bracketing.
  69 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
  70 #endif
  71 
  72 static Assembler::Condition reverse[] = {
  73     Assembler::noOverflow     /* overflow      = 0x0 */ ,
  74     Assembler::overflow       /* noOverflow    = 0x1 */ ,
  75     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
  76     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
  77     Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,


 535   Address array(rscratch1, index._index, index._scale, index._disp);
 536   return array;
 537 }
 538 
     // Call a C runtime leaf routine (no last_Java_frame bookkeeping),
     // aligning rsp to 16 bytes around the call when necessary.
 539 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
 540   Label L, E;
 541 
 542 #ifdef _WIN64
 543   // Windows always allocates space for its register args
 544   assert(num_args <= 4, "only register arguments supported");
 545   subq(rsp,  frame::arg_reg_save_area_bytes);
 546 #endif
 547 
 548   // Align stack if necessary
 549   testl(rsp, 15);
 550   jcc(Assembler::zero, L);
 551 
 552   subq(rsp, 8);  // assumes any misalignment is exactly 8 -- TODO confirm callers guarantee this
 553   {
 554     call(RuntimeAddress(entry_point));
 555     oopmap_metadata(-1);  // mark the call site (currently a no-op hook)
 556   }
 557   addq(rsp, 8);
 558   jmp(E);
 559 
 560   bind(L);  // already-aligned path
 561   {
 562     call(RuntimeAddress(entry_point));
 563     oopmap_metadata(-1);  // mark the call site (currently a no-op hook)
 564   }
 565 
 566   bind(E);
 567 
 568 #ifdef _WIN64
 569   // restore stack pointer
 570   addq(rsp, frame::arg_reg_save_area_bytes);
 571 #endif
 572 
 573 }
 574 
 575 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
 576   assert(!src2.is_lval(), "should use cmpptr");
 577 
 578   if (reachable(src2)) {
 579     cmpq(src1, as_Address(src2));
 580   } else {
 581     lea(rscratch1, src2);
 582     Assembler::cmpq(src1, Address(rscratch1, 0));
 583   }


     // Push an oop constant on the stack; materialized via rscratch1 (clobbered).
 752 void MacroAssembler::pushoop(jobject obj) {
 753   movoop(rscratch1, obj);
 754   push(rscratch1);
 755 }
 756 
     // Push a Metadata* constant on the stack; materialized via rscratch1 (clobbered).
 757 void MacroAssembler::pushklass(Metadata* obj) {
 758   mov_metadata(rscratch1, obj);
 759   push(rscratch1);
 760 }
 761 
     // Push an AddressLiteral: for an lval push the address itself; otherwise
     // push the pointer-sized value stored at that address. Clobbers rscratch1.
 762 void MacroAssembler::pushptr(AddressLiteral src) {
 763   lea(rscratch1, src);
 764   if (src.is_lval()) {
 765     push(rscratch1);
 766   } else {
 767     pushq(Address(rscratch1, 0));
 768   }
 769 }
 770 
     // Clear the thread's last_Java_sp/pc (and optionally fp) so the frame
     // anchor no longer advertises a walkable last Java frame.
     // NULL_WORD is materialized once in rscratch1 (clobbered) and stored up
     // to three times.
 771 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
 772   mov64(rscratch1, NULL_WORD);
 773   // we must set sp to zero to clear frame
 774   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1);
 775   // must clear fp, so that compiled frames are not confused; it is
 776   // possible that we need it only for debugging
 777   if (clear_fp) {
 778     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), rscratch1);
 779   }
 780 
 781   // Always clear the pc because it could have been set by make_walkable()
 782   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), rscratch1);
 783   vzeroupper();
 784 }
 785 
 786 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 787                                          Register last_java_fp,
 788                                          address  last_java_pc) {
 789   vzeroupper();
 790   // determine last_java_sp register
 791   if (!last_java_sp->is_valid()) {
 792     last_java_sp = rsp;
 793   }
 794 
 795   // last_java_fp is optional
 796   if (last_java_fp->is_valid()) {
 797     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
 798            last_java_fp);
 799   }
 800 
 801   // last_java_pc is optional
 802   if (last_java_pc != NULL) {


 943     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
 944     os::print_location(tty, *dump_sp++);
 945   }
 946   for (int row = 0; row < 25; row++) {
 947     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
 948     for (int col = 0; col < 4; col++) {
 949       tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
 950     }
 951     tty->cr();
 952   }
 953   // Print some instructions around pc:
 954   Disassembler::decode((address)pc-64, (address)pc);
 955   tty->print_cr("--------");
 956   Disassembler::decode((address)pc, (address)pc+32);
 957 }
 958 
 959 #endif // _LP64
 960 
 961 // Now versions that are common to 32/64 bit
 962 
     // Placeholder hook for recording oopmap metadata at a call site.
     // Currently a no-op; the experimental implementation is commented out below.
 963 void MacroAssembler::oopmap_metadata(int index) {
 964   // if (index != -1) tty->print_cr("oopmap_metadata %d", index);
 965   // mov64(r10, 1234); // TODO: Add a new relocInfo with external semantics. see relocInfo::metadata_type
 966 }
 967 
     // Pointer-width add of an immediate: addq on LP64, addl on 32-bit.
 968 void MacroAssembler::addptr(Register dst, int32_t imm32) {
 969   LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
 970 }
 971 
     // Pointer-width register-register add.
 972 void MacroAssembler::addptr(Register dst, Register src) {
 973   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 974 }
 975 
     // Pointer-width add of a register into a memory operand.
 976 void MacroAssembler::addptr(Address dst, Register src) {
 977   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 978 }
 979 
     // addsd with an AddressLiteral operand; far (non-RIP-reachable) targets
     // go indirectly through rscratch1, which is clobbered on that path.
 980 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
 981   if (reachable(src)) {
 982     Assembler::addsd(dst, as_Address(src));
 983   } else {
 984     lea(rscratch1, src);
 985     Assembler::addsd(dst, Address(rscratch1, 0));
 986   }
 987 }


 997 
     // addpd with an AddressLiteral operand; far targets go indirectly
     // through rscratch1, which is clobbered on that path.
 998 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
 999   if (reachable(src)) {
1000     Assembler::addpd(dst, as_Address(src));
1001   } else {
1002     lea(rscratch1, src);
1003     Assembler::addpd(dst, Address(rscratch1, 0));
1004   }
1005 }
1006 
     // Align the code stream to 'modulus' based on the current emit offset.
1007 void MacroAssembler::align(int modulus) {
1008   align(modulus, offset());
1009 }
1010 
     // Pad with nops so that 'target' becomes a multiple of 'modulus'.
1011 void MacroAssembler::align(int modulus, int target) {
1012   if (target % modulus != 0) {
1013     nop(modulus - (target % modulus));
1014   }
1015 }
1016 
     // Spill a float from an XMM register to a fresh one-word stack slot.
1017 void MacroAssembler::push_f(XMMRegister r) {
1018   subptr(rsp, wordSize);
1019   movflt(Address(rsp, 0), r);
1020 }
1021 
     // Reload a float from the stack slot pushed by push_f and release it.
1022 void MacroAssembler::pop_f(XMMRegister r) {
1023   movflt(r, Address(rsp, 0));
1024   addptr(rsp, wordSize);
1025 }
1026 
     // Spill a double from an XMM register to two fresh stack words.
1027 void MacroAssembler::push_d(XMMRegister r) {
1028   subptr(rsp, 2 * wordSize);
1029   movdbl(Address(rsp, 0), r);
1030 }
1031 
     // Reload a double and release the two slots pushed by push_d.
1032 void MacroAssembler::pop_d(XMMRegister r) {
1033   movdbl(r, Address(rsp, 0));
1034   addptr(rsp, 2 * Interpreter::stackElementSize); // NOTE(review): push_d uses 2 * wordSize; these agree only while stackElementSize == wordSize -- consider unifying
1035 }
1036 
     // andpd with an AddressLiteral mask operand.
1037 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
1038   // Used in sign-masking with aligned address.
1039   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1040   if (reachable(src)) {
1041     Assembler::andpd(dst, as_Address(src));
1042   } else {
1043     lea(scratch_reg, src);  // scratch_reg clobbered only on the far-address path
1044     Assembler::andpd(dst, Address(scratch_reg, 0));
1045   }
1046 }
1047 
1048 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
1049   // Used in sign-masking with aligned address.
1050   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1051   if (reachable(src)) {
1052     Assembler::andps(dst, as_Address(src));
1053   } else {
1054     lea(scratch_reg, src);
1055     Assembler::andps(dst, Address(scratch_reg, 0));
1056   }


2773   }
2774 }
2775 
2776 // !defined(COMPILER2) is because of stupid core builds
2777 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI
     // Discard all x87 stack registers: EMMS clears the tag word in one go
     // when MMX is available, otherwise free each of the eight slots.
2778 void MacroAssembler::empty_FPU_stack() {
2779   if (VM_Version::supports_mmx()) {
2780     emms();
2781   } else {
2782     for (int i = 8; i-- > 0; ) ffree(i);
2783   }
2784 }
2785 #endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI
2786 
2787 
     // Standard frame prologue: save caller's rbp, establish new frame pointer.
2788 void MacroAssembler::enter() {
2789   push(rbp);
2790   mov(rbp, rsp);
2791 }
2792 
     // Emit an 8-byte nop (0F 1F 84 00 + 32-bit zero displacement) right after
     // a call -- presumably so the post-call site can later be recognized or
     // patched (Loom continuation support); TODO confirm intent with callers.
2793 void MacroAssembler::post_call_nop() {
2794   emit_int8((int8_t)0x0f);
2795   emit_int8((int8_t)0x1f);
2796   emit_int8((int8_t)0x84);
2797   emit_int8((int8_t)0x00);
2798   emit_int32(0x00);
2799 #ifdef CONT_DOUBLE_NOP
     // Under CONT_DOUBLE_NOP the same 8-byte pattern is emitted twice (16 bytes).
2800   emit_int8((int8_t)0x0f);
2801   emit_int8((int8_t)0x1f);
2802   emit_int8((int8_t)0x84);
2803   emit_int8((int8_t)0x00);
2804   emit_int32(0x00);
2805 #endif
2806 }
2807 
2808 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2809 void MacroAssembler::fat_nop() {
2810   if (UseAddressNop) {
2811     addr_nop_5();
2812   } else {
2813     emit_int8((int8_t)0x26); // es:
2814     emit_int8((int8_t)0x2e); // cs:
2815     emit_int8((int8_t)0x64); // fs:
2816     emit_int8((int8_t)0x65); // gs:
2817     emit_int8((int8_t)0x90); // four segment prefixes + nop = one 5-byte instruction
2818   }
2819 }
2820 
     // Compare ST(0) against ST(1), popping both operands.
2821 void MacroAssembler::fcmp(Register tmp) {
2822   fcmp(tmp, 1, true, true);
2823 }
2824 
2825 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2826   assert(!pop_right || pop_left, "usage error");
2827   if (VM_Version::supports_cmov()) {
2828     assert(tmp == noreg, "unneeded temp");
2829     if (pop_left) {
2830       fucomip(index);
2831     } else {
2832       fucomi(index);
2833     }
2834     if (pop_right) {
2835       fpop();
2836     }
2837   } else {


3482   stop(buf);
3483 }
3484 
3485 #ifdef _LP64
3486 #define XSTATE_BV 0x200
3487 #endif
3488 
     // Restore FPU then integer state, mirroring push_CPU_state() in reverse.
3489 void MacroAssembler::pop_CPU_state() {
3490   pop_FPU_state();
3491   pop_IU_state();
3492 }
3493 
     // Restore FPU/SSE state saved by push_FPU_state() and release its stack area.
3494 void MacroAssembler::pop_FPU_state() {
3495 #ifndef _LP64
3496   frstor(Address(rsp, 0));
3497 #else
3498   fxrstor(Address(rsp, 0));  // presumably paired with an fxsave in push_FPU_state -- confirm
3499 #endif
3500   addptr(rsp, FPUStateSizeInWords * wordSize);
3501 }
3502 
     // Load the thread's cont_fastpath field into dst. movl reads 32 bits --
     // NOTE(review): confirm the field is declared 32-bit in JavaThread.
3503 void MacroAssembler::get_cont_fastpath(Register java_thread, Register dst) {
3504   movl(dst, Address(java_thread, JavaThread::cont_fastpath_offset()));
3505 }
3506 
     // Store a 32-bit immediate into the thread's cont_fastpath field --
     // NOTE(review): width must match get_cont_fastpath / the field declaration.
3507 void MacroAssembler::set_cont_fastpath(Register java_thread, int32_t imm) {
3508   movl(Address(java_thread, JavaThread::cont_fastpath_offset()), imm);
3509 }
3510 
3511 #ifdef ASSERT
     // Debug check: halt the VM with message 'name' if the current thread has
     // an active continuation. Loads the continuation oop into 'cont'
     // (clobbered); 64-bit only.
3512 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
3513 #ifdef _LP64
3514   Label no_cont;
3515   movptr(cont, Address(r15_thread, in_bytes(JavaThread::continuation_offset())));
     // Test the full 64-bit pointer: testl only checks the low 32 bits and
     // would miss a non-null oop whose low half happens to be zero.
3516   testptr(cont, cont);
3517   jcc(Assembler::zero, no_cont);
3518   stop(name);
3519   bind(no_cont);
3520 #else
3521   Unimplemented();
3522 #endif
3523 }
3524 #endif
3525 
     // Restore the integer registers and flags saved by push_IU_state().
3526 void MacroAssembler::pop_IU_state() {
3527   popa();
3528   LP64_ONLY(addq(rsp, 8));  // NOTE(review): presumably discards an alignment slot from push_IU_state -- confirm
3529   popf();
3530 }
3531 
3532 // Save Integer and Float state
3533 // Warning: Stack must be 16 byte aligned (64bit)
     // Restored in reverse order by pop_CPU_state().
3534 void MacroAssembler::push_CPU_state() {
3535   push_IU_state();
3536   push_FPU_state();
3537 }
3538 
3539 void MacroAssembler::push_FPU_state() {
3540   subptr(rsp, FPUStateSizeInWords * wordSize);
3541 #ifndef _LP64
3542   fnsave(Address(rsp, 0));
3543   fwait();
3544 #else


< prev index next >