src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp

*** 47,12 ***
--- 47,14 ---
  #include "runtime/osThread.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/timer.hpp"
+ #include "runtime/vm_version.hpp"
  #include "signals_posix.hpp"
  #include "utilities/align.hpp"
+ #include "utilities/debug.hpp"
  #include "utilities/events.hpp"
  #include "utilities/vmError.hpp"
  
  // put OS-includes here
  # include <sys/types.h>

*** 504,44 ***
  static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
    *(jlong *) dst = *(const jlong *) src;
  }
  
  extern "C" {
-   // needs a local assembler label '1:' to avoid trouble when using link-time optimization
    int SpinPause() {
      // We don't use StubRoutines::aarch64::spin_wait stub in order to
      // avoid a costly call to os::current_thread_enable_wx() on MacOS.
      // We should return 1 if SpinPause is implemented, and since there
!     // will be a sequence of 11 instructions for NONE and YIELD and 12
!     // instructions for NOP and ISB, SpinPause will always return 1.
!     uint64_t br_dst;
!     const int instructions_per_case = 2;
!     int64_t off = VM_Version::spin_wait_desc().inst() * instructions_per_case * Assembler::instruction_size;
! 
!     assert(VM_Version::spin_wait_desc().inst() >= SpinWait::NONE &&
!            VM_Version::spin_wait_desc().inst() <= SpinWait::YIELD, "must be");
!     assert(-1 == SpinWait::NONE,  "must be");
!     assert( 0 == SpinWait::NOP,   "must be");
!     assert( 1 == SpinWait::ISB,   "must be");
!     assert( 2 == SpinWait::YIELD, "must be");
! 
!     asm volatile(
!         "  adr  %[d], 20          \n" // 20 == PC here + 5 instructions => address
!                                       // to entry for case SpinWait::NOP
!         "  add  %[d], %[d], %[o]  \n"
!         "  br   %[d]              \n"
!         "  b    1f                \n" // case SpinWait::NONE  (-1)
!         "  nop                    \n" // padding
!         "  nop                    \n" // case SpinWait::NOP   ( 0)
!         "  b    1f                \n"
-         "  isb                    \n" // case SpinWait::ISB   ( 1)
-         "  b    1f                \n"
-         "  yield                  \n" // case SpinWait::YIELD ( 2)
-         "1:        \n"
-         : [d]"=&r"(br_dst)
-         : [o]"r"(off)
-         : "memory");
      return 1;
    }
  
    void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
      if (from > to) {
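
Editor's note on the removed frame above: it dispatches with a computed branch. The `adr` loads the address 20 bytes (5 instructions) past itself, which is the entry for SpinWait::NOP (case 0), and `off` shifts that entry by 8 bytes (2 instructions) per case, so NONE (-1) lands on the `b 1f` at +12, NOP at +20, ISB at +28, and YIELD at +36. A minimal standalone sketch of that offset arithmetic, assuming Assembler::instruction_size == 4 and the SpinWait constants asserted above (not HotSpot code, names are illustrative):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int64_t instruction_size      = 4; // every AArch64 instruction is 4 bytes
    const int64_t instructions_per_case = 2; // each case is one hint insn plus "b 1f"
    for (int64_t inst = -1; inst <= 2; inst++) { // SpinWait::NONE .. SpinWait::YIELD
      int64_t off   = inst * instructions_per_case * instruction_size;
      int64_t entry = 20 + off; // adr resolves to PC + 20, the SpinWait::NOP entry
      // Expected entries relative to the adr instruction:
      // NONE -> +12 ("b 1f"), NOP -> +20, ISB -> +28, YIELD -> +36
      assert(entry == 12 + (inst + 1) * 8);
    }
    return 0;
  }
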
--- 506,36 ---
  static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
    *(jlong *) dst = *(const jlong *) src;
  }
  
  extern "C" {
    int SpinPause() {
      // We don't use StubRoutines::aarch64::spin_wait stub in order to
      // avoid a costly call to os::current_thread_enable_wx() on MacOS.
      // We should return 1 if SpinPause is implemented, and since there
!     // will always be a sequence of instructions, SpinPause will always return 1.
!     switch (VM_Version::spin_wait_desc().inst()) {
!     case SpinWait::NONE:
!       break;
!     case SpinWait::NOP:
!       asm volatile("nop" : : : "memory");
!       break;
!     case SpinWait::ISB:
!       asm volatile("isb" : : : "memory");
!       break;
!     case SpinWait::YIELD:
!       asm volatile("yield" : : : "memory");
!       break;
!     case SpinWait::SB:
!       assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
!       asm volatile(".inst 0xd50330ff" : : : "memory");
!       break;
! #ifdef ASSERT
!     default:
!       ShouldNotReachHere();
! #endif
!     }
      return 1;
    }
  
    void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
      if (from > to) {
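
Editor's note on the replacement frame above: the SB case emits the raw encoding `.inst 0xd50330ff` because the `sb` mnemonic (the FEAT_SB speculation barrier) is not recognized by older assemblers. A compilable standalone sketch of the same switch-based dispatch, assuming an AArch64 target; SpinWaitInst and current_spin_wait_inst() are hypothetical stand-ins for SpinWait and VM_Version::spin_wait_desc().inst(), and the enum values are assumed, not taken from HotSpot:

  #include <cassert>

  enum SpinWaitInst { NONE = -1, NOP = 0, ISB = 1, YIELD = 2, SB = 3 };

  static SpinWaitInst current_spin_wait_inst() {
    return YIELD; // would normally come from CPU feature detection
  }

  int SpinPauseSketch() {
  #if defined(__aarch64__)
    switch (current_spin_wait_inst()) {
    case NONE:
      break;
    case NOP:
      asm volatile("nop" : : : "memory");
      break;
    case ISB:
      asm volatile("isb" : : : "memory");
      break;
    case YIELD:
      asm volatile("yield" : : : "memory");
      break;
    case SB:
      // raw encoding of "sb", for assemblers without FEAT_SB support
      asm volatile(".inst 0xd50330ff" : : : "memory");
      break;
    default:
      assert(false && "unknown spin-wait hint");
    }
  #endif
    return 1; // SpinPause is implemented, so always report success
  }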