1 /*
   2  * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP
  26 #define OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP
  27 
  // Per-thread FPU setup hook; defined in the platform .cpp. Presumably a
  // no-op or default-FPCR setup on AArch64 — confirm against os_linux_aarch64.cpp.
  static void setup_fpu();
  // NOTE(review): SSE is an x86 feature; this looks carried over from the
  // shared os interface. Expected to be trivially true/false here — confirm.
  static bool supports_sse();

  // Raw cycle/timestamp counter read (name taken from the x86 RDTSC
  // instruction). Implementation is platform-specific; see the .cpp.
  static jlong rdtsc();

  // Whether a reservation of 'bytes' is feasible in this process's
  // address space; defined in the platform .cpp.
  static bool is_allocatable(size_t bytes);

  // Used to register dynamic code cache area with the OS
  // Note: Currently only used in 64 bit Windows implementations
  // (structured exception handling); a no-op everywhere else.
  static bool register_code_area(char *low, char *high) { return true; }
  38 
  39   // Atomically copy 64 bits of data
  40   static void atomic_copy64(volatile void *src, volatile void *dst) {
  41 #if defined(PPC) && !defined(_LP64)
  42     double tmp;
  43     asm volatile ("lfd  %0, 0(%1)\n"
  44                   "stfd %0, 0(%2)\n"
  45                   : "=f"(tmp)
  46                   : "b"(src), "b"(dst));
  47 #elif defined(S390) && !defined(_LP64)
  48     double tmp;
  49     asm volatile ("ld  %0, 0(%1)\n"
  50                   "std %0, 0(%2)\n"
  51                   : "=r"(tmp)
  52                   : "a"(src), "a"(dst));
  53 #else
  54     *(jlong *) dst = *(jlong *) src;
  55 #endif
  56   }
  57 
  58 #endif // OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP