1 /*
   2  * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "jvm.h"
  37 #include "jvmtifiles/jvmti.h"
  38 #include "libo4.hpp"
  39 #include "libperfstat_aix.hpp"
  40 #include "libodm_aix.hpp"
  41 #include "loadlib_aix.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "misc_aix.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "os_aix.inline.hpp"
  48 #include "os_posix.hpp"
  49 #include "porting_aix.hpp"
  50 #include "prims/jniFastGetField.hpp"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.hpp"
  54 #include "runtime/globals.hpp"
  55 #include "runtime/globals_extension.hpp"
  56 #include "runtime/interfaceSupport.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/javaThread.hpp"
  60 #include "runtime/mutexLocker.hpp"
  61 #include "runtime/objectMonitor.hpp"
  62 #include "runtime/os.hpp"
  63 #include "runtime/osInfo.hpp"
  64 #include "runtime/osThread.hpp"
  65 #include "runtime/perfMemory.hpp"
  66 #include "runtime/safefetch.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/threadCritical.hpp"
  70 #include "runtime/threads.hpp"
  71 #include "runtime/timer.hpp"
  72 #include "runtime/vm_version.hpp"
  73 #include "services/attachListener.hpp"
  74 #include "services/runtimeService.hpp"
  75 #include "signals_posix.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/decoder.hpp"
  78 #include "utilities/defaultStream.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/growableArray.hpp"
  81 #include "utilities/vmError.hpp"
  82 
  83 // put OS-includes here (sorted alphabetically)
  84 #ifdef AIX_XLC_GE_17
  85 #include <alloca.h>
  86 #endif
  87 #include <errno.h>
  88 #include <fcntl.h>
  89 #include <inttypes.h>
  90 #include <poll.h>
  91 #include <procinfo.h>
  92 #include <pthread.h>
  93 #include <pwd.h>
  94 #include <semaphore.h>
  95 #include <signal.h>
  96 #include <stdint.h>
  97 #include <stdio.h>
  98 #include <string.h>
  99 #include <unistd.h>
 100 #include <sys/ioctl.h>
 101 #include <sys/ipc.h>
 102 #include <sys/mman.h>
 103 #include <sys/resource.h>
 104 #include <sys/select.h>
 105 #include <sys/shm.h>
 106 #include <sys/socket.h>
 107 #include <sys/stat.h>
 108 #include <sys/sysinfo.h>
 109 #include <sys/systemcfg.h>
 110 #include <sys/time.h>
 111 #include <sys/times.h>
 112 #include <sys/types.h>
 113 #include <sys/utsname.h>
 114 #include <sys/vminfo.h>
 115 
 116 #ifndef _LARGE_FILES
 117 #error Hotspot on AIX must be compiled with -D_LARGE_FILES
 118 #endif
 119 
 120 // Missing prototypes for various system APIs.
 121 extern "C"
 122 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 123 
 124 #if !defined(_AIXVERSION_610)
 125 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 126 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 127 extern "C" int getargs(procsinfo*, int, char*, int);
 128 #endif
 129 
 130 #define MAX_PATH (2 * K)
 131 
 132 // for timer info max values which include all bits
 133 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 134 // for multipage initialization error analysis (in 'g_multipage_error')
 135 #define ERROR_MP_OS_TOO_OLD                          100
 136 #define ERROR_MP_EXTSHM_ACTIVE                       101
 137 #define ERROR_MP_VMGETINFO_FAILED                    102
 138 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 139 
 140 // excerpts from systemcfg.h that might be missing on older os levels
 141 #ifndef PV_7
 142   #define PV_7 0x200000          /* Power PC 7 */
 143 #endif
 144 #ifndef PV_7_Compat
 145   #define PV_7_Compat 0x208000   /* Power PC 7 */
 146 #endif
 147 #ifndef PV_8
 148   #define PV_8 0x300000          /* Power PC 8 */
 149 #endif
 150 #ifndef PV_8_Compat
 151   #define PV_8_Compat 0x308000   /* Power PC 8 */
 152 #endif
 153 #ifndef PV_9
 154   #define PV_9 0x400000          /* Power PC 9 */
 155 #endif
 156 #ifndef PV_9_Compat
 157   #define PV_9_Compat  0x408000  /* Power PC 9 */
 158 #endif
 159 
 160 
// Forward declarations of file-local helpers (defined further below).

// Given an AIX function descriptor, returns the code entry pointer
// (used e.g. to probe the text page size in query_multipage_support()).
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

// NOTE(review): presumably tracks the largest real-time value observed so far
// (monotonicity guard for time queries) — confirm at the use site below.
static volatile jlong max_real_time = 0;

// Process break recorded at startup.
static address g_brk_at_startup = nullptr;
 196 
// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" resp. 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
// All fields are filled in by query_multipage_support(); the initializers
// below mean "not yet initialized".
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 239 
 240 // We must not accidentally allocate memory close to the BRK - even if
 241 // that would work - because then we prevent the BRK segment from
 242 // growing which may result in a malloc OOM even though there is
 243 // enough memory. The problem only arises if we shmat() or mmap() at
 244 // a specific wish address, e.g. to place the heap in a
 245 // compressed-oops-friendly way.
 246 static bool is_close_to_brk(address a) {
 247   assert0(g_brk_at_startup != nullptr);
 248   if (a >= g_brk_at_startup &&
 249       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 250     return true;
 251   }
 252   return false;
 253 }
 254 
 255 julong os::free_memory() {
 256   return Aix::available_memory();
 257 }
 258 
 259 julong os::available_memory() {
 260   return Aix::available_memory();
 261 }
 262 
 263 julong os::Aix::available_memory() {
 264   // Avoid expensive API call here, as returned value will always be null.
 265   if (os::Aix::on_pase()) {
 266     return 0x0LL;
 267   }
 268   os::Aix::meminfo_t mi;
 269   if (os::Aix::get_meminfo(&mi)) {
 270     return mi.real_free;
 271   } else {
 272     return ULONG_MAX;
 273   }
 274 }
 275 
 276 julong os::physical_memory() {
 277   return Aix::physical_memory();
 278 }
 279 
 280 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 281 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 282 static bool my_disclaim64(char* addr, size_t size) {
 283 
 284   if (size == 0) {
 285     return true;
 286   }
 287 
 288   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 289   const unsigned int maxDisclaimSize = 0x40000000;
 290 
 291   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 292   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 293 
 294   char* p = addr;
 295 
 296   for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
 297     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 298       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 299       return false;
 300     }
 301     p += maxDisclaimSize;
 302   }
 303 
 304   if (lastDisclaimSize > 0) {
 305     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 306       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 307       return false;
 308     }
 309   }
 310 
 311   return true;
 312 }
 313 
// Cpu architecture string for this build; AIX builds are PPC-only
// (any other target fails the #error below).
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
 322 
 323 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 324 static int checked_vmgetinfo(void *out, int command, int arg) {
 325   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 326     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 327   }
 328   return ::vmgetinfo(out, command, arg);
 329 }
 330 
 331 // Given an address, returns the size of the page backing that address.
 332 size_t os::Aix::query_pagesize(void* addr) {
 333 
 334   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 335     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 336     return 4*K;
 337   }
 338 
 339   vm_page_info pi;
 340   pi.addr = (uint64_t)addr;
 341   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 342     return pi.pagesize;
 343   } else {
 344     assert(false, "vmgetinfo failed to retrieve page size");
 345     return 4*K;
 346   }
 347 }
 348 
 349 void os::Aix::initialize_system_info() {
 350 
 351   // Get the number of online(logical) cpus instead of configured.
 352   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 353   assert(_processor_count > 0, "_processor_count must be > 0");
 354 
 355   // Retrieve total physical storage.
 356   os::Aix::meminfo_t mi;
 357   if (!os::Aix::get_meminfo(&mi)) {
 358     assert(false, "os::Aix::get_meminfo failed.");
 359   }
 360   _physical_memory = (julong) mi.real_total;
 361 }
 362 
 363 // Helper function for tracing page sizes.
 364 static const char* describe_pagesize(size_t pagesize) {
 365   switch (pagesize) {
 366     case 4*K : return "4K";
 367     case 64*K: return "64K";
 368     case 16*M: return "16M";
 369     case 16*G: return "16G";
 370     default:
 371       assert(false, "surprise");
 372       return "??";
 373   }
 374 }
 375 
 376 // Probe OS for multipage support.
 377 // Will fill the global g_multipage_support structure.
 378 // Must be called before calling os::large_page_init().
 379 static void query_multipage_support() {
 380 
 381   guarantee(g_multipage_support.pagesize == (size_t)-1,
 382             "do not call twice");
 383 
 384   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 385 
 386   // This really would surprise me.
 387   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 388 
 389   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 390   // Default data page size is defined either by linker options (-bdatapsize)
 391   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 392   // default should be 4K.
 393   {
 394     void* p = ::malloc(16*M);
 395     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 396     ::free(p);
 397   }
 398 
 399   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 400   // Note that this is pure curiosity. We do not rely on default page size but set
 401   // our own page size after allocated.
 402   {
 403     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 404     guarantee(shmid != -1, "shmget failed");
 405     void* p = ::shmat(shmid, nullptr, 0);
 406     ::shmctl(shmid, IPC_RMID, nullptr);
 407     guarantee(p != (void*) -1, "shmat failed");
 408     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 409     ::shmdt(p);
 410   }
 411 
 412   // Before querying the stack page size, make sure we are not running as primordial
 413   // thread (because primordial thread's stack may have different page size than
 414   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 415   // number of reasons so we may just as well guarantee it here.
 416   guarantee0(!os::is_primordial_thread());
 417 
 418   // Query pthread stack page size. Should be the same as data page size because
 419   // pthread stacks are allocated from C-Heap.
 420   {
 421     int dummy = 0;
 422     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 423   }
 424 
 425   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 426   {
 427     address any_function =
 428       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 429     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 430   }
 431 
 432   // Now probe for support of 64K pages and 16M pages.
 433 
 434   // Before OS/400 V6R1, there is no support for pages other than 4K.
 435   if (os::Aix::on_pase_V5R4_or_older()) {
 436     trcVerbose("OS/400 < V6R1 - no large page support.");
 437     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 438     goto query_multipage_support_end;
 439   }
 440 
 441   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 442   {
 443     const int MAX_PAGE_SIZES = 4;
 444     psize_t sizes[MAX_PAGE_SIZES];
 445     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 446     if (num_psizes == -1) {
 447       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 448       trcVerbose("disabling multipage support.");
 449       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 450       goto query_multipage_support_end;
 451     }
 452     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 453     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 454     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 455     for (int i = 0; i < num_psizes; i ++) {
 456       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 457     }
 458 
 459     // Can we use 64K, 16M pages?
 460     for (int i = 0; i < num_psizes; i ++) {
 461       const size_t pagesize = sizes[i];
 462       if (pagesize != 64*K && pagesize != 16*M) {
 463         continue;
 464       }
 465       bool can_use = false;
 466       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 467       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 468         IPC_CREAT | S_IRUSR | S_IWUSR);
 469       guarantee0(shmid != -1); // Should always work.
 470       // Try to set pagesize.
 471       struct shmid_ds shm_buf = { };
 472       shm_buf.shm_pagesize = pagesize;
 473       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 474         const int en = errno;
 475         ::shmctl(shmid, IPC_RMID, nullptr); // As early as possible!
 476         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", errno);
 477       } else {
 478         // Attach and double check pageisze.
 479         void* p = ::shmat(shmid, nullptr, 0);
 480         ::shmctl(shmid, IPC_RMID, nullptr); // As early as possible!
 481         guarantee0(p != (void*) -1); // Should always work.
 482         const size_t real_pagesize = os::Aix::query_pagesize(p);
 483         if (real_pagesize != pagesize) {
 484           trcVerbose("real page size (" SIZE_FORMAT_X ") differs.", real_pagesize);
 485         } else {
 486           can_use = true;
 487         }
 488         ::shmdt(p);
 489       }
 490       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 491       if (pagesize == 64*K) {
 492         g_multipage_support.can_use_64K_pages = can_use;
 493       } else if (pagesize == 16*M) {
 494         g_multipage_support.can_use_16M_pages = can_use;
 495       }
 496     }
 497 
 498   } // end: check which pages can be used for shared memory
 499 
 500 query_multipage_support_end:
 501 
 502   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 503       describe_pagesize(g_multipage_support.pagesize));
 504   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 505       describe_pagesize(g_multipage_support.datapsize));
 506   trcVerbose("Text page size: %s",
 507       describe_pagesize(g_multipage_support.textpsize));
 508   trcVerbose("Thread stack page size (pthread): %s",
 509       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 510   trcVerbose("Default shared memory page size: %s",
 511       describe_pagesize(g_multipage_support.shmpsize));
 512   trcVerbose("Can use 64K pages dynamically with shared memory: %s",
 513       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 514   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 515       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 516   trcVerbose("Multipage error details: %d",
 517       g_multipage_support.error);
 518 
 519   // sanity checks
 520   assert0(g_multipage_support.pagesize == 4*K);
 521   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 522   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 523   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 524   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 525 
 526 }
 527 
// Derive and publish the system properties that depend on the JVM's own
// location (dll dir, java.home, boot class path, library path, ext dirs).
void os::init_system_properties_values() {

#ifndef OVERRIDE_LIBPATH
  #define DEFAULT_LIBPATH "/lib:/usr/lib"
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several snprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != nullptr) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != nullptr) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component only if the previous strrchr actually
    // found a separator (i.e. buf still contains a parent directory).
    if (pslash != nullptr) {
      pslash = strrchr(buf, '/');
      if (pslash != nullptr) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", nullptr);
    }
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == nullptr) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  size_t pathsize = strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1;
  char *ld_library_path = NEW_C_HEAP_ARRAY(char, pathsize, mtInternal);
  os::snprintf_checked(ld_library_path, pathsize, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  os::snprintf_checked(buf, bufsize, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 600 
 601 ////////////////////////////////////////////////////////////////////////////////
 602 // breakpoint support
 603 
// Programmatic breakpoint; BREAKPOINT expands to the platform's trap
// mechanism so an attached debugger stops here.
void os::breakpoint() {
  BREAKPOINT;
}
 607 
// Deliberately empty C symbol: set a debugger breakpoint on "breakpoint"
// to get a convenient stopping point from native debuggers.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 611 
 612 // retrieve memory information.
 613 // Returns false if something went wrong;
 614 // content of pmi undefined in this case.
 615 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 616 
 617   assert(pmi, "get_meminfo: invalid parameter");
 618 
 619   memset(pmi, 0, sizeof(meminfo_t));
 620 
 621   if (os::Aix::on_pase()) {
 622     // On PASE, use the libo4 porting library.
 623 
 624     unsigned long long virt_total = 0;
 625     unsigned long long real_total = 0;
 626     unsigned long long real_free = 0;
 627     unsigned long long pgsp_total = 0;
 628     unsigned long long pgsp_free = 0;
 629     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 630       pmi->virt_total = virt_total;
 631       pmi->real_total = real_total;
 632       pmi->real_free = real_free;
 633       pmi->pgsp_total = pgsp_total;
 634       pmi->pgsp_free = pgsp_free;
 635       return true;
 636     }
 637     return false;
 638 
 639   } else {
 640 
 641     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 642     // See:
 643     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 644     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 645     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 646     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 647 
 648     perfstat_memory_total_t psmt;
 649     memset (&psmt, '\0', sizeof(psmt));
 650     const int rc = libperfstat::perfstat_memory_total(nullptr, &psmt, sizeof(psmt), 1);
 651     if (rc == -1) {
 652       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 653       assert(0, "perfstat_memory_total() failed");
 654       return false;
 655     }
 656 
 657     assert(rc == 1, "perfstat_memory_total() - weird return code");
 658 
 659     // excerpt from
 660     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 661     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 662     // The fields of perfstat_memory_total_t:
 663     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 664     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 665     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 666     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 667     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 668 
 669     pmi->virt_total = psmt.virt_total * 4096;
 670     pmi->real_total = psmt.real_total * 4096;
 671     pmi->real_free = psmt.real_free * 4096;
 672     pmi->pgsp_total = psmt.pgsp_total * 4096;
 673     pmi->pgsp_free = psmt.pgsp_free * 4096;
 674 
 675     return true;
 676 
 677   }
 678 } // end os::Aix::get_meminfo
 679 
 680 //////////////////////////////////////////////////////////////////////////////
 681 // create new thread
 682 
 683 // Thread start routine for all newly created threads
// Runs on the newly created pthread: records stack geometry, sets up
// thread-local state, signal mask and FPU state, then hands control to
// the Thread's run loop. Returns when the thread finishes.
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // Log thread id, kernel thread id and stack extent at Info level.
  LogTarget(Info, os, thread) lt;
  if (lt.is_enabled()) {
    address low_address = thread->stack_end();
    address high_address = thread->stack_base();
    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
             ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
  }

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this thread's stack by a pseudo-random multiple of 128 bytes
  // (0..7 cache lines); the allocation is intentionally never used.
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  PosixSignals::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = nullptr;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return 0;
}
 750 
 751 bool os::create_thread(Thread* thread, ThreadType thr_type,
 752                        size_t req_stack_size) {
 753 
 754   assert(thread->osthread() == nullptr, "caller responsible");
 755 
 756   // Allocate the OSThread object.
 757   OSThread* osthread = new (std::nothrow) OSThread();
 758   if (osthread == nullptr) {
 759     return false;
 760   }
 761 
 762   // Set the correct thread state.
 763   osthread->set_thread_type(thr_type);
 764 
 765   // Initial state is ALLOCATED but not INITIALIZED
 766   osthread->set_state(ALLOCATED);
 767 
 768   thread->set_osthread(osthread);
 769 
 770   // Init thread attributes.
 771   pthread_attr_t attr;
 772   pthread_attr_init(&attr);
 773   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 774 
 775   // Make sure we run in 1:1 kernel-user-thread mode.
 776   if (os::Aix::on_aix()) {
 777     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 778     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 779   }
 780 
 781   // Start in suspended state, and in os::thread_start, wake the thread up.
 782   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 783 
 784   // Calculate stack size if it's not specified by caller.
 785   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 786 
 787   // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
 788   // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
 789   // This very much looks like a pthread lib error. As a workaround, increase the stack size
 790   // by 64K for small thread stacks (arbitrarily chosen to be < 4MB)
 791   if (stack_size < 4096 * K) {
 792     stack_size += 64 * K;
 793   }
 794 
 795   // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
 796   // thread size in attr unchanged. If this is the minimal stack size as set
 797   // by pthread_attr_init this leads to crashes after thread creation. E.g. the
 798   // guard pages might not fit on the tiny stack created.
 799   int ret = pthread_attr_setstacksize(&attr, stack_size);
 800   if (ret != 0) {
 801     log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
 802                             (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
 803                             stack_size / K);
 804     thread->set_osthread(nullptr);
 805     delete osthread;
 806     return false;
 807   }
 808 
 809   // Save some cycles and a page by disabling OS guard pages where we have our own
 810   // VM guard pages (in java threads). For other threads, keep system default guard
 811   // pages in place.
 812   if (thr_type == java_thread || thr_type == compiler_thread) {
 813     ret = pthread_attr_setguardsize(&attr, 0);
 814   }
 815 
 816   ResourceMark rm;
 817   pthread_t tid = 0;
 818 
 819   if (ret == 0) {
 820     int limit = 3;
 821     do {
 822       ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
 823     } while (ret == EAGAIN && limit-- > 0);
 824   }
 825 
 826   if (ret == 0) {
 827     char buf[64];
 828     log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
 829                          thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 830   } else {
 831     char buf[64];
 832     log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%d=%s) for attributes: %s.",
 833                             thread->name(), ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 834     // Log some OS information which might explain why creating the thread failed.
 835     log_warning(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 836     log_warning(os, thread)("Checking JVM parameter MaxExpectedDataSegmentSize (currently " SIZE_FORMAT "k)  might be helpful", MaxExpectedDataSegmentSize/K);
 837     LogStream st(Log(os, thread)::info());
 838     os::Posix::print_rlimit_info(&st);
 839     os::print_memory_info(&st);
 840   }
 841 
 842   pthread_attr_destroy(&attr);
 843 
 844   if (ret != 0) {
 845     // Need to clean up stuff we've allocated so far.
 846     thread->set_osthread(nullptr);
 847     delete osthread;
 848     return false;
 849   }
 850 
 851   // OSThread::thread_id is the pthread id.
 852   osthread->set_thread_id(tid);
 853 
 854   return true;
 855 }
 856 
 857 /////////////////////////////////////////////////////////////////////////////
 858 // attach existing thread
 859 
 860 // bootstrap the main thread
 861 bool os::create_main_thread(JavaThread* thread) {
 862   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 863   return create_attached_thread(thread);
 864 }
 865 
 866 bool os::create_attached_thread(JavaThread* thread) {
 867 #ifdef ASSERT
 868     thread->verify_not_published();
 869 #endif
 870 
 871   // Allocate the OSThread object
 872   OSThread* osthread = new (std::nothrow) OSThread();
 873 
 874   if (osthread == nullptr) {
 875     return false;
 876   }
 877 
 878   const pthread_t pthread_id = ::pthread_self();
 879   const tid_t kernel_thread_id = ::thread_self();
 880 
 881   // OSThread::thread_id is the pthread id.
 882   osthread->set_thread_id(pthread_id);
 883 
 884   // .. but keep kernel thread id too for diagnostics
 885   osthread->set_kernel_thread_id(kernel_thread_id);
 886 
 887   // initialize floating point control register
 888   os::Aix::init_thread_fpu_state();
 889 
 890   // Initial thread state is RUNNABLE
 891   osthread->set_state(RUNNABLE);
 892 
 893   thread->set_osthread(osthread);
 894 
 895   if (UseNUMA) {
 896     int lgrp_id = os::numa_get_group_id();
 897     if (lgrp_id != -1) {
 898       thread->set_lgrp_id(lgrp_id);
 899     }
 900   }
 901 
 902   // initialize signal mask for this thread
 903   // and save the caller's signal mask
 904   PosixSignals::hotspot_sigmask(thread);
 905 
 906   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread  id: " UINTX_FORMAT
 907                        ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).",
 908                        os::current_thread_id(), (uintx) kernel_thread_id,
 909                        p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K);
 910 
 911   return true;
 912 }
 913 
 914 void os::pd_start_thread(Thread* thread) {
 915   int status = pthread_continue_np(thread->osthread()->pthread_id());
 916   assert(status == 0, "thr_continue failed");
 917 }
 918 
 919 // Free OS resources related to the OSThread
 920 void os::free_thread(OSThread* osthread) {
 921   assert(osthread != nullptr, "osthread not set");
 922 
 923   // We are told to free resources of the argument thread,
 924   // but we can only really operate on the current thread.
 925   assert(Thread::current()->osthread() == osthread,
 926          "os::free_thread but not current thread");
 927 
 928   // Restore caller's signal mask
 929   sigset_t sigmask = osthread->caller_sigmask();
 930   pthread_sigmask(SIG_SETMASK, &sigmask, nullptr);
 931 
 932   delete osthread;
 933 }
 934 
 935 ////////////////////////////////////////////////////////////////////////////////
 936 // time support
 937 
 938 double os::elapsedVTime() {
 939   struct rusage usage;
 940   int retval = getrusage(RUSAGE_THREAD, &usage);
 941   if (retval == 0) {
 942     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
 943   } else {
 944     // better than nothing, but not much
 945     return elapsedTime();
 946   }
 947 }
 948 
 949 // We use mread_real_time here.
 950 // On AIX: If the CPU has a time register, the result will be RTC_POWER and
 951 // it has to be converted to real time. AIX documentations suggests to do
 952 // this unconditionally, so we do it.
 953 //
 954 // See: https://www.ibm.com/support/knowledgecenter/ssw_aix_61/com.ibm.aix.basetrf2/read_real_time.htm
 955 //
 956 // On PASE: mread_real_time will always return RTC_POWER_PC data, so no
 957 // conversion is necessary. However, mread_real_time will not return
 958 // monotonic results but merely matches read_real_time. So we need a tweak
 959 // to ensure monotonic results.
 960 //
 961 // For PASE no public documentation exists, just word by IBM
// Monotonic nanosecond clock. See the block comment above for the
// AIX-vs-PASE differences around mread_real_time().
jlong os::javaTimeNanos() {
  timebasestruct_t time;
  int rc = mread_real_time(&time, TIMEBASE_SZ);
  if (os::Aix::on_pase()) {
    // PASE: result is already real time, but not guaranteed monotonic,
    // so monotonicity is enforced here via a global high-water mark
    // (max_real_time) maintained with CAS.
    assert(rc == RTC_POWER, "expected time format RTC_POWER from mread_real_time in PASE");
    jlong now = jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
    jlong prev = max_real_time;
    if (now <= prev) {
      return prev;   // same or retrograde time;
    }
    jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obsv" is >= now then
    // we should return "obsv".  If the CAS failed and now > obsv > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obsv.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obsv value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obsv and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv;
  } else {
    // AIX: if the CPU returned raw timebase ticks (not RTC_POWER wall time),
    // convert them to seconds/nanoseconds in place.
    if (rc != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "error calling time_base_to_time()");
    }
    // tb_high holds seconds, tb_low nanoseconds after conversion.
    return jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
  }
}
 995 
 996 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 997   info_ptr->max_value = ALL_64_BITS;
 998   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
 999   info_ptr->may_skip_backward = false;
1000   info_ptr->may_skip_forward = false;
1001   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1002 }
1003 
1004 intx os::current_thread_id() {
1005   return (intx)pthread_self();
1006 }
1007 
1008 int os::current_process_id() {
1009   return getpid();
1010 }
1011 
1012 // DLL functions
1013 
1014 // This must be hard coded because it's the system's temporary
1015 // directory not the java application's temp directory, ala java.io.tmpdir.
1016 const char* os::get_temp_directory() { return "/tmp"; }
1017 
// Refresh the loaded-library cache so subsequent symbol lookups
// (e.g. for stack printing) see all currently loaded modules.
void os::prepare_native_symbols() {
  LoadedLibraries::reload();
}
1021 
1022 // Check if addr is inside libjvm.so.
1023 bool os::address_is_in_vm(address addr) {
1024 
1025   // Input could be a real pc or a function pointer literal. The latter
1026   // would be a function descriptor residing in the data segment of a module.
1027   loaded_module_t lm;
1028   if (LoadedLibraries::find_for_text_address(addr, &lm)) {
1029     return lm.is_in_vm;
1030   } else if (LoadedLibraries::find_for_data_address(addr, &lm)) {
1031     return lm.is_in_vm;
1032   } else {
1033     return false;
1034   }
1035 
1036 }
1037 
1038 // Resolve an AIX function descriptor literal to a code pointer.
1039 // If the input is a valid code pointer to a text segment of a loaded module,
1040 //   it is returned unchanged.
1041 // If the input is a valid AIX function descriptor, it is resolved to the
1042 //   code entry point.
1043 // If the input is neither a valid function descriptor nor a valid code pointer,
1044 //   null is returned.
1045 static address resolve_function_descriptor_to_code_pointer(address p) {
1046 
1047   if (LoadedLibraries::find_for_text_address(p, nullptr)) {
1048     // It is a real code pointer.
1049     return p;
1050   } else if (LoadedLibraries::find_for_data_address(p, nullptr)) {
1051     // Pointer to data segment, potential function descriptor.
1052     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1053     if (LoadedLibraries::find_for_text_address(code_entry, nullptr)) {
1054       // It is a function descriptor.
1055       return code_entry;
1056     }
1057   }
1058 
1059   return nullptr;
1060 }
1061 
1062 bool os::dll_address_to_function_name(address addr, char *buf,
1063                                       int buflen, int *offset,
1064                                       bool demangle) {
1065   if (offset) {
1066     *offset = -1;
1067   }
1068   // Buf is not optional, but offset is optional.
1069   assert(buf != nullptr, "sanity check");
1070   buf[0] = '\0';
1071 
1072   // Resolve function ptr literals first.
1073   addr = resolve_function_descriptor_to_code_pointer(addr);
1074   if (!addr) {
1075     return false;
1076   }
1077 
1078   return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
1079 }
1080 
1081 bool os::dll_address_to_library_name(address addr, char* buf,
1082                                      int buflen, int* offset) {
1083   if (offset) {
1084     *offset = -1;
1085   }
1086   // Buf is not optional, but offset is optional.
1087   assert(buf != nullptr, "sanity check");
1088   buf[0] = '\0';
1089 
1090   // Resolve function ptr literals first.
1091   addr = resolve_function_descriptor_to_code_pointer(addr);
1092   if (!addr) {
1093     return false;
1094   }
1095 
1096   address  base = nullptr;
1097   if (!AixSymbols::get_module_name_and_base(addr, buf, buflen, &base)
1098       || base == nullptr) {
1099     return false;
1100   }
1101   assert(addr >= base && addr <= base + INT_MAX, "address not in library text range");
1102   if (offset != nullptr) {
1103     *offset = addr - base;
1104   }
1105 
1106   return true;
1107 }
1108 
1109 static void* dll_load_library(const char *filename, char *ebuf, int ebuflen) {
1110 
1111   log_info(os)("attempting shared library load of %s", filename);
1112   if (ebuf && ebuflen > 0) {
1113     ebuf[0] = '\0';
1114     ebuf[ebuflen - 1] = '\0';
1115   }
1116 
1117   if (!filename || strlen(filename) == 0) {
1118     if (ebuf != nullptr && ebuflen > 0) {
1119       ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1120     }
1121     return nullptr;
1122   }
1123 
1124   // RTLD_LAZY has currently the same behavior as RTLD_NOW
1125   // The dl is loaded immediately with all its dependants.
1126   int dflags = RTLD_LAZY;
1127   // check for filename ending with ')', it indicates we want to load
1128   // a MEMBER module that is a member of an archive.
1129   int flen = strlen(filename);
1130   if (flen > 0 && filename[flen - 1] == ')') {
1131     dflags |= RTLD_MEMBER;
1132   }
1133 
1134   void* result;
1135   const char* error_report = nullptr;
1136   result = Aix_dlopen(filename, dflags, &error_report);
1137   if (result != nullptr) {
1138     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
1139     // Reload dll cache. Don't do this in signal handling.
1140     LoadedLibraries::reload();
1141     log_info(os)("shared library load of %s was successful", filename);
1142     return result;
1143   } else {
1144     // error analysis when dlopen fails
1145     if (error_report == nullptr) {
1146       error_report = "dlerror returned no error description";
1147     }
1148     if (ebuf != nullptr && ebuflen > 0) {
1149       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1150                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1151     }
1152     Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
1153     log_info(os)("shared library load of %s failed, %s", filename, error_report);
1154   }
1155   return nullptr;
1156 }
1157 // Load library named <filename>
1158 // If filename matches <name>.so, and loading fails, repeat with <name>.a.
1159 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1160   void* result = nullptr;
1161   char* const file_path = strdup(filename);
1162   char* const pointer_to_dot = strrchr(file_path, '.');
1163   const char old_extension[] = ".so";
1164   const char new_extension[] = ".a";
1165   STATIC_ASSERT(sizeof(old_extension) >= sizeof(new_extension));
1166   // First try to load the existing file.
1167   result = dll_load_library(filename, ebuf, ebuflen);
1168   // If the load fails,we try to reload by changing the extension to .a for .so files only.
1169   // Shared object in .so format dont have braces, hence they get removed for archives with members.
1170   if (result == nullptr && pointer_to_dot != nullptr && strcmp(pointer_to_dot, old_extension) == 0) {
1171     snprintf(pointer_to_dot, sizeof(old_extension), "%s", new_extension);
1172     result = dll_load_library(file_path, ebuf, ebuflen);
1173   }
1174   FREE_C_HEAP_ARRAY(char, file_path);
1175   return result;
1176 }
1177 
// Print the list of loaded dynamic libraries (used in error reports
// and by jcmd VM.dynlibs).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1182 
1183 void os::get_summary_os_info(char* buf, size_t buflen) {
1184   // There might be something more readable than uname results for AIX.
1185   struct utsname name;
1186   uname(&name);
1187   snprintf(buf, buflen, "%s %s", name.release, name.version);
1188 }
1189 
1190 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1191 
1192   if (!LoadedLibraries::for_each(callback, param)) {
1193     return -1;
1194   }
1195 
1196   return 0;
1197 }
1198 
1199 void os::print_os_info_brief(outputStream* st) {
1200   uint32_t ver = os::Aix::os_version();
1201   st->print_cr("AIX kernel version %u.%u.%u.%u",
1202                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1203 
1204   os::Posix::print_uname_info(st);
1205 
1206   // Linux uses print_libversion_info(st); here.
1207 }
1208 
1209 void os::print_os_info(outputStream* st) {
1210   st->print_cr("OS:");
1211 
1212   os::Posix::print_uname_info(st);
1213 
1214   uint32_t ver = os::Aix::os_version();
1215   st->print_cr("AIX kernel version %u.%u.%u.%u",
1216                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1217 
1218   os::Posix::print_uptime_info(st);
1219 
1220   os::Posix::print_rlimit_info(st);
1221 
1222   os::Posix::print_load_average(st);
1223 
1224   // _SC_THREAD_THREADS_MAX is the maximum number of threads within a process.
1225   long tmax = sysconf(_SC_THREAD_THREADS_MAX);
1226   st->print_cr("maximum #threads within a process:%ld", tmax);
1227 
1228   // print wpar info
1229   libperfstat::wparinfo_t wi;
1230   if (libperfstat::get_wparinfo(&wi)) {
1231     st->print_cr("wpar info");
1232     st->print_cr("name: %s", wi.name);
1233     st->print_cr("id:   %d", wi.wpar_id);
1234     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1235   }
1236 
1237   VM_Version::print_platform_virtualization_info(st);
1238 }
1239 
1240 void os::print_memory_info(outputStream* st) {
1241 
1242   st->print_cr("Memory:");
1243 
1244   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1245     describe_pagesize(g_multipage_support.pagesize));
1246   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1247     describe_pagesize(g_multipage_support.datapsize));
1248   st->print_cr("  Text page size:                         %s",
1249     describe_pagesize(g_multipage_support.textpsize));
1250   st->print_cr("  Thread stack page size (pthread):       %s",
1251     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1252   st->print_cr("  Default shared memory page size:        %s",
1253     describe_pagesize(g_multipage_support.shmpsize));
1254   st->print_cr("  Can use 64K pages dynamically with shared memory:  %s",
1255     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1256   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1257     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1258   st->print_cr("  Multipage error: %d",
1259     g_multipage_support.error);
1260   st->cr();
1261   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1262 
1263   // print out LDR_CNTRL because it affects the default page sizes
1264   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1265   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1266 
1267   // Print out EXTSHM because it is an unsupported setting.
1268   const char* const extshm = ::getenv("EXTSHM");
1269   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1270   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1271     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1272   }
1273 
1274   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1275   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1276   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1277       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1278   st->cr();
1279 
1280   os::Aix::meminfo_t mi;
1281   if (os::Aix::get_meminfo(&mi)) {
1282     if (os::Aix::on_aix()) {
1283       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1284       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1285       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1286       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1287     } else {
1288       // PASE - Numbers are result of QWCRSSTS; they mean:
1289       // real_total: Sum of all system pools
1290       // real_free: always 0
1291       // pgsp_total: we take the size of the system ASP
1292       // pgsp_free: size of system ASP times percentage of system ASP unused
1293       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1294       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1295       st->print_cr("%% system asp used : %.2f",
1296         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1297     }
1298   }
1299   st->cr();
1300 
1301   // Print program break.
1302   st->print_cr("Program break at VM startup: " PTR_FORMAT ".", p2i(g_brk_at_startup));
1303   address brk_now = (address)::sbrk(0);
1304   if (brk_now != (address)-1) {
1305     st->print_cr("Program break now          : " PTR_FORMAT " (distance: " SIZE_FORMAT "k).",
1306                  p2i(brk_now), (size_t)((brk_now - g_brk_at_startup) / K));
1307   }
1308   st->print_cr("MaxExpectedDataSegmentSize    : " SIZE_FORMAT "k.", MaxExpectedDataSegmentSize / K);
1309   st->cr();
1310 
1311   // Print segments allocated with os::reserve_memory.
1312   st->print_cr("internal virtual memory regions used by vm:");
1313   vmembk_print_on(st);
1314 }
1315 
1316 // Get a string for the cpuinfo that is a summary of the cpu type
1317 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1318   // read _system_configuration.version
1319   switch (_system_configuration.version) {
1320   case PV_9:
1321     strncpy(buf, "Power PC 9", buflen);
1322     break;
1323   case PV_8:
1324     strncpy(buf, "Power PC 8", buflen);
1325     break;
1326   case PV_7:
1327     strncpy(buf, "Power PC 7", buflen);
1328     break;
1329   case PV_6_1:
1330     strncpy(buf, "Power PC 6 DD1.x", buflen);
1331     break;
1332   case PV_6:
1333     strncpy(buf, "Power PC 6", buflen);
1334     break;
1335   case PV_5:
1336     strncpy(buf, "Power PC 5", buflen);
1337     break;
1338   case PV_5_2:
1339     strncpy(buf, "Power PC 5_2", buflen);
1340     break;
1341   case PV_5_3:
1342     strncpy(buf, "Power PC 5_3", buflen);
1343     break;
1344   case PV_5_Compat:
1345     strncpy(buf, "PV_5_Compat", buflen);
1346     break;
1347   case PV_6_Compat:
1348     strncpy(buf, "PV_6_Compat", buflen);
1349     break;
1350   case PV_7_Compat:
1351     strncpy(buf, "PV_7_Compat", buflen);
1352     break;
1353   case PV_8_Compat:
1354     strncpy(buf, "PV_8_Compat", buflen);
1355     break;
1356   case PV_9_Compat:
1357     strncpy(buf, "PV_9_Compat", buflen);
1358     break;
1359   default:
1360     strncpy(buf, "unknown", buflen);
1361   }
1362 }
1363 
// Platform hook for extra CPU details in error reports; intentionally
// empty on AIX.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do beyond of what os::print_cpu_info() does.
}
1367 
// Cached result of os::jvm_path(); computed once, then returned verbatim.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
// The result is copied into buf (which must hold at least MAXPATHLEN
// bytes) and cached in saved_jvm_path for subsequent calls.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate libjvm via the address of one of its own functions.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last four '/'-separated path components to find
    // where "/jre/lib/" would start.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != nullptr && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == nullptr) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // Start over from the canonicalized JAVA_HOME.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == nullptr) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == nullptr) {
            return;
          }
        }
      }
    }
  }

  // Cache the computed path (truncating defensively).
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1451 
1452 ////////////////////////////////////////////////////////////////////////////////
1453 // Virtual Memory
1454 
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Region kinds tracked in vmembk_t::type.
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1459 
1460 struct vmembk_t {
1461   int type;         // 1 - mmap, 2 - shmat
1462   char* addr;
1463   size_t size;      // Real size, may be larger than usersize.
1464   size_t pagesize;  // page size of area
1465   vmembk_t* next;
1466 
1467   bool contains_addr(char* p) const {
1468     return p >= addr && p < (addr + size);
1469   }
1470 
1471   bool contains_range(char* p, size_t s) const {
1472     return contains_addr(p) && contains_addr(p + s - 1);
1473   }
1474 
1475   void print_on(outputStream* os) const {
1476     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1477       " bytes, %d %s pages), %s",
1478       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1479       (type == VMEM_SHMATED ? "shmat" : "mmap")
1480     );
1481   }
1482 
1483   // Check that range is a sub range of memory block (or equal to memory block);
1484   // also check that range is fully page aligned to the page size if the block.
1485   void assert_is_valid_subrange(char* p, size_t s) const {
1486     if (!contains_range(p, s)) {
1487       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1488               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1489               p2i(p), p2i(p + s), p2i(addr), p2i(addr + size));
1490       guarantee0(false);
1491     }
1492     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1493       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1494               " aligned to pagesize (%lu)", p2i(p), p2i(p + s), (unsigned long) pagesize);
1495       guarantee0(false);
1496     }
1497   }
1498 };
1499 
// Global list head for all vmembk_t bookkeeping entries, protected by
// its own critical section (taken by all vmembk_* helpers below).
static struct {
  vmembk_t* first;        // head of the singly-linked region list
  MiscUtils::CritSect cs; // guards 'first' and all node links
} vmem;
1504 
1505 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1506   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1507   assert0(p);
1508   if (p) {
1509     MiscUtils::AutoCritSect lck(&vmem.cs);
1510     p->addr = addr; p->size = size;
1511     p->pagesize = pagesize;
1512     p->type = type;
1513     p->next = vmem.first;
1514     vmem.first = p;
1515   }
1516 }
1517 
1518 static vmembk_t* vmembk_find(char* addr) {
1519   MiscUtils::AutoCritSect lck(&vmem.cs);
1520   for (vmembk_t* p = vmem.first; p; p = p->next) {
1521     if (p->addr <= addr && (p->addr + p->size) > addr) {
1522       return p;
1523     }
1524   }
1525   return nullptr;
1526 }
1527 
1528 static void vmembk_remove(vmembk_t* p0) {
1529   MiscUtils::AutoCritSect lck(&vmem.cs);
1530   assert0(p0);
1531   assert0(vmem.first); // List should not be empty.
1532   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1533     if (*pp == p0) {
1534       *pp = p0->next;
1535       ::free(p0);
1536       return;
1537     }
1538   }
1539   assert0(false); // Not found?
1540 }
1541 
1542 static void vmembk_print_on(outputStream* os) {
1543   MiscUtils::AutoCritSect lck(&vmem.cs);
1544   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1545     vmi->print_on(os);
1546     os->cr();
1547   }
1548 }
1549 
// Reserve and attach a section of System V memory.
// If <requested_addr> is not null, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is null, function will attach the memory anywhere.
static char* reserve_shmated_memory (size_t bytes, char* requested_addr) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT "...", bytes, p2i(requested_addr));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != nullptr && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
    // Since we treat an attach to the wrong address as an error later anyway,
    // we return null here
    return nullptr;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_up(bytes, 64*K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return nullptr;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment before attaching it.
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = 64*K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / (64*K), errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  // Save errno immediately: the shmctl below may clobber it.
  const int errno_shmat = errno;

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
  // The attached mapping survives IPC_RMID; only the global id is released.
  if (::shmctl(shmid, IPC_RMID, nullptr) == -1) {
    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", p2i(requested_addr), errno_shmat);
    return nullptr;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != (size_t)shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT, real_pagesize);
  }

  // NOTE(review): at this point addr is a successful shmat result (the -1
  // case returned above), so the else branch below looks unreachable -
  // kept as-is; confirm before simplifying.
  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
      p2i(addr), p2i(addr + size - 1), size, size/real_pagesize, describe_pagesize(real_pagesize));
  } else {
    if (requested_addr != nullptr) {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, p2i(requested_addr));
    } else {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
    }
  }

  // book-keeping
  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}
1643 
1644 static bool release_shmated_memory(char* addr, size_t size) {
1645 
1646   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1647     p2i(addr), p2i(addr + size - 1));
1648 
1649   bool rc = false;
1650 
1651   // TODO: is there a way to verify shm size without doing bookkeeping?
1652   if (::shmdt(addr) != 0) {
1653     trcVerbose("error (%d).", errno);
1654   } else {
1655     trcVerbose("ok.");
1656     rc = true;
1657   }
1658   return rc;
1659 }
1660 
1661 static bool uncommit_shmated_memory(char* addr, size_t size) {
1662   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1663     p2i(addr), p2i(addr + size - 1));
1664 
1665   const bool rc = my_disclaim64(addr, size);
1666 
1667   if (!rc) {
1668     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", p2i(addr), size);
1669     return false;
1670   }
1671   return true;
1672 }
1673 
1674 ////////////////////////////////  mmap-based routines /////////////////////////////////
1675 
1676 // Reserve memory via mmap.
1677 // If <requested_addr> is given, an attempt is made to attach at the given address.
1678 // Failing that, memory is allocated at any address.
1679 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
1680   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT "...",
1681     bytes, p2i(requested_addr));
1682 
1683   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
1684     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", p2i(requested_addr));
1685     return nullptr;
1686   }
1687 
1688   // We must prevent anyone from attaching too close to the
1689   // BRK because that may cause malloc OOM.
1690   if (requested_addr != nullptr && is_close_to_brk((address)requested_addr)) {
1691     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
1692     // Since we treat an attach to the wrong address as an error later anyway,
1693     // we return null here
1694     return nullptr;
1695   }
1696 
1697   // In 64K mode, we lie and claim the global page size (os::vm_page_size()) is 64K
1698   //  (complicated story). This mostly works just fine since 64K is a multiple of the
1699   //  actual 4K lowest page size. Only at a few seams light shines thru, e.g. when
1700   //  calling mmap. mmap will return memory aligned to the lowest pages size - 4K -
1701   //  so we must make sure - transparently - that the caller only ever sees 64K
1702   //  aligned mapping start addresses.
1703   const size_t alignment = os::vm_page_size();
1704 
1705   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
1706   const size_t size = align_up(bytes, os::vm_page_size());
1707 
1708   // alignment: Allocate memory large enough to include an aligned range of the right size and
1709   // cut off the leading and trailing waste pages.
1710   assert0(alignment != 0 && is_aligned_to(alignment, os::vm_page_size())); // see above
1711   const size_t extra_size = size + alignment;
1712 
1713   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
1714   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
1715   int flags = MAP_ANONYMOUS | MAP_SHARED;
1716 
1717   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
1718   // it means if wishaddress is given but MAP_FIXED is not set.
1719   //
1720   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
1721   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
1722   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
1723   // get clobbered.
1724   if (requested_addr != nullptr) {
1725     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
1726       flags |= MAP_FIXED;
1727     }
1728   }
1729 
1730   char* addr = (char*)::mmap(requested_addr, extra_size,
1731       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
1732 
1733   if (addr == MAP_FAILED) {
1734     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", p2i(requested_addr), size, errno);
1735     return nullptr;
1736   } else if (requested_addr != nullptr && addr != requested_addr) {
1737     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) succeeded, but at a different address than requested (" PTR_FORMAT "), will unmap",
1738                p2i(requested_addr), size, p2i(addr));
1739     ::munmap(addr, extra_size);
1740     return nullptr;
1741   }
1742 
1743   // Handle alignment.
1744   char* const addr_aligned = align_up(addr, alignment);
1745   const size_t waste_pre = addr_aligned - addr;
1746   char* const addr_aligned_end = addr_aligned + size;
1747   const size_t waste_post = extra_size - waste_pre - size;
1748   if (waste_pre > 0) {
1749     ::munmap(addr, waste_pre);
1750   }
1751   if (waste_post > 0) {
1752     ::munmap(addr_aligned_end, waste_post);
1753   }
1754   addr = addr_aligned;
1755 
1756   trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
1757     p2i(addr), p2i(addr + bytes), bytes);
1758 
1759   // bookkeeping
1760   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
1761 
1762   // Test alignment, see above.
1763   assert0(is_aligned_to(addr, os::vm_page_size()));
1764 
1765   return addr;
1766 }
1767 
1768 static bool release_mmaped_memory(char* addr, size_t size) {
1769   assert0(is_aligned_to(addr, os::vm_page_size()));
1770   assert0(is_aligned_to(size, os::vm_page_size()));
1771 
1772   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1773     p2i(addr), p2i(addr + size - 1));
1774   bool rc = false;
1775 
1776   if (::munmap(addr, size) != 0) {
1777     trcVerbose("failed (%d)\n", errno);
1778     rc = false;
1779   } else {
1780     trcVerbose("ok.");
1781     rc = true;
1782   }
1783 
1784   return rc;
1785 }
1786 
1787 static bool uncommit_mmaped_memory(char* addr, size_t size) {
1788 
1789   assert0(is_aligned_to(addr, os::vm_page_size()));
1790   assert0(is_aligned_to(size, os::vm_page_size()));
1791 
1792   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1793     p2i(addr), p2i(addr + size - 1));
1794   bool rc = false;
1795 
1796   // Uncommit mmap memory with msync MS_INVALIDATE.
1797   if (::msync(addr, size, MS_INVALIDATE) != 0) {
1798     trcVerbose("failed (%d)\n", errno);
1799     rc = false;
1800   } else {
1801     trcVerbose("ok.");
1802     rc = true;
1803   }
1804 
1805   return rc;
1806 }
1807 
#ifdef PRODUCT
// Emit a warning describing a failed commit attempt. Only compiled into
// product builds: the caller below (os::pd_commit_memory_or_exit) invokes it
// under PRODUCT_ONLY to attach extra info before exiting with OOM.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
          os::errno_name(err), err);
}
#endif
1816 
1817 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
1818                                   const char* mesg) {
1819   assert(mesg != nullptr, "mesg must be specified");
1820   if (!pd_commit_memory(addr, size, exec)) {
1821     // Add extra info in product mode for vm_exit_out_of_memory():
1822     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
1823     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
1824   }
1825 }
1826 
1827 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1828 
1829   assert(is_aligned_to(addr, os::vm_page_size()),
1830     "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1831     p2i(addr), os::vm_page_size());
1832   assert(is_aligned_to(size, os::vm_page_size()),
1833     "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1834     size, os::vm_page_size());
1835 
1836   vmembk_t* const vmi = vmembk_find(addr);
1837   guarantee0(vmi);
1838   vmi->assert_is_valid_subrange(addr, size);
1839 
1840   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", p2i(addr), p2i(addr + size - 1));
1841 
1842   if (UseExplicitCommit) {
1843     // AIX commits memory on touch. So, touch all pages to be committed.
1844     for (char* p = addr; p < (addr + size); p += 4*K) {
1845       *p = '\0';
1846     }
1847   }
1848 
1849   return true;
1850 }
1851 
// Variant taking an alignment hint; the hint is ignored on AIX and the call
// is forwarded to the three-argument overload above.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
1855 
// Commit-or-exit variant taking an alignment hint; the hint is ignored on
// AIX and the call is forwarded to the four-argument overload.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
1862 
1863 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
1864   assert(is_aligned_to(addr, os::vm_page_size()),
1865     "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1866     p2i(addr), os::vm_page_size());
1867   assert(is_aligned_to(size, os::vm_page_size()),
1868     "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1869     size, os::vm_page_size());
1870 
1871   // Dynamically do different things for mmap/shmat.
1872   const vmembk_t* const vmi = vmembk_find(addr);
1873   guarantee0(vmi);
1874   vmi->assert_is_valid_subrange(addr, size);
1875 
1876   if (vmi->type == VMEM_SHMATED) {
1877     return uncommit_shmated_memory(addr, size);
1878   } else {
1879     return uncommit_mmaped_memory(addr, size);
1880   }
1881 }
1882 
// Not used on AIX: stack pages need no explicit commit, so reaching this
// function indicates a programming error (hence ShouldNotReachHere).
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
1888 
// Not used on AIX (counterpart to pd_create_stack_guard_pages above).
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
1894 
// No-op on AIX: page sizes are fixed at startup (see os::init), so there is
// nothing to realign.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
1897 
// No-op on AIX: this hint-style free (used by some GCs elsewhere) is not
// implemented for this platform.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
1900 
// No-op: NUMA is not supported on AIX (os::init_2 force-disables UseNUMA).
void os::numa_make_global(char *addr, size_t bytes) {
}
1903 
// No-op: NUMA is not supported on AIX (os::init_2 force-disables UseNUMA).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
1906 
// NUMA is not supported on AIX; the topology never changes.
bool os::numa_topology_changed() {
  return false;
}
1910 
// NUMA is not supported on AIX; report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
1914 
// NUMA is not supported on AIX; every thread belongs to group 0.
int os::numa_get_group_id() {
  return 0;
}
1918 
1919 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
1920   if (size > 0) {
1921     ids[0] = 0;
1922     return 1;
1923   }
1924   return 0;
1925 }
1926 
// NUMA is not supported on AIX; all addresses map to group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}
1930 
// NUMA is not supported on AIX; signal the query as unavailable.
bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, size_t count) {
  return false;
}
1934 
// Page scanning is not implemented on AIX; report the whole range as scanned
// by returning end without filling page_found.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
1938 
1939 // Reserves and attaches a shared memory segment.
1940 char* os::pd_reserve_memory(size_t bytes, bool exec) {
1941   // Always round to os::vm_page_size(), which may be larger than 4K.
1942   bytes = align_up(bytes, os::vm_page_size());
1943 
1944   // In 4K mode always use mmap.
1945   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
1946   if (os::vm_page_size() == 4*K) {
1947     return reserve_mmaped_memory(bytes, nullptr /* requested_addr */);
1948   } else {
1949     if (bytes >= Use64KPagesThreshold) {
1950       return reserve_shmated_memory(bytes, nullptr /* requested_addr */);
1951     } else {
1952       return reserve_mmaped_memory(bytes, nullptr /* requested_addr */);
1953     }
1954   }
1955 }
1956 
// Release (fully or partially) a range reserved via pd_reserve_memory /
// pd_attempt_reserve_memory_at. Dispatches on the bookkeeping entry type:
// shmat-based ranges can only be fully detached (partial release degrades to
// an uncommit), while mmap-based ranges may be trimmed from either end, with
// the bookkeeping entry adjusted accordingly. Returns true on success.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // In mmap-mode:
    //  - If the user wants to release the full range, we do that and remove the mapping.
    //  - If the user wants to release part of the range, we release that part, but need
    //    to adjust bookkeeping.
    assert(is_aligned(size, 4 * K), "Sanity");
    rc = release_mmaped_memory(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      remove_bookkeeping = true;
    } else {
      if (addr == vmi->addr && size < vmi->size) {
        // Chopped from head
        vmi->addr += size;
        vmi->size -= size;
      } else if (addr + size == vmi->addr + vmi->size) {
        // Chopped from tail
        vmi->size -= size;
      } else {
        // releasing a mapping in the middle of the original mapping:
        // For now we forbid this, since this is an invalid scenario
        // (the bookkeeping is easy enough to fix if needed but there
        //  is no use case for it; any occurrence is likely an error).
        ShouldNotReachHere();
      }
    }
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2016 
2017 static bool checked_mprotect(char* addr, size_t size, int prot) {
2018 
2019   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2020   // not tell me if protection failed when trying to protect an un-protectable range.
2021   //
2022   // This means if the memory was allocated using shmget/shmat, protection won't work
2023   // but mprotect will still return 0:
2024   //
2025   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2026 
2027   Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
2028   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2029 
2030   if (!rc) {
2031     const char* const s_errno = os::errno_name(errno);
2032     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2033     return false;
2034   }
2035 
2036   // mprotect success check
2037   //
2038   // Mprotect said it changed the protection but can I believe it?
2039   //
2040   // To be sure I need to check the protection afterwards. Try to
2041   // read from protected memory and check whether that causes a segfault.
2042   //
2043   if (!os::Aix::xpg_sus_mode()) {
2044 
2045     const bool read_protected =
2046       (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2047        SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2048 
2049     if (prot & PROT_READ) {
2050       rc = !read_protected;
2051     } else {
2052       rc = read_protected;
2053     }
2054 
2055     if (!rc) {
2056       if (os::Aix::on_pase()) {
2057         // There is an issue on older PASE systems where mprotect() will return success but the
2058         // memory will not be protected.
2059         // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2060         // machines; we only see it rarely, when using mprotect() to protect the guard page of
2061         // a stack. It is an OS error.
2062         //
2063         // A valid strategy is just to try again. This usually works. :-/
2064 
2065         ::usleep(1000);
2066         Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
2067         if (::mprotect(addr, size, prot) == 0) {
2068           const bool read_protected_2 =
2069             (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2070             SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2071           rc = true;
2072         }
2073       }
2074     }
2075   }
2076 
2077   assert(rc == true, "mprotect failed.");
2078 
2079   return rc;
2080 }
2081 
2082 // Set protections specified
2083 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2084   unsigned int p = 0;
2085   switch (prot) {
2086   case MEM_PROT_NONE: p = PROT_NONE; break;
2087   case MEM_PROT_READ: p = PROT_READ; break;
2088   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2089   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2090   default:
2091     ShouldNotReachHere();
2092   }
2093   // is_committed is unused.
2094   return checked_mprotect(addr, size, p);
2095 }
2096 
// Make the range inaccessible (guard pages): remove all protection bits.
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2100 
// Undo guard_memory(): restore full read/write/execute access.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2104 
2105 // Large page support
2106 
static size_t _large_page_size = 0;  // Stays 0: huge pages are not supported on AIX (see large_page_init below).
2108 
// Enable large page support if OS allows that.
// Intentionally empty on AIX: page sizes are chosen once during os::init()
// via query_multipage_support(), and huge pages are not supported.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2113 
// Large ("special") page reservation is unsupported on AIX; calling this is
// a programming error.
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on AIX.");
  return nullptr;
}
2118 
// Counterpart to pd_reserve_memory_special; likewise unsupported on AIX.
bool os::pd_release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on AIX.");
  return false;
}
2123 
// Returns 0 on AIX: _large_page_size is never set (huge pages unsupported).
size_t os::large_page_size() {
  return _large_page_size;
}
2127 
// Always false on AIX.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2132 
// Always false on AIX.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2137 
2138 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
2139   assert(file_desc >= 0, "file_desc is not valid");
2140   char* result = nullptr;
2141 
2142   // Always round to os::vm_page_size(), which may be larger than 4K.
2143   bytes = align_up(bytes, os::vm_page_size());
2144   result = reserve_mmaped_memory(bytes, requested_addr);
2145 
2146   if (result != nullptr) {
2147     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
2148       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2149     }
2150   }
2151   return result;
2152 }
2153 
2154 // Reserve memory at an arbitrary address, only if that area is
2155 // available (and not reserved for something else).
2156 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
2157   char* addr = nullptr;
2158 
2159   // Always round to os::vm_page_size(), which may be larger than 4K.
2160   bytes = align_up(bytes, os::vm_page_size());
2161 
2162   // In 4K mode always use mmap.
2163   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2164   if (os::vm_page_size() == 4*K) {
2165     return reserve_mmaped_memory(bytes, requested_addr);
2166   } else {
2167     if (bytes >= Use64KPagesThreshold) {
2168       return reserve_shmated_memory(bytes, requested_addr);
2169     } else {
2170       return reserve_mmaped_memory(bytes, requested_addr);
2171     }
2172   }
2173 
2174   return addr;
2175 }
2176 
// Used to convert frequent JVM_Yield() to nops
// Controlled by the -XX:+DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
2181 
// Yield the CPU without any VM-level bookkeeping; maps directly to
// sched_yield(2).
void os::naked_yield() {
  sched_yield();
}
2185 
2186 ////////////////////////////////////////////////////////////////////////////////
2187 // thread priority support
2188 
2189 // From AIX manpage to pthread_setschedparam
2190 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2191 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2192 //
2193 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2194 // range from 40 to 80, where 40 is the least favored priority and 80
2195 // is the most favored."
2196 //
2197 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2198 // scheduling there; however, this still leaves iSeries.)
2199 //
2200 // We use the same values for AIX and PASE.
// Maps Java thread priorities (index 0..CriticalPriority) to AIX SCHED_OTHER
// priorities within the documented 40..80 range (see the comment above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2220 
2221 static int prio_init() {
2222   if (ThreadPriorityPolicy == 1) {
2223     if (geteuid() != 0) {
2224       if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
2225         warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
2226                 "e.g., being the root user. If the necessary permission is not " \
2227                 "possessed, changes to priority will be silently ignored.");
2228       }
2229     }
2230   }
2231   if (UseCriticalJavaThreadPriority) {
2232     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
2233   }
2234   return 0;
2235 }
2236 
2237 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2238   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
2239   pthread_t thr = thread->osthread()->pthread_id();
2240   int policy = SCHED_OTHER;
2241   struct sched_param param;
2242   param.sched_priority = newpri;
2243   int ret = pthread_setschedparam(thr, policy, &param);
2244 
2245   if (ret != 0) {
2246     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2247         (int)thr, newpri, ret, os::errno_name(ret));
2248   }
2249   return (ret == 0) ? OS_OK : OS_ERR;
2250 }
2251 
2252 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2253   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
2254     *priority_ptr = java_to_os_priority[NormPriority];
2255     return OS_OK;
2256   }
2257   pthread_t thr = thread->osthread()->pthread_id();
2258   int policy = SCHED_OTHER;
2259   struct sched_param param;
2260   int ret = pthread_getschedparam(thr, &policy, &param);
2261   *priority_ptr = param.sched_priority;
2262 
2263   return (ret == 0) ? OS_OK : OS_ERR;
2264 }
2265 
// To install functions for atexit system call
// (extern "C" linkage is required because the address is handed to atexit(3)).
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
2272 
// Record the chosen page size. On AIX the allocation granularity always
// equals the page size, so both OSInfo values are set from one argument.
static void set_page_size(size_t page_size) {
  OSInfo::set_vm_page_size(page_size);
  OSInfo::set_vm_allocation_granularity(page_size);
}
2277 
2278 // This is called _before_ the most of global arguments have been parsed.
// Early (phase 1) platform initialization: detect AIX vs. PASE, scan the
// environment, decide the global page size, and initialize the platform
// libraries. Order matters - later steps depend on earlier ones.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  // (Used later by is_close_to_brk() to keep reservations away from the heap.)
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        set_page_size(64*K);
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        set_page_size(4*K);
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      set_page_size(4*K);
      FLAG_SET_ERGO(Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    set_page_size(64*K);
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(UseLargePages, false);
  // Register the single page size we decided on above.
  _page_sizes.add(os::vm_page_size());

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // _main_thread points to the thread that created/loaded the JVM.
  Aix::_main_thread = pthread_self();

  os::Posix::init();
}
2385 
// This is called _after_ the global arguments have been parsed.
// Phase 2 initialization: signal handling, stack size checks, file descriptor
// limits, and thread priority setup. Returns JNI_OK or JNI_ERR.
jint os::init_2(void) {

  // This could be set after os::Posix::init() but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  os::Posix::init_2();

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  if (PosixSignals::init() == JNI_ERR) {
    return JNI_ERR;
  }

  // Check and sets minimum stack sizes against command line options
  if (set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  // Not supported.
  FLAG_SET_ERGO(UseNUMA, false);
  FLAG_SET_ERGO(UseNUMAInterleaving, false);

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

  return JNI_OK;
}
2459 
2460 int os::active_processor_count() {
2461   // User has overridden the number of active processors
2462   if (ActiveProcessorCount > 0) {
2463     log_trace(os)("active_processor_count: "
2464                   "active processor count set by user : %d",
2465                   ActiveProcessorCount);
2466     return ActiveProcessorCount;
2467   }
2468 
2469   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
2470   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
2471   return online_cpus;
2472 }
2473 
2474 void os::set_native_thread_name(const char *name) {
2475   // Not yet implemented.
2476   return;
2477 }
2478 
2479 ////////////////////////////////////////////////////////////////////////////////
2480 // debug support
2481 
2482 bool os::find(address addr, outputStream* st) {
2483 
2484   st->print(PTR_FORMAT ": ", addr);
2485 
2486   loaded_module_t lm;
2487   if (LoadedLibraries::find_for_text_address(addr, &lm) ||
2488       LoadedLibraries::find_for_data_address(addr, &lm)) {
2489     st->print_cr("%s", lm.path);
2490     return true;
2491   }
2492 
2493   return false;
2494 }
2495 
2496 ////////////////////////////////////////////////////////////////////////////////
2497 // misc
2498 
2499 // This does not do anything on Aix. This is basically a hook for being
2500 // able to use structured exception handling (thread-local exception filters)
2501 // on, e.g., Win32.
2502 void
2503 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
2504                          JavaCallArguments* args, JavaThread* thread) {
2505   f(value, method, args, thread);
2506 }
2507 
// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// Opens 'path' with the given flags and mode, rejecting over-long paths and
// directories, and ensuring the close-on-exec flag is set on the returned
// descriptor. Returns the file descriptor, or -1 with errno set on failure.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  // AIX 7.X now supports O_CLOEXEC too, like modern Linux; but we have to be careful, see
  // IV90804: OPENING A FILE IN AFS WITH O_CLOEXEC FAILS WITH AN EINVAL ERROR APPLIES TO AIX 7100-04 17/04/14 PTF PECHANGE
  int oflag_with_o_cloexec = oflag | O_CLOEXEC;

  int fd = ::open(path, oflag_with_o_cloexec, mode);
  if (fd == -1) {
    // we might fail in the open call when O_CLOEXEC is set, so try again without (see IV90804)
    fd = ::open(path, oflag, mode);
    if (fd == -1) {
      return -1;
    }
  }

  // If the open succeeded, the file might still be a directory.
  // POSIX allows opening a directory for reading, but the JVM must not hand
  // out directory descriptors - report EISDIR instead.
  {
    struct stat buf64;
    int ret = ::fstat(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat itself failed - give up on this descriptor.
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.

  // Validate that the use of the O_CLOEXEC flag on open above worked.
  // Tri-state cache: 0 = not yet known, 1 = O_CLOEXEC works,
  // -1 = O_CLOEXEC is ignored and FD_CLOEXEC must be set manually.
  static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
  if (O_CLOEXEC_is_known_to_work == 0) {
    // First call: probe whether the kernel honored O_CLOEXEC.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      if ((flags & FD_CLOEXEC) != 0) {
        O_CLOEXEC_is_known_to_work = 1;
      } else { // it does not work
        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
        O_CLOEXEC_is_known_to_work = -1;
      }
    }
  } else if (O_CLOEXEC_is_known_to_work == -1) {
    // Known not to work: always set FD_CLOEXEC explicitly.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
  }

  return fd;
}
2582 
2583 // create binary file, rewriting existing file if required
2584 int os::create_binary_file(const char* path, bool rewrite_existing) {
2585   int oflags = O_WRONLY | O_CREAT;
2586   oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
2587   return ::open(path, oflags, S_IREAD | S_IWRITE);
2588 }
2589 
2590 // return current position of file pointer
2591 jlong os::current_file_offset(int fd) {
2592   return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
2593 }
2594 
2595 // move file pointer to the specified offset
2596 jlong os::seek_to_file_offset(int fd, jlong offset) {
2597   return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
2598 }
2599 
2600 // Map a block of memory.
2601 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
2602                         char *addr, size_t bytes, bool read_only,
2603                         bool allow_exec) {
2604   int prot;
2605   int flags = MAP_PRIVATE;
2606 
2607   if (read_only) {
2608     prot = PROT_READ;
2609     flags = MAP_SHARED;
2610   } else {
2611     prot = PROT_READ | PROT_WRITE;
2612     flags = MAP_PRIVATE;
2613   }
2614 
2615   if (allow_exec) {
2616     prot |= PROT_EXEC;
2617   }
2618 
2619   if (addr != nullptr) {
2620     flags |= MAP_FIXED;
2621   }
2622 
2623   // Allow anonymous mappings if 'fd' is -1.
2624   if (fd == -1) {
2625     flags |= MAP_ANONYMOUS;
2626   }
2627 
2628   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
2629                                      fd, file_offset);
2630   if (mapped_address == MAP_FAILED) {
2631     return nullptr;
2632   }
2633   return mapped_address;
2634 }
2635 
2636 // Remap a block of memory.
2637 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
2638                           char *addr, size_t bytes, bool read_only,
2639                           bool allow_exec) {
2640   // same as map_memory() on this OS
2641   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
2642                         allow_exec);
2643 }
2644 
2645 // Unmap a block of memory.
2646 bool os::pd_unmap_memory(char* addr, size_t bytes) {
2647   return munmap(addr, bytes) == 0;
2648 }
2649 
2650 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
2651 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
2652 // of a thread.
2653 //
2654 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
2655 // the fast estimate available on the platform.
2656 
2657 jlong os::current_thread_cpu_time() {
2658   // return user + sys since the cost is the same
2659   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
2660   assert(n >= 0, "negative CPU time");
2661   return n;
2662 }
2663 
2664 jlong os::thread_cpu_time(Thread* thread) {
2665   // consistent with what current_thread_cpu_time() returns
2666   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
2667   assert(n >= 0, "negative CPU time");
2668   return n;
2669 }
2670 
2671 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
2672   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
2673   assert(n >= 0, "negative CPU time");
2674   return n;
2675 }
2676 
2677 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
2678   bool error = false;
2679 
2680   jlong sys_time = 0;
2681   jlong user_time = 0;
2682 
2683   // Reimplemented using getthrds64().
2684   //
2685   // Works like this:
2686   // For the thread in question, get the kernel thread id. Then get the
2687   // kernel thread statistics using that id.
2688   //
2689   // This only works of course when no pthread scheduling is used,
2690   // i.e. there is a 1:1 relationship to kernel threads.
2691   // On AIX, see AIXTHREAD_SCOPE variable.
2692 
2693   pthread_t pthtid = thread->osthread()->pthread_id();
2694 
2695   // retrieve kernel thread id for the pthread:
2696   tid64_t tid = 0;
2697   struct __pthrdsinfo pinfo;
2698   // I just love those otherworldly IBM APIs which force me to hand down
2699   // dummy buffers for stuff I dont care for...
2700   char dummy[1];
2701   int dummy_size = sizeof(dummy);
2702   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
2703                           dummy, &dummy_size) == 0) {
2704     tid = pinfo.__pi_tid;
2705   } else {
2706     tty->print_cr("pthread_getthrds_np failed.");
2707     error = true;
2708   }
2709 
2710   // retrieve kernel timing info for that kernel thread
2711   if (!error) {
2712     struct thrdentry64 thrdentry;
2713     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
2714       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
2715       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
2716     } else {
2717       tty->print_cr("pthread_getthrds_np failed.");
2718       error = true;
2719     }
2720   }
2721 
2722   if (p_sys_time) {
2723     *p_sys_time = sys_time;
2724   }
2725 
2726   if (p_user_time) {
2727     *p_user_time = user_time;
2728   }
2729 
2730   if (error) {
2731     return false;
2732   }
2733 
2734   return true;
2735 }
2736 
2737 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
2738   jlong sys_time;
2739   jlong user_time;
2740 
2741   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
2742     return -1;
2743   }
2744 
2745   return user_sys_cpu_time ? sys_time + user_time : user_time;
2746 }
2747 
2748 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2749   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
2750   info_ptr->may_skip_backward = false;     // elapsed time not wall time
2751   info_ptr->may_skip_forward = false;      // elapsed time not wall time
2752   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
2753 }
2754 
2755 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2756   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
2757   info_ptr->may_skip_backward = false;     // elapsed time not wall time
2758   info_ptr->may_skip_forward = false;      // elapsed time not wall time
2759   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
2760 }
2761 
2762 bool os::is_thread_cpu_time_supported() {
2763   return true;
2764 }
2765 
2766 // System loadavg support. Returns -1 if load average cannot be obtained.
2767 // For now just return the system wide load average (no processor sets).
2768 int os::loadavg(double values[], int nelem) {
2769 
2770   guarantee(nelem >= 0 && nelem <= 3, "argument error");
2771   guarantee(values, "argument error");
2772 
2773   if (os::Aix::on_pase()) {
2774 
2775     // AS/400 PASE: use libo4 porting library
2776     double v[3] = { 0.0, 0.0, 0.0 };
2777 
2778     if (libo4::get_load_avg(v, v + 1, v + 2)) {
2779       for (int i = 0; i < nelem; i ++) {
2780         values[i] = v[i];
2781       }
2782       return nelem;
2783     } else {
2784       return -1;
2785     }
2786 
2787   } else {
2788 
2789     // AIX: use libperfstat
2790     libperfstat::cpuinfo_t ci;
2791     if (libperfstat::get_cpuinfo(&ci)) {
2792       for (int i = 0; i < nelem; i++) {
2793         values[i] = ci.loadavg[i];
2794       }
2795     } else {
2796       return -1;
2797     }
2798     return nelem;
2799   }
2800 }
2801 
2802 bool os::is_primordial_thread(void) {
2803   if (pthread_self() == (pthread_t)1) {
2804     return true;
2805   } else {
2806     return false;
2807   }
2808 }
2809 
// OS recognitions (PASE/AIX, OS level) call this before calling any
// one of Aix::on_pase(), Aix::os_version() static
// Determines via uname() whether we run on AIX or OS/400 (PASE) and fills in
// _on_pase and _os_version. Guarantees (fatal otherwise) that the detection
// succeeded and the OS level is supported.
void os::Aix::initialize_os_info() {

  assert(_on_pase == -1 && _os_version == 0, "already called.");

  struct utsname uts;
  memset(&uts, 0, sizeof(uts));
  strcpy(uts.sysname, "?");
  if (::uname(&uts) == -1) {
    trcVerbose("uname failed (%d)", errno);
    guarantee(0, "Could not determine whether we run on AIX or PASE");
  } else {
    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
               "node \"%s\" machine \"%s\"\n",
               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
    // uts.version carries the major, uts.release the minor version number.
    const int major = atoi(uts.version);
    assert(major > 0, "invalid OS version");
    const int minor = atoi(uts.release);
    assert(minor > 0, "invalid OS release");
    // _os_version layout: major in bits 24..31, minor in bits 16..23.
    // On AIX, modification and fix level are filled into the low bytes below.
    _os_version = (major << 24) | (minor << 16);
    char ver_str[20] = {0};
    const char* name_str = "unknown OS";
    if (strcmp(uts.sysname, "OS400") == 0) {
      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
      _on_pase = 1;
      if (os_version_short() < 0x0504) {
        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
        assert(false, "OS/400 release too old.");
      }
      name_str = "OS/400 (pase)";
      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
    } else if (strcmp(uts.sysname, "AIX") == 0) {
      // We run on AIX. We do not support versions older than AIX 7.1.
      _on_pase = 0;
      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
      odmWrapper::determine_os_kernel_version(&_os_version);
      if (os_version_short() < 0x0701) {
        trcVerbose("AIX releases older than AIX 7.1 are not supported.");
        assert(false, "AIX release too old.");
      }
      name_str = "AIX";
      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
    } else {
      // Neither "AIX" nor "OS400" - unexpected sysname.
      assert(false, "%s", name_str);
    }
    trcVerbose("We run on %s %s", name_str, ver_str);
  }

  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
} // end: os::Aix::initialize_os_info()
2862 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
// Inspect environment variables relevant to the VM (EXTSHM, XPG_SUS_ENV,
// QIBM_MULTI_THREADED, LDR_CNTRL, AIXTHREAD_GUARDPAGES), trace their values
// and reject unsupported settings.
void os::Aix::scan_environment() {

  char* p;
  int rc;

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
  if (p && strcasecmp(p, "ON") == 0) {
    _extshm = 1;
    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
    if (!AllowExtshm) {
      // We allow under certain conditions the user to continue. However, we want this
      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
      // that the VM is not able to allocate 64k pages for the heap.
      // We do not want to run with reduced performance.
      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
    }
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  if (os::Aix::on_pase()) {
    // PASE only: trace the multi-threading setting.
    p = ::getenv("QIBM_MULTI_THREADED");
    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
  }

  p = ::getenv("LDR_CNTRL");
  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
  // Warn about a known-problematic loader setting on OS/400 V7R1.
  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
    if (p && ::strstr(p, "TEXTPSIZE")) {
      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
        "you may experience hangs or crashes on OS/400 V7R1.");
    }
  }

  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");

} // end: os::Aix::scan_environment()
2933 
2934 // PASE: initialize the libo4 library (PASE porting library).
2935 void os::Aix::initialize_libo4() {
2936   guarantee(os::Aix::on_pase(), "OS/400 only.");
2937   if (!libo4::init()) {
2938     trcVerbose("libo4 initialization failed.");
2939     assert(false, "libo4 initialization failed");
2940   } else {
2941     trcVerbose("libo4 initialized.");
2942   }
2943 }
2944 
2945 // AIX: initialize the libperfstat library.
2946 void os::Aix::initialize_libperfstat() {
2947   assert(os::Aix::on_aix(), "AIX only");
2948   if (!libperfstat::init()) {
2949     trcVerbose("libperfstat initialization failed.");
2950     assert(false, "libperfstat initialization failed");
2951   } else {
2952     trcVerbose("libperfstat initialized.");
2953   }
2954 }
2955 
2956 /////////////////////////////////////////////////////////////////////////////
2957 // thread stack
2958 
2959 // Get the current stack base from the OS (actually, the pthread library).
2960 // Note: usually not page aligned.
2961 address os::current_stack_base() {
2962   AixMisc::stackbounds_t bounds;
2963   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
2964   guarantee(rc, "Unable to retrieve stack bounds.");
2965   return bounds.base;
2966 }
2967 
2968 // Get the current stack size from the OS (actually, the pthread library).
2969 // Returned size is such that (base - size) is always aligned to page size.
2970 size_t os::current_stack_size() {
2971   AixMisc::stackbounds_t bounds;
2972   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
2973   guarantee(rc, "Unable to retrieve stack bounds.");
2974   // Align the returned stack size such that the stack low address
2975   // is aligned to page size (Note: base is usually not and we do not care).
2976   // We need to do this because caller code will assume stack low address is
2977   // page aligned and will place guard pages without checking.
2978   address low = bounds.base - bounds.size;
2979   address low_aligned = (address)align_up(low, os::vm_page_size());
2980   size_t s = bounds.base - low_aligned;
2981   return s;
2982 }
2983 
2984 // Get the default path to the core file
2985 // Returns the length of the string
2986 int os::get_core_path(char* buffer, size_t bufferSize) {
2987   const char* p = get_current_directory(buffer, bufferSize);
2988 
2989   if (p == nullptr) {
2990     assert(p != nullptr, "failed to get current directory");
2991     return 0;
2992   }
2993 
2994   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
2995                                                p, current_process_id());
2996 
2997   return strlen(buffer);
2998 }
2999 
// Interactive debugging hook: appends a "do you want to debug?" prompt to
// 'buf' (which already contains the error text), shows it in a message box
// and, if the user agrees, launches dbx attached to this process.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  // Append the prompt after the existing message text.
  char *p = &buf[len];

  jio_snprintf(p, buflen -len,
                 "\n\n"
                 "Do you want to debug the problem?\n\n"
                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
                 "Otherwise, press RETURN to abort...",
                 os::current_process_id(),
                 os::current_thread_id(), thread_self());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // yes, user asked VM to launch debugger
    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());

    os::fork_and_exec(buf);
    // Reset so the caller does not treat the answer as "continue".
    yes = false;
  }
  return yes;
}
3024 
3025 static inline time_t get_mtime(const char* filename) {
3026   struct stat st;
3027   int ret = os::stat(filename, &st);
3028   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
3029   return st.st_mtime;
3030 }
3031 
3032 int os::compare_file_modified_times(const char* file1, const char* file2) {
3033   time_t t1 = get_mtime(file1);
3034   time_t t2 = get_mtime(file2);
3035   return t1 - t2;
3036 }
3037 
3038 bool os::supports_map_sync() {
3039   return false;
3040 }
3041 
// Empty on AIX - memory mapping diagnostics are not implemented here.
void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
3043 
#if INCLUDE_JFR

// Empty on AIX - no platform-specific memory information is reported to JFR.
void os::jfr_report_memory_info() {}

#endif // INCLUDE_JFR
3049