1 /*
   2  * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "jvm.h"
  37 #include "jvmtifiles/jvmti.h"
  38 #include "libo4.hpp"
  39 #include "libperfstat_aix.hpp"
  40 #include "libodm_aix.hpp"
  41 #include "loadlib_aix.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "misc_aix.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "os_aix.inline.hpp"
  48 #include "os_posix.hpp"
  49 #include "porting_aix.hpp"
  50 #include "prims/jniFastGetField.hpp"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.hpp"
  54 #include "runtime/globals.hpp"
  55 #include "runtime/globals_extension.hpp"
  56 #include "runtime/interfaceSupport.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/javaThread.hpp"
  60 #include "runtime/mutexLocker.hpp"
  61 #include "runtime/objectMonitor.hpp"
  62 #include "runtime/os.hpp"
  63 #include "runtime/osInfo.hpp"
  64 #include "runtime/osThread.hpp"
  65 #include "runtime/perfMemory.hpp"
  66 #include "runtime/safefetch.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/threadCritical.hpp"
  70 #include "runtime/threads.hpp"
  71 #include "runtime/timer.hpp"
  72 #include "runtime/vm_version.hpp"
  73 #include "services/attachListener.hpp"
  74 #include "services/runtimeService.hpp"
  75 #include "signals_posix.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/decoder.hpp"
  78 #include "utilities/defaultStream.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/growableArray.hpp"
  81 #include "utilities/vmError.hpp"
  82 
  83 // put OS-includes here (sorted alphabetically)
  84 #ifdef AIX_XLC_GE_17
  85 #include <alloca.h>
  86 #endif
  87 #include <errno.h>
  88 #include <fcntl.h>
  89 #include <inttypes.h>
  90 #include <poll.h>
  91 #include <procinfo.h>
  92 #include <pthread.h>
  93 #include <pwd.h>
  94 #include <semaphore.h>
  95 #include <signal.h>
  96 #include <stdint.h>
  97 #include <stdio.h>
  98 #include <string.h>
  99 #include <unistd.h>
 100 #include <sys/ioctl.h>
 101 #include <sys/ipc.h>
 102 #include <sys/mman.h>
 103 #include <sys/resource.h>
 104 #include <sys/select.h>
 105 #include <sys/shm.h>
 106 #include <sys/socket.h>
 107 #include <sys/stat.h>
 108 #include <sys/sysinfo.h>
 109 #include <sys/systemcfg.h>
 110 #include <sys/time.h>
 111 #include <sys/times.h>
 112 #include <sys/types.h>
 113 #include <sys/utsname.h>
 114 #include <sys/vminfo.h>
 115 
 116 // Missing prototypes for various system APIs.
 117 extern "C"
 118 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 119 
 120 #if !defined(_AIXVERSION_610)
 121 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 122 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 123 extern "C" int getargs(procsinfo*, int, char*, int);
 124 #endif
 125 
 126 #define MAX_PATH (2 * K)
 127 
 128 // for timer info max values which include all bits
 129 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 130 // for multipage initialization error analysis (in 'g_multipage_error')
 131 #define ERROR_MP_OS_TOO_OLD                          100
 132 #define ERROR_MP_EXTSHM_ACTIVE                       101
 133 #define ERROR_MP_VMGETINFO_FAILED                    102
 134 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 135 
 136 // excerpts from systemcfg.h that might be missing on older os levels
 137 #ifndef PV_7
 138   #define PV_7 0x200000          /* Power PC 7 */
 139 #endif
 140 #ifndef PV_7_Compat
 141   #define PV_7_Compat 0x208000   /* Power PC 7 */
 142 #endif
 143 #ifndef PV_8
 144   #define PV_8 0x300000          /* Power PC 8 */
 145 #endif
 146 #ifndef PV_8_Compat
 147   #define PV_8_Compat 0x308000   /* Power PC 8 */
 148 #endif
 149 #ifndef PV_9
 150   #define PV_9 0x400000          /* Power PC 9 */
 151 #endif
 152 #ifndef PV_9_Compat
 153   #define PV_9_Compat  0x408000  /* Power PC 9 */
 154 #endif
 155 
 156 
 157 static address resolve_function_descriptor_to_code_pointer(address p);
 158 
 159 static void vmembk_print_on(outputStream* os);
 160 
 161 ////////////////////////////////////////////////////////////////////////////////
 162 // global variables (for a description see os_aix.hpp)
 163 
// Total physical memory in bytes (filled in by initialize_system_info()).
julong    os::Aix::_physical_memory = 0;

// pthread id of the VM's main thread (set during startup; the code that
// assigns it is not in this chunk).
pthread_t os::Aix::_main_thread = ((pthread_t)0);

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;
 184 
 185 ////////////////////////////////////////////////////////////////////////////////
 186 // local variables
 187 
// Largest real-time reading handed out so far; updated elsewhere
// (presumably to keep time readings monotonic - usage not in this chunk).
static volatile jlong max_real_time = 0;

// Process break recorded at startup (used by is_close_to_brk() below).
static address g_brk_at_startup = nullptr;
 192 
 193 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 195 // therefore should not be defined in AIX class.
 196 //
 197 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 198 // latter two (16M "large" resp. 16G "huge" pages) require special
 199 // setup and are normally not available.
 200 //
 201 // AIX supports multiple page sizes per process, for:
 202 //  - Stack (of the primordial thread, so not relevant for us)
 203 //  - Data - data, bss, heap, for us also pthread stacks
 204 //  - Text - text code
 205 //  - shared memory
 206 //
 207 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 208 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 209 //
 210 // For shared memory, page size can be set dynamically via
 211 // shmctl(). Different shared memory regions can have different page
 212 // sizes.
 213 //
// More information can be found at the IBM info center:
 215 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 216 //
// Filled in once by query_multipage_support(); the (size_t)-1 initializers
// mean "not yet queried".
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 235 
 236 // We must not accidentally allocate memory close to the BRK - even if
 237 // that would work - because then we prevent the BRK segment from
 238 // growing which may result in a malloc OOM even though there is
 239 // enough memory. The problem only arises if we shmat() or mmap() at
 240 // a specific wish address, e.g. to place the heap in a
 241 // compressed-oops-friendly way.
 242 static bool is_close_to_brk(address a) {
 243   assert0(g_brk_at_startup != nullptr);
 244   if (a >= g_brk_at_startup &&
 245       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 246     return true;
 247   }
 248   return false;
 249 }
 250 
// On AIX, "free" memory is reported the same as available memory.
julong os::free_memory() {
  return Aix::available_memory();
}
 254 
// Platform-independent entry point; delegates to the AIX-specific query.
julong os::available_memory() {
  return Aix::available_memory();
}
 258 
 259 julong os::Aix::available_memory() {
 260   // Avoid expensive API call here, as returned value will always be null.
 261   if (os::Aix::on_pase()) {
 262     return 0x0LL;
 263   }
 264   os::Aix::meminfo_t mi;
 265   if (os::Aix::get_meminfo(&mi)) {
 266     return mi.real_free;
 267   } else {
 268     return ULONG_MAX;
 269   }
 270 }
 271 
// Platform-independent entry point; delegates to the AIX-specific value.
julong os::physical_memory() {
  return Aix::physical_memory();
}
 275 
 276 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 277 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 278 static bool my_disclaim64(char* addr, size_t size) {
 279 
 280   if (size == 0) {
 281     return true;
 282   }
 283 
 284   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 285   const unsigned int maxDisclaimSize = 0x40000000;
 286 
 287   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 288   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 289 
 290   char* p = addr;
 291 
 292   for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
 293     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 294       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 295       return false;
 296     }
 297     p += maxDisclaimSize;
 298   }
 299 
 300   if (lastDisclaimSize > 0) {
 301     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 302       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 303       return false;
 304     }
 305   }
 306 
 307   return true;
 308 }
 309 
 310 // Cpu architecture string
 311 #if defined(PPC32)
 312 static char cpu_arch[] = "ppc";
 313 #elif defined(PPC64)
 314 static char cpu_arch[] = "ppc64";
 315 #else
 316 #error Add appropriate cpu_arch setting
 317 #endif
 318 
 319 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 320 static int checked_vmgetinfo(void *out, int command, int arg) {
 321   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 322     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 323   }
 324   return ::vmgetinfo(out, command, arg);
 325 }
 326 
 327 // Given an address, returns the size of the page backing that address.
 328 size_t os::Aix::query_pagesize(void* addr) {
 329 
 330   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 331     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 332     return 4*K;
 333   }
 334 
 335   vm_page_info pi;
 336   pi.addr = (uint64_t)addr;
 337   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 338     return pi.pagesize;
 339   } else {
 340     assert(false, "vmgetinfo failed to retrieve page size");
 341     return 4*K;
 342   }
 343 }
 344 
 345 void os::Aix::initialize_system_info() {
 346 
 347   // Get the number of online(logical) cpus instead of configured.
 348   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 349   assert(_processor_count > 0, "_processor_count must be > 0");
 350 
 351   // Retrieve total physical storage.
 352   os::Aix::meminfo_t mi;
 353   if (!os::Aix::get_meminfo(&mi)) {
 354     assert(false, "os::Aix::get_meminfo failed.");
 355   }
 356   _physical_memory = (julong) mi.real_total;
 357 }
 358 
 359 // Helper function for tracing page sizes.
 360 static const char* describe_pagesize(size_t pagesize) {
 361   switch (pagesize) {
 362     case 4*K : return "4K";
 363     case 64*K: return "64K";
 364     case 16*M: return "16M";
 365     case 16*G: return "16G";
 366     default:
 367       assert(false, "surprise");
 368       return "??";
 369   }
 370 }
 371 
 372 // Probe OS for multipage support.
 373 // Will fill the global g_multipage_support structure.
 374 // Must be called before calling os::large_page_init().
 375 static void query_multipage_support() {
 376 
 377   guarantee(g_multipage_support.pagesize == (size_t)-1,
 378             "do not call twice");
 379 
 380   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 381 
 382   // This really would surprise me.
 383   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 384 
 385   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 386   // Default data page size is defined either by linker options (-bdatapsize)
 387   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 388   // default should be 4K.
 389   {
 390     void* p = ::malloc(16*M);
 391     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 392     ::free(p);
 393   }
 394 
 395   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 396   // Note that this is pure curiosity. We do not rely on default page size but set
 397   // our own page size after allocated.
 398   {
 399     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 400     guarantee(shmid != -1, "shmget failed");
 401     void* p = ::shmat(shmid, nullptr, 0);
 402     ::shmctl(shmid, IPC_RMID, nullptr);
 403     guarantee(p != (void*) -1, "shmat failed");
 404     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 405     ::shmdt(p);
 406   }
 407 
 408   // Before querying the stack page size, make sure we are not running as primordial
 409   // thread (because primordial thread's stack may have different page size than
 410   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 411   // number of reasons so we may just as well guarantee it here.
 412   guarantee0(!os::is_primordial_thread());
 413 
 414   // Query pthread stack page size. Should be the same as data page size because
 415   // pthread stacks are allocated from C-Heap.
 416   {
 417     int dummy = 0;
 418     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 419   }
 420 
 421   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 422   {
 423     address any_function =
 424       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 425     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 426   }
 427 
 428   // Now probe for support of 64K pages and 16M pages.
 429 
 430   // Before OS/400 V6R1, there is no support for pages other than 4K.
 431   if (os::Aix::on_pase_V5R4_or_older()) {
 432     trcVerbose("OS/400 < V6R1 - no large page support.");
 433     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 434     goto query_multipage_support_end;
 435   }
 436 
 437   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 438   {
 439     const int MAX_PAGE_SIZES = 4;
 440     psize_t sizes[MAX_PAGE_SIZES];
 441     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 442     if (num_psizes == -1) {
 443       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 444       trcVerbose("disabling multipage support.");
 445       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 446       goto query_multipage_support_end;
 447     }
 448     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 449     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 450     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 451     for (int i = 0; i < num_psizes; i ++) {
 452       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 453     }
 454 
 455     // Can we use 64K, 16M pages?
 456     for (int i = 0; i < num_psizes; i ++) {
 457       const size_t pagesize = sizes[i];
 458       if (pagesize != 64*K && pagesize != 16*M) {
 459         continue;
 460       }
 461       bool can_use = false;
 462       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 463       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 464         IPC_CREAT | S_IRUSR | S_IWUSR);
 465       guarantee0(shmid != -1); // Should always work.
 466       // Try to set pagesize.
 467       struct shmid_ds shm_buf = { };
 468       shm_buf.shm_pagesize = pagesize;
 469       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 470         const int en = errno;
 471         ::shmctl(shmid, IPC_RMID, nullptr); // As early as possible!
 472         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", errno);
 473       } else {
 474         // Attach and double check pageisze.
 475         void* p = ::shmat(shmid, nullptr, 0);
 476         ::shmctl(shmid, IPC_RMID, nullptr); // As early as possible!
 477         guarantee0(p != (void*) -1); // Should always work.
 478         const size_t real_pagesize = os::Aix::query_pagesize(p);
 479         if (real_pagesize != pagesize) {
 480           trcVerbose("real page size (" SIZE_FORMAT_X ") differs.", real_pagesize);
 481         } else {
 482           can_use = true;
 483         }
 484         ::shmdt(p);
 485       }
 486       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 487       if (pagesize == 64*K) {
 488         g_multipage_support.can_use_64K_pages = can_use;
 489       } else if (pagesize == 16*M) {
 490         g_multipage_support.can_use_16M_pages = can_use;
 491       }
 492     }
 493 
 494   } // end: check which pages can be used for shared memory
 495 
 496 query_multipage_support_end:
 497 
 498   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 499       describe_pagesize(g_multipage_support.pagesize));
 500   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 501       describe_pagesize(g_multipage_support.datapsize));
 502   trcVerbose("Text page size: %s",
 503       describe_pagesize(g_multipage_support.textpsize));
 504   trcVerbose("Thread stack page size (pthread): %s",
 505       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 506   trcVerbose("Default shared memory page size: %s",
 507       describe_pagesize(g_multipage_support.shmpsize));
 508   trcVerbose("Can use 64K pages dynamically with shared memory: %s",
 509       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 510   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 511       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 512   trcVerbose("Multipage error details: %d",
 513       g_multipage_support.error);
 514 
 515   // sanity checks
 516   assert0(g_multipage_support.pagesize == 4*K);
 517   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 518   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 519   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 520   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 521 
 522 }
 523 
// Derives and sets the core system properties (dll dir, java home, boot
// class path, library path, extension dirs) from the location of the JVM
// shared library and the LIBPATH environment variable.
void os::init_system_properties_values() {

#ifndef OVERRIDE_LIBPATH
  #define DEFAULT_LIBPATH "/lib:/usr/lib"
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several snprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != nullptr) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != nullptr) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Note: pslash here is still the result of the second strrchr() above;
    // only strip the next component if that second strip actually happened.
    if (pslash != nullptr) {
      pslash = strrchr(buf, '/');
      if (pslash != nullptr) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", nullptr);
    }
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == nullptr) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  size_t pathsize = strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1;
  char *ld_library_path = NEW_C_HEAP_ARRAY(char, pathsize, mtInternal);
  os::snprintf_checked(ld_library_path, pathsize, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  os::snprintf_checked(buf, bufsize, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 596 
 597 ////////////////////////////////////////////////////////////////////////////////
 598 // breakpoint support
 599 
// Triggers a programmatic breakpoint via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
 603 
// Empty function with a fixed, unmangled symbol name, so a debugger can
// conveniently set a breakpoint on it.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 607 
 608 // retrieve memory information.
 609 // Returns false if something went wrong;
 610 // content of pmi undefined in this case.
 611 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 612 
 613   assert(pmi, "get_meminfo: invalid parameter");
 614 
 615   memset(pmi, 0, sizeof(meminfo_t));
 616 
 617   if (os::Aix::on_pase()) {
 618     // On PASE, use the libo4 porting library.
 619 
 620     unsigned long long virt_total = 0;
 621     unsigned long long real_total = 0;
 622     unsigned long long real_free = 0;
 623     unsigned long long pgsp_total = 0;
 624     unsigned long long pgsp_free = 0;
 625     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 626       pmi->virt_total = virt_total;
 627       pmi->real_total = real_total;
 628       pmi->real_free = real_free;
 629       pmi->pgsp_total = pgsp_total;
 630       pmi->pgsp_free = pgsp_free;
 631       return true;
 632     }
 633     return false;
 634 
 635   } else {
 636 
 637     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 638     // See:
 639     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 640     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 641     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 642     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 643 
 644     perfstat_memory_total_t psmt;
 645     memset (&psmt, '\0', sizeof(psmt));
 646     const int rc = libperfstat::perfstat_memory_total(nullptr, &psmt, sizeof(psmt), 1);
 647     if (rc == -1) {
 648       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 649       assert(0, "perfstat_memory_total() failed");
 650       return false;
 651     }
 652 
 653     assert(rc == 1, "perfstat_memory_total() - weird return code");
 654 
 655     // excerpt from
 656     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 657     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 658     // The fields of perfstat_memory_total_t:
 659     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 660     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 661     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 662     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 663     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 664 
 665     pmi->virt_total = psmt.virt_total * 4096;
 666     pmi->real_total = psmt.real_total * 4096;
 667     pmi->real_free = psmt.real_free * 4096;
 668     pmi->pgsp_total = psmt.pgsp_total * 4096;
 669     pmi->pgsp_free = psmt.pgsp_free * 4096;
 670 
 671     return true;
 672 
 673   }
 674 } // end os::Aix::get_meminfo
 675 
 676 //////////////////////////////////////////////////////////////////////////////
 677 // create new thread
 678 
 679 // Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Records stack geometry, randomizes the stack offset, hooks the OSThread
// up with pthread/kernel thread ids, initializes signal mask and FPU state,
// then runs the thread's payload via Thread::call_run().
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  LogTarget(Info, os, thread) lt;
  if (lt.is_enabled()) {
    address low_address = thread->stack_end();
    address high_address = thread->stack_base();
    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
             ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
  }

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // Note: 'counter' is updated without synchronization; it only perturbs
  // the alloca offset, so races are harmless.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  PosixSignals::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = nullptr;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return 0;
}
 746 
 747 bool os::create_thread(Thread* thread, ThreadType thr_type,
 748                        size_t req_stack_size) {
 749 
 750   assert(thread->osthread() == nullptr, "caller responsible");
 751 
 752   // Allocate the OSThread object.
 753   OSThread* osthread = new (std::nothrow) OSThread();
 754   if (osthread == nullptr) {
 755     return false;
 756   }
 757 
 758   // Set the correct thread state.
 759   osthread->set_thread_type(thr_type);
 760 
 761   // Initial state is ALLOCATED but not INITIALIZED
 762   osthread->set_state(ALLOCATED);
 763 
 764   thread->set_osthread(osthread);
 765 
 766   // Init thread attributes.
 767   pthread_attr_t attr;
 768   pthread_attr_init(&attr);
 769   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 770 
 771   // Make sure we run in 1:1 kernel-user-thread mode.
 772   if (os::Aix::on_aix()) {
 773     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 774     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 775   }
 776 
 777   // Start in suspended state, and in os::thread_start, wake the thread up.
 778   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 779 
 780   // Calculate stack size if it's not specified by caller.
 781   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 782 
 783   // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
 784   // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
 785   // This very much looks like a pthread lib error. As a workaround, increase the stack size
 786   // by 64K for small thread stacks (arbitrarily chosen to be < 4MB)
 787   if (stack_size < 4096 * K) {
 788     stack_size += 64 * K;
 789   }
 790 
 791   // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
 792   // thread size in attr unchanged. If this is the minimal stack size as set
 793   // by pthread_attr_init this leads to crashes after thread creation. E.g. the
 794   // guard pages might not fit on the tiny stack created.
 795   int ret = pthread_attr_setstacksize(&attr, stack_size);
 796   if (ret != 0) {
 797     log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
 798                             (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
 799                             stack_size / K);
 800     thread->set_osthread(nullptr);
 801     delete osthread;
 802     return false;
 803   }
 804 
 805   // Save some cycles and a page by disabling OS guard pages where we have our own
 806   // VM guard pages (in java threads). For other threads, keep system default guard
 807   // pages in place.
 808   if (thr_type == java_thread || thr_type == compiler_thread) {
 809     ret = pthread_attr_setguardsize(&attr, 0);
 810   }
 811 
 812   ResourceMark rm;
 813   pthread_t tid = 0;
 814 
 815   if (ret == 0) {
 816     int limit = 3;
 817     do {
 818       ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
 819     } while (ret == EAGAIN && limit-- > 0);
 820   }
 821 
 822   if (ret == 0) {
 823     char buf[64];
 824     log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
 825                          thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 826   } else {
 827     char buf[64];
 828     log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%d=%s) for attributes: %s.",
 829                             thread->name(), ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 830     // Log some OS information which might explain why creating the thread failed.
 831     log_warning(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 832     log_warning(os, thread)("Checking JVM parameter MaxExpectedDataSegmentSize (currently " SIZE_FORMAT "k)  might be helpful", MaxExpectedDataSegmentSize/K);
 833     LogStream st(Log(os, thread)::info());
 834     os::Posix::print_rlimit_info(&st);
 835     os::print_memory_info(&st);
 836   }
 837 
 838   pthread_attr_destroy(&attr);
 839 
 840   if (ret != 0) {
 841     // Need to clean up stuff we've allocated so far.
 842     thread->set_osthread(nullptr);
 843     delete osthread;
 844     return false;
 845   }
 846 
 847   // OSThread::thread_id is the pthread id.
 848   osthread->set_thread_id(tid);
 849 
 850   return true;
 851 }
 852 
 853 /////////////////////////////////////////////////////////////////////////////
 854 // attach existing thread
 855 
// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  // The primordial thread is already running; all that is needed is to
  // attach it. Must only be called on the primordial thread itself.
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
 861 
 862 bool os::create_attached_thread(JavaThread* thread) {
 863 #ifdef ASSERT
 864     thread->verify_not_published();
 865 #endif
 866 
 867   // Allocate the OSThread object
 868   OSThread* osthread = new (std::nothrow) OSThread();
 869 
 870   if (osthread == nullptr) {
 871     return false;
 872   }
 873 
 874   const pthread_t pthread_id = ::pthread_self();
 875   const tid_t kernel_thread_id = ::thread_self();
 876 
 877   // OSThread::thread_id is the pthread id.
 878   osthread->set_thread_id(pthread_id);
 879 
 880   // .. but keep kernel thread id too for diagnostics
 881   osthread->set_kernel_thread_id(kernel_thread_id);
 882 
 883   // initialize floating point control register
 884   os::Aix::init_thread_fpu_state();
 885 
 886   // Initial thread state is RUNNABLE
 887   osthread->set_state(RUNNABLE);
 888 
 889   thread->set_osthread(osthread);
 890 
 891   if (UseNUMA) {
 892     int lgrp_id = os::numa_get_group_id();
 893     if (lgrp_id != -1) {
 894       thread->set_lgrp_id(lgrp_id);
 895     }
 896   }
 897 
 898   // initialize signal mask for this thread
 899   // and save the caller's signal mask
 900   PosixSignals::hotspot_sigmask(thread);
 901 
 902   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread  id: " UINTX_FORMAT
 903                        ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).",
 904                        os::current_thread_id(), (uintx) kernel_thread_id,
 905                        p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K);
 906 
 907   return true;
 908 }
 909 
 910 void os::pd_start_thread(Thread* thread) {
 911   int status = pthread_continue_np(thread->osthread()->pthread_id());
 912   assert(status == 0, "thr_continue failed");
 913 }
 914 
 915 // Free OS resources related to the OSThread
 916 void os::free_thread(OSThread* osthread) {
 917   assert(osthread != nullptr, "osthread not set");
 918 
 919   // We are told to free resources of the argument thread,
 920   // but we can only really operate on the current thread.
 921   assert(Thread::current()->osthread() == osthread,
 922          "os::free_thread but not current thread");
 923 
 924   // Restore caller's signal mask
 925   sigset_t sigmask = osthread->caller_sigmask();
 926   pthread_sigmask(SIG_SETMASK, &sigmask, nullptr);
 927 
 928   delete osthread;
 929 }
 930 
 931 ////////////////////////////////////////////////////////////////////////////////
 932 // time support
 933 
 934 double os::elapsedVTime() {
 935   struct rusage usage;
 936   int retval = getrusage(RUSAGE_THREAD, &usage);
 937   if (retval == 0) {
 938     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
 939   } else {
 940     // better than nothing, but not much
 941     return elapsedTime();
 942   }
 943 }
 944 
 945 // We use mread_real_time here.
 946 // On AIX: If the CPU has a time register, the result will be RTC_POWER and
// it has to be converted to real time. AIX documentation suggests doing
// this unconditionally, so we do it.
 949 //
 950 // See: https://www.ibm.com/support/knowledgecenter/ssw_aix_61/com.ibm.aix.basetrf2/read_real_time.htm
 951 //
 952 // On PASE: mread_real_time will always return RTC_POWER_PC data, so no
 953 // conversion is necessary. However, mread_real_time will not return
 954 // monotonic results but merely matches read_real_time. So we need a tweak
 955 // to ensure monotonic results.
 956 //
// For PASE no public documentation exists, just informal word from IBM.
jlong os::javaTimeNanos() {
  timebasestruct_t time;
  int rc = mread_real_time(&time, TIMEBASE_SZ);
  if (os::Aix::on_pase()) {
    // PASE: values are usable directly, but not guaranteed monotonic across
    // threads. Enforce monotonicity via a global high-water mark (max_real_time).
    assert(rc == RTC_POWER, "expected time format RTC_POWER from mread_real_time in PASE");
    jlong now = jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
    jlong prev = max_real_time;
    if (now <= prev) {
      return prev;   // same or retrograde time
    }
    jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obsv" is >= now then
    // we should return "obsv".  If the CAS failed and now > obsv > prev then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obsv.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obsv value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obsv and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv;
  } else {
    // AIX: convert to second/nanosecond representation if needed
    // (see the block comment above this function).
    if (rc != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "error calling time_base_to_time()");
    }
    return jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
  }
}
 991 
 992 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 993   info_ptr->max_value = ALL_64_BITS;
 994   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
 995   info_ptr->may_skip_backward = false;
 996   info_ptr->may_skip_forward = false;
 997   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
 998 }
 999 
1000 intx os::current_thread_id() {
1001   return (intx)pthread_self();
1002 }
1003 
1004 int os::current_process_id() {
1005   return getpid();
1006 }
1007 
1008 // DLL functions
1009 
1010 // This must be hard coded because it's the system's temporary
1011 // directory not the java application's temp directory, ala java.io.tmpdir.
1012 const char* os::get_temp_directory() { return "/tmp"; }
1013 
// Refresh the cached module list (loadlib_aix) so subsequent symbol
// lookups see all currently loaded libraries.
void os::prepare_native_symbols() {
  LoadedLibraries::reload();
}
1017 
1018 // Check if addr is inside libjvm.so.
1019 bool os::address_is_in_vm(address addr) {
1020 
1021   // Input could be a real pc or a function pointer literal. The latter
1022   // would be a function descriptor residing in the data segment of a module.
1023   loaded_module_t lm;
1024   if (LoadedLibraries::find_for_text_address(addr, &lm)) {
1025     return lm.is_in_vm;
1026   } else if (LoadedLibraries::find_for_data_address(addr, &lm)) {
1027     return lm.is_in_vm;
1028   } else {
1029     return false;
1030   }
1031 
1032 }
1033 
1034 // Resolve an AIX function descriptor literal to a code pointer.
1035 // If the input is a valid code pointer to a text segment of a loaded module,
1036 //   it is returned unchanged.
1037 // If the input is a valid AIX function descriptor, it is resolved to the
1038 //   code entry point.
1039 // If the input is neither a valid function descriptor nor a valid code pointer,
1040 //   null is returned.
1041 static address resolve_function_descriptor_to_code_pointer(address p) {
1042 
1043   if (LoadedLibraries::find_for_text_address(p, nullptr)) {
1044     // It is a real code pointer.
1045     return p;
1046   } else if (LoadedLibraries::find_for_data_address(p, nullptr)) {
1047     // Pointer to data segment, potential function descriptor.
1048     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1049     if (LoadedLibraries::find_for_text_address(code_entry, nullptr)) {
1050       // It is a function descriptor.
1051       return code_entry;
1052     }
1053   }
1054 
1055   return nullptr;
1056 }
1057 
1058 bool os::dll_address_to_function_name(address addr, char *buf,
1059                                       int buflen, int *offset,
1060                                       bool demangle) {
1061   if (offset) {
1062     *offset = -1;
1063   }
1064   // Buf is not optional, but offset is optional.
1065   assert(buf != nullptr, "sanity check");
1066   buf[0] = '\0';
1067 
1068   // Resolve function ptr literals first.
1069   addr = resolve_function_descriptor_to_code_pointer(addr);
1070   if (!addr) {
1071     return false;
1072   }
1073 
1074   return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
1075 }
1076 
1077 bool os::dll_address_to_library_name(address addr, char* buf,
1078                                      int buflen, int* offset) {
1079   if (offset) {
1080     *offset = -1;
1081   }
1082   // Buf is not optional, but offset is optional.
1083   assert(buf != nullptr, "sanity check");
1084   buf[0] = '\0';
1085 
1086   // Resolve function ptr literals first.
1087   addr = resolve_function_descriptor_to_code_pointer(addr);
1088   if (!addr) {
1089     return false;
1090   }
1091 
1092   address  base = nullptr;
1093   if (!AixSymbols::get_module_name_and_base(addr, buf, buflen, &base)
1094       || base == nullptr) {
1095     return false;
1096   }
1097   assert(addr >= base && addr <= base + INT_MAX, "address not in library text range");
1098   if (offset != nullptr) {
1099     *offset = addr - base;
1100   }
1101 
1102   return true;
1103 }
1104 
1105 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1106 
1107   log_info(os)("attempting shared library load of %s", filename);
1108 
1109   if (ebuf && ebuflen > 0) {
1110     ebuf[0] = '\0';
1111     ebuf[ebuflen - 1] = '\0';
1112   }
1113 
1114   if (!filename || strlen(filename) == 0) {
1115     if (ebuf != nullptr && ebuflen > 0) {
1116       ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1117     }
1118     return nullptr;
1119   }
1120 
1121   // RTLD_LAZY has currently the same behavior as RTLD_NOW
1122   // The dl is loaded immediately with all its dependants.
1123   int dflags = RTLD_LAZY;
1124   // check for filename ending with ')', it indicates we want to load
1125   // a MEMBER module that is a member of an archive.
1126   int flen = strlen(filename);
1127   if (flen > 0 && filename[flen - 1] == ')') {
1128     dflags |= RTLD_MEMBER;
1129   }
1130 
1131   void* result;
1132   const char* error_report = nullptr;
1133   result = Aix_dlopen(filename, dflags, &error_report);
1134   if (result != nullptr) {
1135     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
1136     // Reload dll cache. Don't do this in signal handling.
1137     LoadedLibraries::reload();
1138     log_info(os)("shared library load of %s was successful", filename);
1139     return result;
1140   } else {
1141     // error analysis when dlopen fails
1142     if (error_report == nullptr) {
1143       error_report = "dlerror returned no error description";
1144     }
1145     if (ebuf != nullptr && ebuflen > 0) {
1146       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1147                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1148     }
1149     Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
1150     log_info(os)("shared library load of %s failed, %s", filename, error_report);
1151   }
1152   return nullptr;
1153 }
1154 
// Print a header followed by one line per loaded module
// (from the loadlib_aix module cache).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1159 
1160 void os::get_summary_os_info(char* buf, size_t buflen) {
1161   // There might be something more readable than uname results for AIX.
1162   struct utsname name;
1163   uname(&name);
1164   snprintf(buf, buflen, "%s %s", name.release, name.version);
1165 }
1166 
1167 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1168 
1169   if (!LoadedLibraries::for_each(callback, param)) {
1170     return -1;
1171   }
1172 
1173   return 0;
1174 }
1175 
1176 void os::print_os_info_brief(outputStream* st) {
1177   uint32_t ver = os::Aix::os_version();
1178   st->print_cr("AIX kernel version %u.%u.%u.%u",
1179                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1180 
1181   os::Posix::print_uname_info(st);
1182 
1183   // Linux uses print_libversion_info(st); here.
1184 }
1185 
1186 void os::print_os_info(outputStream* st) {
1187   st->print_cr("OS:");
1188 
1189   os::Posix::print_uname_info(st);
1190 
1191   uint32_t ver = os::Aix::os_version();
1192   st->print_cr("AIX kernel version %u.%u.%u.%u",
1193                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1194 
1195   os::Posix::print_uptime_info(st);
1196 
1197   os::Posix::print_rlimit_info(st);
1198 
1199   os::Posix::print_load_average(st);
1200 
1201   // _SC_THREAD_THREADS_MAX is the maximum number of threads within a process.
1202   long tmax = sysconf(_SC_THREAD_THREADS_MAX);
1203   st->print_cr("maximum #threads within a process:%ld", tmax);
1204 
1205   // print wpar info
1206   libperfstat::wparinfo_t wi;
1207   if (libperfstat::get_wparinfo(&wi)) {
1208     st->print_cr("wpar info");
1209     st->print_cr("name: %s", wi.name);
1210     st->print_cr("id:   %d", wi.wpar_id);
1211     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1212   }
1213 
1214   VM_Version::print_platform_virtualization_info(st);
1215 }
1216 
1217 void os::print_memory_info(outputStream* st) {
1218 
1219   st->print_cr("Memory:");
1220 
1221   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1222     describe_pagesize(g_multipage_support.pagesize));
1223   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1224     describe_pagesize(g_multipage_support.datapsize));
1225   st->print_cr("  Text page size:                         %s",
1226     describe_pagesize(g_multipage_support.textpsize));
1227   st->print_cr("  Thread stack page size (pthread):       %s",
1228     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1229   st->print_cr("  Default shared memory page size:        %s",
1230     describe_pagesize(g_multipage_support.shmpsize));
1231   st->print_cr("  Can use 64K pages dynamically with shared memory:  %s",
1232     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1233   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1234     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1235   st->print_cr("  Multipage error: %d",
1236     g_multipage_support.error);
1237   st->cr();
1238   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1239 
1240   // print out LDR_CNTRL because it affects the default page sizes
1241   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1242   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1243 
1244   // Print out EXTSHM because it is an unsupported setting.
1245   const char* const extshm = ::getenv("EXTSHM");
1246   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1247   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1248     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1249   }
1250 
1251   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1252   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1253   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1254       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1255   st->cr();
1256 
1257   os::Aix::meminfo_t mi;
1258   if (os::Aix::get_meminfo(&mi)) {
1259     if (os::Aix::on_aix()) {
1260       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1261       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1262       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1263       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1264     } else {
1265       // PASE - Numbers are result of QWCRSSTS; they mean:
1266       // real_total: Sum of all system pools
1267       // real_free: always 0
1268       // pgsp_total: we take the size of the system ASP
1269       // pgsp_free: size of system ASP times percentage of system ASP unused
1270       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1271       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1272       st->print_cr("%% system asp used : %.2f",
1273         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1274     }
1275   }
1276   st->cr();
1277 
1278   // Print program break.
1279   st->print_cr("Program break at VM startup: " PTR_FORMAT ".", p2i(g_brk_at_startup));
1280   address brk_now = (address)::sbrk(0);
1281   if (brk_now != (address)-1) {
1282     st->print_cr("Program break now          : " PTR_FORMAT " (distance: " SIZE_FORMAT "k).",
1283                  p2i(brk_now), (size_t)((brk_now - g_brk_at_startup) / K));
1284   }
1285   st->print_cr("MaxExpectedDataSegmentSize    : " SIZE_FORMAT "k.", MaxExpectedDataSegmentSize / K);
1286   st->cr();
1287 
1288   // Print segments allocated with os::reserve_memory.
1289   st->print_cr("internal virtual memory regions used by vm:");
1290   vmembk_print_on(st);
1291 }
1292 
1293 // Get a string for the cpuinfo that is a summary of the cpu type
1294 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1295   // read _system_configuration.version
1296   switch (_system_configuration.version) {
1297   case PV_9:
1298     strncpy(buf, "Power PC 9", buflen);
1299     break;
1300   case PV_8:
1301     strncpy(buf, "Power PC 8", buflen);
1302     break;
1303   case PV_7:
1304     strncpy(buf, "Power PC 7", buflen);
1305     break;
1306   case PV_6_1:
1307     strncpy(buf, "Power PC 6 DD1.x", buflen);
1308     break;
1309   case PV_6:
1310     strncpy(buf, "Power PC 6", buflen);
1311     break;
1312   case PV_5:
1313     strncpy(buf, "Power PC 5", buflen);
1314     break;
1315   case PV_5_2:
1316     strncpy(buf, "Power PC 5_2", buflen);
1317     break;
1318   case PV_5_3:
1319     strncpy(buf, "Power PC 5_3", buflen);
1320     break;
1321   case PV_5_Compat:
1322     strncpy(buf, "PV_5_Compat", buflen);
1323     break;
1324   case PV_6_Compat:
1325     strncpy(buf, "PV_6_Compat", buflen);
1326     break;
1327   case PV_7_Compat:
1328     strncpy(buf, "PV_7_Compat", buflen);
1329     break;
1330   case PV_8_Compat:
1331     strncpy(buf, "PV_8_Compat", buflen);
1332     break;
1333   case PV_9_Compat:
1334     strncpy(buf, "PV_9_Compat", buflen);
1335     break;
1336   default:
1337     strncpy(buf, "unknown", buflen);
1338   }
1339 }
1340 
// Platform-specific CPU details; intentionally empty on AIX.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do beyond of what os::print_cpu_info() does.
}
1344 
1345 static char saved_jvm_path[MAXPATHLEN] = {0};
1346 
// Find the full path to the current module, libjvm.so.
// buf must be at least MAXPATHLEN bytes; the result is cached in
// saved_jvm_path after the first successful computation.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the containing module via the address of one of our own functions,
  // then canonicalize its filename into buf.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk p back over the last (up to) four '/'-separated path components.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != nullptr && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == nullptr) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // Replace buf with the canonicalized JAVA_HOME path.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == nullptr) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == nullptr) {
            return;
          }
        }
      }
    }
  }

  // Cache the computed path. Note that the early returns above skip caching,
  // so failed lookups will be retried on the next call.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1428 
1429 ////////////////////////////////////////////////////////////////////////////////
1430 // Virtual Memory
1431 
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Values for vmembk_t::type - they record how a region was reserved,
// and therefore how it must be released (munmap vs shmdt).
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1436 
1437 struct vmembk_t {
1438   int type;         // 1 - mmap, 2 - shmat
1439   char* addr;
1440   size_t size;      // Real size, may be larger than usersize.
1441   size_t pagesize;  // page size of area
1442   vmembk_t* next;
1443 
1444   bool contains_addr(char* p) const {
1445     return p >= addr && p < (addr + size);
1446   }
1447 
1448   bool contains_range(char* p, size_t s) const {
1449     return contains_addr(p) && contains_addr(p + s - 1);
1450   }
1451 
1452   void print_on(outputStream* os) const {
1453     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1454       " bytes, %d %s pages), %s",
1455       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1456       (type == VMEM_SHMATED ? "shmat" : "mmap")
1457     );
1458   }
1459 
1460   // Check that range is a sub range of memory block (or equal to memory block);
1461   // also check that range is fully page aligned to the page size if the block.
1462   void assert_is_valid_subrange(char* p, size_t s) const {
1463     if (!contains_range(p, s)) {
1464       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1465               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1466               p2i(p), p2i(p + s), p2i(addr), p2i(addr + size));
1467       guarantee0(false);
1468     }
1469     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1470       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1471               " aligned to pagesize (%lu)", p2i(p), p2i(p + s), (unsigned long) pagesize);
1472       guarantee0(false);
1473     }
1474   }
1475 };
1476 
// Global list of reserved regions, protected by a critical section.
static struct {
  vmembk_t* first;        // head of singly-linked list of bookkeeping entries
  MiscUtils::CritSect cs; // guards all list accesses
} vmem;
1481 
1482 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1483   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1484   assert0(p);
1485   if (p) {
1486     MiscUtils::AutoCritSect lck(&vmem.cs);
1487     p->addr = addr; p->size = size;
1488     p->pagesize = pagesize;
1489     p->type = type;
1490     p->next = vmem.first;
1491     vmem.first = p;
1492   }
1493 }
1494 
1495 static vmembk_t* vmembk_find(char* addr) {
1496   MiscUtils::AutoCritSect lck(&vmem.cs);
1497   for (vmembk_t* p = vmem.first; p; p = p->next) {
1498     if (p->addr <= addr && (p->addr + p->size) > addr) {
1499       return p;
1500     }
1501   }
1502   return nullptr;
1503 }
1504 
1505 static void vmembk_remove(vmembk_t* p0) {
1506   MiscUtils::AutoCritSect lck(&vmem.cs);
1507   assert0(p0);
1508   assert0(vmem.first); // List should not be empty.
1509   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1510     if (*pp == p0) {
1511       *pp = p0->next;
1512       ::free(p0);
1513       return;
1514     }
1515   }
1516   assert0(false); // Not found?
1517 }
1518 
1519 static void vmembk_print_on(outputStream* os) {
1520   MiscUtils::AutoCritSect lck(&vmem.cs);
1521   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1522     vmi->print_on(os);
1523     os->cr();
1524   }
1525 }
1526 
// Reserve and attach a section of System V memory.
// If <requested_addr> is not null, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is null, function will attach the memory anywhere.
static char* reserve_shmated_memory (size_t bytes, char* requested_addr) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT "...", bytes, p2i(requested_addr));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != nullptr && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
    // Since we treat an attach to the wrong address as an error later anyway,
    // we return null here
    return nullptr;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_up(bytes, 64*K);

  // Reserve the shared segment.
  // Note: IPC_PRIVATE always creates a fresh segment; it never reuses an existing key.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return nullptr;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment before attaching it.
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = 64*K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / (64*K), errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;  // preserve errno across the shmctl call below

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
  // (Removing the id does not detach our mapping; the segment is destroyed once
  // the last attach is gone, so this only prevents leaking the global id.)
  if (::shmctl(shmid, IPC_RMID, nullptr) == -1) {
    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", p2i(requested_addr), errno_shmat);
    return nullptr;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != (size_t)shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT, real_pagesize);
  }

  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
      p2i(addr), p2i(addr + size - 1), size, size/real_pagesize, describe_pagesize(real_pagesize));
  } else {
    if (requested_addr != nullptr) {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, p2i(requested_addr));
    } else {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
    }
  }

  // book-keeping
  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}
1620 
1621 static bool release_shmated_memory(char* addr, size_t size) {
1622 
1623   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1624     p2i(addr), p2i(addr + size - 1));
1625 
1626   bool rc = false;
1627 
1628   // TODO: is there a way to verify shm size without doing bookkeeping?
1629   if (::shmdt(addr) != 0) {
1630     trcVerbose("error (%d).", errno);
1631   } else {
1632     trcVerbose("ok.");
1633     rc = true;
1634   }
1635   return rc;
1636 }
1637 
1638 static bool uncommit_shmated_memory(char* addr, size_t size) {
1639   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1640     p2i(addr), p2i(addr + size - 1));
1641 
1642   const bool rc = my_disclaim64(addr, size);
1643 
1644   if (!rc) {
1645     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", p2i(addr), size);
1646     return false;
1647   }
1648   return true;
1649 }
1650 
1651 ////////////////////////////////  mmap-based routines /////////////////////////////////
1652 
1653 // Reserve memory via mmap.
1654 // If <requested_addr> is given, an attempt is made to attach at the given address.
1655 // Failing that, memory is allocated at any address.
1656 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
1657   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT "...",
1658     bytes, p2i(requested_addr));
1659 
1660   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
1661     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", p2i(requested_addr));
1662     return nullptr;
1663   }
1664 
1665   // We must prevent anyone from attaching too close to the
1666   // BRK because that may cause malloc OOM.
1667   if (requested_addr != nullptr && is_close_to_brk((address)requested_addr)) {
1668     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
1669     // Since we treat an attach to the wrong address as an error later anyway,
1670     // we return null here
1671     return nullptr;
1672   }
1673 
1674   // In 64K mode, we lie and claim the global page size (os::vm_page_size()) is 64K
1675   //  (complicated story). This mostly works just fine since 64K is a multiple of the
1676   //  actual 4K lowest page size. Only at a few seams light shines thru, e.g. when
1677   //  calling mmap. mmap will return memory aligned to the lowest pages size - 4K -
1678   //  so we must make sure - transparently - that the caller only ever sees 64K
1679   //  aligned mapping start addresses.
1680   const size_t alignment = os::vm_page_size();
1681 
1682   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
1683   const size_t size = align_up(bytes, os::vm_page_size());
1684 
1685   // alignment: Allocate memory large enough to include an aligned range of the right size and
1686   // cut off the leading and trailing waste pages.
1687   assert0(alignment != 0 && is_aligned_to(alignment, os::vm_page_size())); // see above
1688   const size_t extra_size = size + alignment;
1689 
1690   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
1691   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
1692   int flags = MAP_ANONYMOUS | MAP_SHARED;
1693 
1694   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
1695   // it means if wishaddress is given but MAP_FIXED is not set.
1696   //
1697   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
1698   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
1699   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
1700   // get clobbered.
1701   if (requested_addr != nullptr) {
1702     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
1703       flags |= MAP_FIXED;
1704     }
1705   }
1706 
1707   char* addr = (char*)::mmap(requested_addr, extra_size,
1708       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
1709 
1710   if (addr == MAP_FAILED) {
1711     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", p2i(requested_addr), size, errno);
1712     return nullptr;
1713   } else if (requested_addr != nullptr && addr != requested_addr) {
1714     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) succeeded, but at a different address than requested (" PTR_FORMAT "), will unmap",
1715                p2i(requested_addr), size, p2i(addr));
1716     ::munmap(addr, extra_size);
1717     return nullptr;
1718   }
1719 
1720   // Handle alignment.
1721   char* const addr_aligned = align_up(addr, alignment);
1722   const size_t waste_pre = addr_aligned - addr;
1723   char* const addr_aligned_end = addr_aligned + size;
1724   const size_t waste_post = extra_size - waste_pre - size;
1725   if (waste_pre > 0) {
1726     ::munmap(addr, waste_pre);
1727   }
1728   if (waste_post > 0) {
1729     ::munmap(addr_aligned_end, waste_post);
1730   }
1731   addr = addr_aligned;
1732 
1733   trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
1734     p2i(addr), p2i(addr + bytes), bytes);
1735 
1736   // bookkeeping
1737   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
1738 
1739   // Test alignment, see above.
1740   assert0(is_aligned_to(addr, os::vm_page_size()));
1741 
1742   return addr;
1743 }
1744 
1745 static bool release_mmaped_memory(char* addr, size_t size) {
1746   assert0(is_aligned_to(addr, os::vm_page_size()));
1747   assert0(is_aligned_to(size, os::vm_page_size()));
1748 
1749   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1750     p2i(addr), p2i(addr + size - 1));
1751   bool rc = false;
1752 
1753   if (::munmap(addr, size) != 0) {
1754     trcVerbose("failed (%d)\n", errno);
1755     rc = false;
1756   } else {
1757     trcVerbose("ok.");
1758     rc = true;
1759   }
1760 
1761   return rc;
1762 }
1763 
1764 static bool uncommit_mmaped_memory(char* addr, size_t size) {
1765 
1766   assert0(is_aligned_to(addr, os::vm_page_size()));
1767   assert0(is_aligned_to(size, os::vm_page_size()));
1768 
1769   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1770     p2i(addr), p2i(addr + size - 1));
1771   bool rc = false;
1772 
1773   // Uncommit mmap memory with msync MS_INVALIDATE.
1774   if (::msync(addr, size, MS_INVALIDATE) != 0) {
1775     trcVerbose("failed (%d)\n", errno);
1776     rc = false;
1777   } else {
1778     trcVerbose("ok.");
1779     rc = true;
1780   }
1781 
1782   return rc;
1783 }
1784 
#ifdef PRODUCT
// Print a best-effort warning when committing memory failed; used to enrich
// the message of vm_exit_out_of_memory() in product builds
// (see os::pd_commit_memory_or_exit).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
          os::errno_name(err), err);
}
#endif
1793 
// Commit the given range, or terminate the VM with <mesg> if that fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != nullptr, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
1803 
1804 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1805 
1806   assert(is_aligned_to(addr, os::vm_page_size()),
1807     "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1808     p2i(addr), os::vm_page_size());
1809   assert(is_aligned_to(size, os::vm_page_size()),
1810     "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1811     size, os::vm_page_size());
1812 
1813   vmembk_t* const vmi = vmembk_find(addr);
1814   guarantee0(vmi);
1815   vmi->assert_is_valid_subrange(addr, size);
1816 
1817   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", p2i(addr), p2i(addr + size - 1));
1818 
1819   if (UseExplicitCommit) {
1820     // AIX commits memory on touch. So, touch all pages to be committed.
1821     for (char* p = addr; p < (addr + size); p += 4*K) {
1822       *p = '\0';
1823     }
1824   }
1825 
1826   return true;
1827 }
1828 
// Commit with an alignment hint; the hint is irrelevant on AIX, so simply
// forward to the plain version.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
1832 
// Commit-or-exit with an alignment hint; forwards to the plain version.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
1839 
1840 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
1841   assert(is_aligned_to(addr, os::vm_page_size()),
1842     "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1843     p2i(addr), os::vm_page_size());
1844   assert(is_aligned_to(size, os::vm_page_size()),
1845     "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
1846     size, os::vm_page_size());
1847 
1848   // Dynamically do different things for mmap/shmat.
1849   const vmembk_t* const vmi = vmembk_find(addr);
1850   guarantee0(vmi);
1851   vmi->assert_is_valid_subrange(addr, size);
1852 
1853   if (vmi->type == VMEM_SHMATED) {
1854     return uncommit_shmated_memory(addr, size);
1855   } else {
1856     return uncommit_mmaped_memory(addr, size);
1857   }
1858 }
1859 
// Must never be called on AIX (stack pages need not be committed); reaching
// this is a VM bug.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
1865 
// Counterpart of pd_create_stack_guard_pages - equally unused on AIX.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
1871 
// Intentionally a no-op: realignment hints are ignored on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
1874 
// Intentionally a no-op: free-memory hints are ignored on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
1877 
// NUMA is not supported on this port (UseNUMA is forced off in os::init_2),
// so this is a no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
1880 
// No-op: NUMA is not supported on this port.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
1883 
// Without NUMA support the topology can never change.
bool os::numa_topology_changed() {
  return false;
}
1887 
// Report a single (trivial) locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
1891 
// Every thread belongs to the single trivial locality group 0.
int os::numa_get_group_id() {
  return 0;
}
1895 
1896 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
1897   if (size > 0) {
1898     ids[0] = 0;
1899     return 1;
1900   }
1901   return 0;
1902 }
1903 
// Every address maps to the single trivial locality group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}
1907 
// Not supported on this port; returning false tells the caller no ids
// were produced.
bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, size_t count) {
  return false;
}
1911 
// No per-page information is available on this port; report the whole range
// as scanned by returning <end> without filling in page_found.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
1915 
1916 // Reserves and attaches a shared memory segment.
1917 char* os::pd_reserve_memory(size_t bytes, bool exec) {
1918   // Always round to os::vm_page_size(), which may be larger than 4K.
1919   bytes = align_up(bytes, os::vm_page_size());
1920 
1921   // In 4K mode always use mmap.
1922   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
1923   if (os::vm_page_size() == 4*K) {
1924     return reserve_mmaped_memory(bytes, nullptr /* requested_addr */);
1925   } else {
1926     if (bytes >= Use64KPagesThreshold) {
1927       return reserve_shmated_memory(bytes, nullptr /* requested_addr */);
1928     } else {
1929       return reserve_mmaped_memory(bytes, nullptr /* requested_addr */);
1930     }
1931   }
1932 }
1933 
// Release [addr, addr+size), dispatching on the reservation type.
// Full releases drop the bookkeeping entry; partial releases adjust it.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  // NOTE(review): rounding happens after the subrange check above - this
  // assumes callers already pass page-aligned values; confirm.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // In mmap-mode:
    //  - If the user wants to release the full range, we do that and remove the mapping.
    //  - If the user wants to release part of the range, we release that part, but need
    //    to adjust bookkeeping.
    assert(is_aligned(size, 4 * K), "Sanity");
    rc = release_mmaped_memory(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      remove_bookkeeping = true;
    } else {
      if (addr == vmi->addr && size < vmi->size) {
        // Chopped from head
        vmi->addr += size;
        vmi->size -= size;
      } else if (addr + size == vmi->addr + vmi->size) {
        // Chopped from tail
        vmi->size -= size;
      } else {
        // releasing a mapping in the middle of the original mapping:
        // For now we forbid this, since this is an invalid scenario
        // (the bookkeeping is easy enough to fix if needed but there
        //  is no use case for it; any occurrence is likely an error.)
        ShouldNotReachHere();
      }
    }
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
1993 
1994 static bool checked_mprotect(char* addr, size_t size, int prot) {
1995 
1996   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
1997   // not tell me if protection failed when trying to protect an un-protectable range.
1998   //
1999   // This means if the memory was allocated using shmget/shmat, protection won't work
2000   // but mprotect will still return 0:
2001   //
2002   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2003 
2004   Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
2005   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2006 
2007   if (!rc) {
2008     const char* const s_errno = os::errno_name(errno);
2009     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2010     return false;
2011   }
2012 
2013   // mprotect success check
2014   //
2015   // Mprotect said it changed the protection but can I believe it?
2016   //
2017   // To be sure I need to check the protection afterwards. Try to
2018   // read from protected memory and check whether that causes a segfault.
2019   //
2020   if (!os::Aix::xpg_sus_mode()) {
2021 
2022     const bool read_protected =
2023       (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2024        SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2025 
2026     if (prot & PROT_READ) {
2027       rc = !read_protected;
2028     } else {
2029       rc = read_protected;
2030     }
2031 
2032     if (!rc) {
2033       if (os::Aix::on_pase()) {
2034         // There is an issue on older PASE systems where mprotect() will return success but the
2035         // memory will not be protected.
2036         // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2037         // machines; we only see it rarely, when using mprotect() to protect the guard page of
2038         // a stack. It is an OS error.
2039         //
2040         // A valid strategy is just to try again. This usually works. :-/
2041 
2042         ::usleep(1000);
2043         Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
2044         if (::mprotect(addr, size, prot) == 0) {
2045           const bool read_protected_2 =
2046             (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2047             SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2048           rc = true;
2049         }
2050       }
2051     }
2052   }
2053 
2054   assert(rc == true, "mprotect failed.");
2055 
2056   return rc;
2057 }
2058 
2059 // Set protections specified
2060 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2061   unsigned int p = 0;
2062   switch (prot) {
2063   case MEM_PROT_NONE: p = PROT_NONE; break;
2064   case MEM_PROT_READ: p = PROT_READ; break;
2065   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2066   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2067   default:
2068     ShouldNotReachHere();
2069   }
2070   // is_committed is unused.
2071   return checked_mprotect(addr, size, p);
2072 }
2073 
// Remove all access rights from the given range (used for guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2077 
// Restore full (rwx) access rights to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2081 
// Large page support

// Stays 0 on AIX - large_page_init() below never sets it.
static size_t _large_page_size = 0;
2085 
// Enable large page support if OS allows that.
// On AIX, page size selection happens during os::init() (via
// query_multipage_support), so there is nothing left to do here.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2090 
// Huge pages are not used on AIX; any call indicates a VM bug.
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on AIX.");
  return nullptr;
}
2095 
// Counterpart of pd_reserve_memory_special - equally unreachable on AIX.
bool os::pd_release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on AIX.");
  return false;
}
2100 
// Always 0 on AIX - _large_page_size is never set (see large_page_init).
size_t os::large_page_size() {
  return _large_page_size;
}
2104 
// Queried by shared code; AIX has no huge page support, so the answer is no.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2109 
// Queried by shared code; AIX has no huge page support, so the answer is no.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2114 
2115 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
2116   assert(file_desc >= 0, "file_desc is not valid");
2117   char* result = nullptr;
2118 
2119   // Always round to os::vm_page_size(), which may be larger than 4K.
2120   bytes = align_up(bytes, os::vm_page_size());
2121   result = reserve_mmaped_memory(bytes, requested_addr);
2122 
2123   if (result != nullptr) {
2124     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
2125       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2126     }
2127   }
2128   return result;
2129 }
2130 
2131 // Reserve memory at an arbitrary address, only if that area is
2132 // available (and not reserved for something else).
2133 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
2134   char* addr = nullptr;
2135 
2136   // Always round to os::vm_page_size(), which may be larger than 4K.
2137   bytes = align_up(bytes, os::vm_page_size());
2138 
2139   // In 4K mode always use mmap.
2140   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2141   if (os::vm_page_size() == 4*K) {
2142     return reserve_mmaped_memory(bytes, requested_addr);
2143   } else {
2144     if (bytes >= Use64KPagesThreshold) {
2145       return reserve_shmated_memory(bytes, requested_addr);
2146     } else {
2147       return reserve_mmaped_memory(bytes, requested_addr);
2148     }
2149   }
2150 
2151   return addr;
2152 }
2153 
// Used to convert frequent JVM_Yield() to nops
// (controlled by the DontYieldALot flag).
bool os::dont_yield() {
  return DontYieldALot;
}
2158 
// Yield the processor without any VM-level bookkeeping.
void os::naked_yield() {
  sched_yield();
}
2162 
2163 ////////////////////////////////////////////////////////////////////////////////
2164 // thread priority support
2165 
// From AIX manpage to pthread_setschedparam
// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
//
// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
// range from 40 to 80, where 40 is the least favored priority and 80
// is the most favored."
//
// (Actually, I doubt this even has an impact on AIX, as we do kernel
// scheduling there; however, this still leaves iSeries.)
//
// We use the same values for AIX and PASE.
//
// Index: Java priority (0 .. CriticalPriority);
// value: native priority within the SCHED_OTHER band above.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2197 
2198 static int prio_init() {
2199   if (ThreadPriorityPolicy == 1) {
2200     if (geteuid() != 0) {
2201       if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
2202         warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
2203                 "e.g., being the root user. If the necessary permission is not " \
2204                 "possessed, changes to priority will be silently ignored.");
2205       }
2206     }
2207   }
2208   if (UseCriticalJavaThreadPriority) {
2209     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
2210   }
2211   return 0;
2212 }
2213 
2214 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2215   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
2216   pthread_t thr = thread->osthread()->pthread_id();
2217   int policy = SCHED_OTHER;
2218   struct sched_param param;
2219   param.sched_priority = newpri;
2220   int ret = pthread_setschedparam(thr, policy, &param);
2221 
2222   if (ret != 0) {
2223     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2224         (int)thr, newpri, ret, os::errno_name(ret));
2225   }
2226   return (ret == 0) ? OS_OK : OS_ERR;
2227 }
2228 
2229 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2230   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
2231     *priority_ptr = java_to_os_priority[NormPriority];
2232     return OS_OK;
2233   }
2234   pthread_t thr = thread->osthread()->pthread_id();
2235   int policy = SCHED_OTHER;
2236   struct sched_param param;
2237   int ret = pthread_getschedparam(thr, &policy, &param);
2238   *priority_ptr = param.sched_priority;
2239 
2240   return (ret == 0) ? OS_OK : OS_ERR;
2241 }
2242 
// To install functions for atexit system call
extern "C" {
  // C-linkage wrapper around perfMemory_exit() so it can be registered
  // with ::atexit() (done in os::init_2).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
2249 
// Record the chosen page size; this port uses the same value for the VM
// page size and the allocation granularity.
static void set_page_size(size_t page_size) {
  OSInfo::set_vm_page_size(page_size);
  OSInfo::set_vm_allocation_granularity(page_size);
}
2254 
// This is called _before_ most of the global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        set_page_size(64*K);
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        set_page_size(4*K);
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      set_page_size(4*K);
      FLAG_SET_ERGO(Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    set_page_size(64*K);
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(UseLargePages, false);
  _page_sizes.add(os::vm_page_size());

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // _main_thread points to the thread that created/loaded the JVM.
  Aix::_main_thread = pthread_self();

  // Finally, run the platform-independent POSIX initialization.
  os::Posix::init();
}
2362 
// This is called _after_ the global arguments have been parsed.
// Returns JNI_OK on success, JNI_ERR if a required subsystem failed
// to initialize.
jint os::init_2(void) {

  // This could be set after os::Posix::init() but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  os::Posix::init_2();

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  // Initialize POSIX signal support; bail out on failure.
  if (PosixSignals::init() == JNI_ERR) {
    return JNI_ERR;
  }

  // Check and sets minimum stack sizes against command line options
  if (set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  // Not supported.
  FLAG_SET_ERGO(UseNUMA, false);
  FLAG_SET_ERGO(UseNUMAInterleaving, false);

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

  return JNI_OK;
}
2436 
2437 int os::active_processor_count() {
2438   // User has overridden the number of active processors
2439   if (ActiveProcessorCount > 0) {
2440     log_trace(os)("active_processor_count: "
2441                   "active processor count set by user : %d",
2442                   ActiveProcessorCount);
2443     return ActiveProcessorCount;
2444   }
2445 
2446   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
2447   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
2448   return online_cpus;
2449 }
2450 
2451 void os::set_native_thread_name(const char *name) {
2452   // Not yet implemented.
2453   return;
2454 }
2455 
2456 ////////////////////////////////////////////////////////////////////////////////
2457 // debug support
2458 
2459 bool os::find(address addr, outputStream* st) {
2460 
2461   st->print(PTR_FORMAT ": ", addr);
2462 
2463   loaded_module_t lm;
2464   if (LoadedLibraries::find_for_text_address(addr, &lm) ||
2465       LoadedLibraries::find_for_data_address(addr, &lm)) {
2466     st->print_cr("%s", lm.path);
2467     return true;
2468   }
2469 
2470   return false;
2471 }
2472 
2473 ////////////////////////////////////////////////////////////////////////////////
2474 // misc
2475 
2476 // This does not do anything on Aix. This is basically a hook for being
2477 // able to use structured exception handling (thread-local exception filters)
2478 // on, e.g., Win32.
2479 void
2480 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
2481                          JavaCallArguments* args, JavaThread* thread) {
2482   f(value, method, args, thread);
2483 }
2484 
2485 // This code originates from JDK's sysOpen and open64_w
2486 // from src/solaris/hpi/src/system_md.c
2487 
2488 int os::open(const char *path, int oflag, int mode) {
2489 
2490   if (strlen(path) > MAX_PATH - 1) {
2491     errno = ENAMETOOLONG;
2492     return -1;
2493   }
2494   // AIX 7.X now supports O_CLOEXEC too, like modern Linux; but we have to be careful, see
2495   // IV90804: OPENING A FILE IN AFS WITH O_CLOEXEC FAILS WITH AN EINVAL ERROR APPLIES TO AIX 7100-04 17/04/14 PTF PECHANGE
2496   int oflag_with_o_cloexec = oflag | O_CLOEXEC;
2497 
2498   int fd = ::open64(path, oflag_with_o_cloexec, mode);
2499   if (fd == -1) {
2500     // we might fail in the open call when O_CLOEXEC is set, so try again without (see IV90804)
2501     fd = ::open64(path, oflag, mode);
2502     if (fd == -1) {
2503       return -1;
2504     }
2505   }
2506 
2507   // If the open succeeded, the file might still be a directory.
2508   {
2509     struct stat64 buf64;
2510     int ret = ::fstat64(fd, &buf64);
2511     int st_mode = buf64.st_mode;
2512 
2513     if (ret != -1) {
2514       if ((st_mode & S_IFMT) == S_IFDIR) {
2515         errno = EISDIR;
2516         ::close(fd);
2517         return -1;
2518       }
2519     } else {
2520       ::close(fd);
2521       return -1;
2522     }
2523   }
2524 
2525   // All file descriptors that are opened in the JVM and not
2526   // specifically destined for a subprocess should have the
2527   // close-on-exec flag set. If we don't set it, then careless 3rd
2528   // party native code might fork and exec without closing all
2529   // appropriate file descriptors (e.g. as we do in closeDescriptors in
2530   // UNIXProcess.c), and this in turn might:
2531   //
2532   // - cause end-of-file to fail to be detected on some file
2533   //   descriptors, resulting in mysterious hangs, or
2534   //
2535   // - might cause an fopen in the subprocess to fail on a system
2536   //   suffering from bug 1085341.
2537 
2538   // Validate that the use of the O_CLOEXEC flag on open above worked.
2539   static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
2540   if (O_CLOEXEC_is_known_to_work == 0) {
2541     int flags = ::fcntl(fd, F_GETFD);
2542     if (flags != -1) {
2543       if ((flags & FD_CLOEXEC) != 0) {
2544         O_CLOEXEC_is_known_to_work = 1;
2545       } else { // it does not work
2546         ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
2547         O_CLOEXEC_is_known_to_work = -1;
2548       }
2549     }
2550   } else if (O_CLOEXEC_is_known_to_work == -1) {
2551     int flags = ::fcntl(fd, F_GETFD);
2552     if (flags != -1) {
2553       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
2554     }
2555   }
2556 
2557   return fd;
2558 }
2559 
2560 // create binary file, rewriting existing file if required
2561 int os::create_binary_file(const char* path, bool rewrite_existing) {
2562   int oflags = O_WRONLY | O_CREAT;
2563   oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
2564   return ::open64(path, oflags, S_IREAD | S_IWRITE);
2565 }
2566 
2567 // return current position of file pointer
2568 jlong os::current_file_offset(int fd) {
2569   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
2570 }
2571 
2572 // move file pointer to the specified offset
2573 jlong os::seek_to_file_offset(int fd, jlong offset) {
2574   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
2575 }
2576 
2577 // Map a block of memory.
2578 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
2579                         char *addr, size_t bytes, bool read_only,
2580                         bool allow_exec) {
2581   int prot;
2582   int flags = MAP_PRIVATE;
2583 
2584   if (read_only) {
2585     prot = PROT_READ;
2586     flags = MAP_SHARED;
2587   } else {
2588     prot = PROT_READ | PROT_WRITE;
2589     flags = MAP_PRIVATE;
2590   }
2591 
2592   if (allow_exec) {
2593     prot |= PROT_EXEC;
2594   }
2595 
2596   if (addr != nullptr) {
2597     flags |= MAP_FIXED;
2598   }
2599 
2600   // Allow anonymous mappings if 'fd' is -1.
2601   if (fd == -1) {
2602     flags |= MAP_ANONYMOUS;
2603   }
2604 
2605   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
2606                                      fd, file_offset);
2607   if (mapped_address == MAP_FAILED) {
2608     return nullptr;
2609   }
2610   return mapped_address;
2611 }
2612 
2613 // Remap a block of memory.
2614 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
2615                           char *addr, size_t bytes, bool read_only,
2616                           bool allow_exec) {
2617   // same as map_memory() on this OS
2618   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
2619                         allow_exec);
2620 }
2621 
2622 // Unmap a block of memory.
2623 bool os::pd_unmap_memory(char* addr, size_t bytes) {
2624   return munmap(addr, bytes) == 0;
2625 }
2626 
2627 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
2628 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
2629 // of a thread.
2630 //
2631 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
2632 // the fast estimate available on the platform.
2633 
2634 jlong os::current_thread_cpu_time() {
2635   // return user + sys since the cost is the same
2636   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
2637   assert(n >= 0, "negative CPU time");
2638   return n;
2639 }
2640 
2641 jlong os::thread_cpu_time(Thread* thread) {
2642   // consistent with what current_thread_cpu_time() returns
2643   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
2644   assert(n >= 0, "negative CPU time");
2645   return n;
2646 }
2647 
2648 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
2649   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
2650   assert(n >= 0, "negative CPU time");
2651   return n;
2652 }
2653 
2654 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
2655   bool error = false;
2656 
2657   jlong sys_time = 0;
2658   jlong user_time = 0;
2659 
2660   // Reimplemented using getthrds64().
2661   //
2662   // Works like this:
2663   // For the thread in question, get the kernel thread id. Then get the
2664   // kernel thread statistics using that id.
2665   //
2666   // This only works of course when no pthread scheduling is used,
2667   // i.e. there is a 1:1 relationship to kernel threads.
2668   // On AIX, see AIXTHREAD_SCOPE variable.
2669 
2670   pthread_t pthtid = thread->osthread()->pthread_id();
2671 
2672   // retrieve kernel thread id for the pthread:
2673   tid64_t tid = 0;
2674   struct __pthrdsinfo pinfo;
2675   // I just love those otherworldly IBM APIs which force me to hand down
2676   // dummy buffers for stuff I dont care for...
2677   char dummy[1];
2678   int dummy_size = sizeof(dummy);
2679   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
2680                           dummy, &dummy_size) == 0) {
2681     tid = pinfo.__pi_tid;
2682   } else {
2683     tty->print_cr("pthread_getthrds_np failed.");
2684     error = true;
2685   }
2686 
2687   // retrieve kernel timing info for that kernel thread
2688   if (!error) {
2689     struct thrdentry64 thrdentry;
2690     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
2691       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
2692       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
2693     } else {
2694       tty->print_cr("pthread_getthrds_np failed.");
2695       error = true;
2696     }
2697   }
2698 
2699   if (p_sys_time) {
2700     *p_sys_time = sys_time;
2701   }
2702 
2703   if (p_user_time) {
2704     *p_user_time = user_time;
2705   }
2706 
2707   if (error) {
2708     return false;
2709   }
2710 
2711   return true;
2712 }
2713 
2714 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
2715   jlong sys_time;
2716   jlong user_time;
2717 
2718   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
2719     return -1;
2720   }
2721 
2722   return user_sys_cpu_time ? sys_time + user_time : user_time;
2723 }
2724 
2725 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2726   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
2727   info_ptr->may_skip_backward = false;     // elapsed time not wall time
2728   info_ptr->may_skip_forward = false;      // elapsed time not wall time
2729   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
2730 }
2731 
2732 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2733   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
2734   info_ptr->may_skip_backward = false;     // elapsed time not wall time
2735   info_ptr->may_skip_forward = false;      // elapsed time not wall time
2736   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
2737 }
2738 
2739 bool os::is_thread_cpu_time_supported() {
2740   return true;
2741 }
2742 
2743 // System loadavg support. Returns -1 if load average cannot be obtained.
2744 // For now just return the system wide load average (no processor sets).
2745 int os::loadavg(double values[], int nelem) {
2746 
2747   guarantee(nelem >= 0 && nelem <= 3, "argument error");
2748   guarantee(values, "argument error");
2749 
2750   if (os::Aix::on_pase()) {
2751 
2752     // AS/400 PASE: use libo4 porting library
2753     double v[3] = { 0.0, 0.0, 0.0 };
2754 
2755     if (libo4::get_load_avg(v, v + 1, v + 2)) {
2756       for (int i = 0; i < nelem; i ++) {
2757         values[i] = v[i];
2758       }
2759       return nelem;
2760     } else {
2761       return -1;
2762     }
2763 
2764   } else {
2765 
2766     // AIX: use libperfstat
2767     libperfstat::cpuinfo_t ci;
2768     if (libperfstat::get_cpuinfo(&ci)) {
2769       for (int i = 0; i < nelem; i++) {
2770         values[i] = ci.loadavg[i];
2771       }
2772     } else {
2773       return -1;
2774     }
2775     return nelem;
2776   }
2777 }
2778 
2779 bool os::is_primordial_thread(void) {
2780   if (pthread_self() == (pthread_t)1) {
2781     return true;
2782   } else {
2783     return false;
2784   }
2785 }
2786 
2787 // OS recognitions (PASE/AIX, OS level) call this before calling any
2788 // one of Aix::on_pase(), Aix::os_version() static
void os::Aix::initialize_os_info() {

  // Must only run once: on_pase() and os_version() rely on the values set here.
  assert(_on_pase == -1 && _os_version == 0, "already called.");

  struct utsname uts;
  // Defensive pre-initialization in case uname() fails without filling the struct.
  memset(&uts, 0, sizeof(uts));
  strcpy(uts.sysname, "?");
  if (::uname(&uts) == -1) {
    trcVerbose("uname failed (%d)", errno);
    guarantee(0, "Could not determine whether we run on AIX or PASE");
  } else {
    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
               "node \"%s\" machine \"%s\"\n",
               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
    const int major = atoi(uts.version);
    assert(major > 0, "invalid OS version");
    const int minor = atoi(uts.release);
    assert(minor > 0, "invalid OS release");
    // Version layout: major in the top byte, release in the next byte.
    // For AIX the two low bytes (modification, fix level) are filled in below
    // by odmWrapper::determine_os_kernel_version().
    _os_version = (major << 24) | (minor << 16);
    char ver_str[20] = {0};
    const char* name_str = "unknown OS";
    if (strcmp(uts.sysname, "OS400") == 0) {
      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
      _on_pase = 1;
      if (os_version_short() < 0x0504) {
        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
        assert(false, "OS/400 release too old.");
      }
      name_str = "OS/400 (pase)";
      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
    } else if (strcmp(uts.sysname, "AIX") == 0) {
      // We run on AIX. We do not support versions older than AIX 7.1.
      _on_pase = 0;
      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
      odmWrapper::determine_os_kernel_version(&_os_version);
      if (os_version_short() < 0x0701) {
        trcVerbose("AIX releases older than AIX 7.1 are not supported.");
        assert(false, "AIX release too old.");
      }
      name_str = "AIX";
      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
    } else {
      // Neither AIX nor PASE: unsupported system.
      assert(false, "%s", name_str);
    }
    trcVerbose("We run on %s %s", name_str, ver_str);
  }

  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
} // end: os::Aix::initialize_os_info()
2839 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
2844 void os::Aix::scan_environment() {
2845 
2846   char* p;
2847   int rc;
2848 
2849   // Warn explicitly if EXTSHM=ON is used. That switch changes how
2850   // System V shared memory behaves. One effect is that page size of
2851   // shared memory cannot be change dynamically, effectivly preventing
2852   // large pages from working.
2853   // This switch was needed on AIX 32bit, but on AIX 64bit the general
2854   // recommendation is (in OSS notes) to switch it off.
2855   p = ::getenv("EXTSHM");
2856   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
2857   if (p && strcasecmp(p, "ON") == 0) {
2858     _extshm = 1;
2859     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
2860     if (!AllowExtshm) {
2861       // We allow under certain conditions the user to continue. However, we want this
2862       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
2863       // that the VM is not able to allocate 64k pages for the heap.
2864       // We do not want to run with reduced performance.
2865       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
2866     }
2867   } else {
2868     _extshm = 0;
2869   }
2870 
2871   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
2872   // Not tested, not supported.
2873   //
2874   // Note that it might be worth the trouble to test and to require it, if only to
2875   // get useful return codes for mprotect.
2876   //
2877   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
2878   // exec() ? before loading the libjvm ? ....)
2879   p = ::getenv("XPG_SUS_ENV");
2880   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
2881   if (p && strcmp(p, "ON") == 0) {
2882     _xpg_sus_mode = 1;
2883     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
2884     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
2885     // clobber address ranges. If we ever want to support that, we have to do some
2886     // testing first.
2887     guarantee(false, "XPG_SUS_ENV=ON not supported");
2888   } else {
2889     _xpg_sus_mode = 0;
2890   }
2891 
2892   if (os::Aix::on_pase()) {
2893     p = ::getenv("QIBM_MULTI_THREADED");
2894     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
2895   }
2896 
2897   p = ::getenv("LDR_CNTRL");
2898   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
2899   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
2900     if (p && ::strstr(p, "TEXTPSIZE")) {
2901       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
2902         "you may experience hangs or crashes on OS/400 V7R1.");
2903     }
2904   }
2905 
2906   p = ::getenv("AIXTHREAD_GUARDPAGES");
2907   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
2908 
2909 } // end: os::Aix::scan_environment()
2910 
2911 // PASE: initialize the libo4 library (PASE porting library).
2912 void os::Aix::initialize_libo4() {
2913   guarantee(os::Aix::on_pase(), "OS/400 only.");
2914   if (!libo4::init()) {
2915     trcVerbose("libo4 initialization failed.");
2916     assert(false, "libo4 initialization failed");
2917   } else {
2918     trcVerbose("libo4 initialized.");
2919   }
2920 }
2921 
2922 // AIX: initialize the libperfstat library.
2923 void os::Aix::initialize_libperfstat() {
2924   assert(os::Aix::on_aix(), "AIX only");
2925   if (!libperfstat::init()) {
2926     trcVerbose("libperfstat initialization failed.");
2927     assert(false, "libperfstat initialization failed");
2928   } else {
2929     trcVerbose("libperfstat initialized.");
2930   }
2931 }
2932 
2933 /////////////////////////////////////////////////////////////////////////////
2934 // thread stack
2935 
2936 // Get the current stack base from the OS (actually, the pthread library).
2937 // Note: usually not page aligned.
2938 address os::current_stack_base() {
2939   AixMisc::stackbounds_t bounds;
2940   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
2941   guarantee(rc, "Unable to retrieve stack bounds.");
2942   return bounds.base;
2943 }
2944 
2945 // Get the current stack size from the OS (actually, the pthread library).
2946 // Returned size is such that (base - size) is always aligned to page size.
2947 size_t os::current_stack_size() {
2948   AixMisc::stackbounds_t bounds;
2949   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
2950   guarantee(rc, "Unable to retrieve stack bounds.");
2951   // Align the returned stack size such that the stack low address
2952   // is aligned to page size (Note: base is usually not and we do not care).
2953   // We need to do this because caller code will assume stack low address is
2954   // page aligned and will place guard pages without checking.
2955   address low = bounds.base - bounds.size;
2956   address low_aligned = (address)align_up(low, os::vm_page_size());
2957   size_t s = bounds.base - low_aligned;
2958   return s;
2959 }
2960 
2961 // Get the default path to the core file
2962 // Returns the length of the string
2963 int os::get_core_path(char* buffer, size_t bufferSize) {
2964   const char* p = get_current_directory(buffer, bufferSize);
2965 
2966   if (p == nullptr) {
2967     assert(p != nullptr, "failed to get current directory");
2968     return 0;
2969   }
2970 
2971   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
2972                                                p, current_process_id());
2973 
2974   return strlen(buffer);
2975 }
2976 
2977 bool os::start_debugging(char *buf, int buflen) {
2978   int len = (int)strlen(buf);
2979   char *p = &buf[len];
2980 
2981   jio_snprintf(p, buflen -len,
2982                  "\n\n"
2983                  "Do you want to debug the problem?\n\n"
2984                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
2985                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
2986                  "Otherwise, press RETURN to abort...",
2987                  os::current_process_id(),
2988                  os::current_thread_id(), thread_self());
2989 
2990   bool yes = os::message_box("Unexpected Error", buf);
2991 
2992   if (yes) {
2993     // yes, user asked VM to launch debugger
2994     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
2995 
2996     os::fork_and_exec(buf);
2997     yes = false;
2998   }
2999   return yes;
3000 }
3001 
3002 static inline time_t get_mtime(const char* filename) {
3003   struct stat st;
3004   int ret = os::stat(filename, &st);
3005   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
3006   return st.st_mtime;
3007 }
3008 
3009 int os::compare_file_modified_times(const char* file1, const char* file2) {
3010   time_t t1 = get_mtime(file1);
3011   time_t t2 = get_mtime(file2);
3012   return t1 - t2;
3013 }
3014 
// MAP_SYNC-style mapped buffers are not supported on AIX.
bool os::supports_map_sync() {
  return false;
}
3018 
// Not implemented on AIX; intentionally a no-op.
void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
3020 
3021 #if INCLUDE_JFR
3022 
// No platform-specific JFR memory information is reported on AIX.
void os::jfr_report_memory_info() {}
3024 
3025 #endif // INCLUDE_JFR
3026