1 /*
   2  * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvmtifiles/jvmti.h"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/globals_extension.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safefetch.inline.hpp"
  60 #include "runtime/safepointMechanism.hpp"
  61 #include "runtime/semaphore.inline.hpp"
  62 #include "runtime/sharedRuntime.hpp"
  63 #include "runtime/statSampler.hpp"
  64 #include "runtime/thread.inline.hpp"
  65 #include "runtime/threadCritical.hpp"
  66 #include "runtime/timer.hpp"
  67 #include "runtime/vm_version.hpp"
  68 #include "services/attachListener.hpp"
  69 #include "services/memTracker.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/align.hpp"
  72 #include "utilities/decoder.hpp"
  73 #include "utilities/defaultStream.hpp"
  74 #include "utilities/events.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/vmError.hpp"
  77 #include "symbolengine.hpp"
  78 #include "windbghelp.hpp"
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 #include <windows.h>
  85 #include <sys/types.h>
  86 #include <sys/stat.h>
  87 #include <sys/timeb.h>
  88 #include <objidl.h>
  89 #include <shlobj.h>
  90 
  91 #include <malloc.h>
  92 #include <signal.h>
  93 #include <direct.h>
  94 #include <errno.h>
  95 #include <fcntl.h>
  96 #include <io.h>
  97 #include <process.h>              // For _beginthreadex(), _endthreadex()
  98 #include <imagehlp.h>             // For os::dll_address_to_function_name
  99 // for enumerating dll libraries
 100 #include <vdmdbg.h>
 101 #include <psapi.h>
 102 #include <mmsystem.h>
 103 #include <winsock2.h>
 104 
  105 // For timer-info max values, which include all bits
 106 #define ALL_64_BITS CONST64(-1)
 107 
 108 // For DLL loading/load error detection
 109 // Values of PE COFF
 110 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 111 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 112 
 113 static HANDLE main_process;
 114 static HANDLE main_thread;
 115 static int    main_thread_id;
 116 
 117 static FILETIME process_creation_time;
 118 static FILETIME process_exit_time;
 119 static FILETIME process_user_time;
 120 static FILETIME process_kernel_time;
 121 
 122 #if defined(_M_ARM64)
 123   #define __CPU__ aarch64
 124 #elif defined(_M_AMD64)
 125   #define __CPU__ amd64
 126 #else
 127   #define __CPU__ i486
 128 #endif
 129 
 130 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 131 PVOID  topLevelVectoredExceptionHandler = NULL;
 132 LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
 133 #endif
 134 
 135 // save DLL module handle, used by GetModuleFileName
 136 
 137 HINSTANCE vm_lib_handle;
 138 
 139 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 140   switch (reason) {
 141   case DLL_PROCESS_ATTACH:
 142     vm_lib_handle = hinst;
 143     if (ForceTimeHighResolution) {
 144       timeBeginPeriod(1L);
 145     }
 146     WindowsDbgHelp::pre_initialize();
 147     SymbolEngine::pre_initialize();
 148     break;
 149   case DLL_PROCESS_DETACH:
 150     if (ForceTimeHighResolution) {
 151       timeEndPeriod(1L);
 152     }
 153 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 154     if (topLevelVectoredExceptionHandler != NULL) {
 155       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 156       topLevelVectoredExceptionHandler = NULL;
 157     }
 158 #endif
 159     break;
 160   default:
 161     break;
 162   }
 163   return true;
 164 }
 165 
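      // Convert a FILETIME (a 64-bit count of 100 ns intervals, split into two 32-bit
      // halves) into an approximate number of seconds, expressed as a double.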
 166 static inline double fileTimeAsDouble(FILETIME* time) {
 167   const double high  = (double) ((unsigned int) ~0);
 168   const double split = 10000000.0;
 169   double result = (time->dwLowDateTime / split) +
 170                    time->dwHighDateTime * (high/split);
 171   return result;
 172 }
 173 
 174 // Implementation of os
 175 
 176 #define RANGE_FORMAT                "[" PTR_FORMAT "-" PTR_FORMAT ")"
 177 #define RANGE_FORMAT_ARGS(p, len)   p2i(p), p2i((address)p + len)
 178 
 179 // A number of wrappers for more frequently used system calls, to add standard logging.
 180 
 181 struct PreserveLastError {
 182   const DWORD v;
 183   PreserveLastError() : v(::GetLastError()) {}
 184   ~PreserveLastError() { ::SetLastError(v); }
 185 };
 186 
 187 // Logging wrapper for VirtualAlloc
 188 static LPVOID virtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
 189   LPVOID result = ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
 190   if (result != NULL) {
 191     log_trace(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) returned " PTR_FORMAT "%s.",
 192                   p2i(lpAddress), dwSize, flAllocationType, flProtect, p2i(result),
 193                   ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
 194   } else {
 195     PreserveLastError ple;
 196     log_info(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) failed (%u).",
 197                   p2i(lpAddress), dwSize, flAllocationType, flProtect, ple.v);
 198   }
 199   return result;
 200 }
 201 
 202 // Logging wrapper for VirtualFree
 203 static BOOL virtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD  dwFreeType) {
 204   BOOL result = ::VirtualFree(lpAddress, dwSize, dwFreeType);
 205   if (result != FALSE) {
 206     log_trace(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) succeeded",
 207                   p2i(lpAddress), dwSize, dwFreeType);
 208   } else {
 209     PreserveLastError ple;
 210     log_info(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) failed (%u).",
 211                  p2i(lpAddress), dwSize, dwFreeType, ple.v);
 212   }
 213   return result;
 214 }
 215 
 216 // Logging wrapper for VirtualAllocExNuma
 217 static LPVOID virtualAllocExNuma(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD  flAllocationType,
 218                                  DWORD  flProtect, DWORD  nndPreferred) {
 219   LPVOID result = ::VirtualAllocExNuma(hProcess, lpAddress, dwSize, flAllocationType, flProtect, nndPreferred);
 220   if (result != NULL) {
 221     log_trace(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) returned " PTR_FORMAT "%s.",
 222                   p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, p2i(result),
 223                   ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
 224   } else {
 225     PreserveLastError ple;
 226     log_info(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) failed (%u).",
 227                  p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, ple.v);
 228   }
 229   return result;
 230 }
 231 
 232 // Logging wrapper for MapViewOfFileEx
 233 static LPVOID mapViewOfFileEx(HANDLE hFileMappingObject, DWORD  dwDesiredAccess, DWORD  dwFileOffsetHigh,
 234                               DWORD  dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap, LPVOID lpBaseAddress) {
 235   LPVOID result = ::MapViewOfFileEx(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh,
 236                                     dwFileOffsetLow, dwNumberOfBytesToMap, lpBaseAddress);
 237   if (result != NULL) {
 238     log_trace(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") returned " PTR_FORMAT "%s.",
 239                   p2i(lpBaseAddress), dwNumberOfBytesToMap, p2i(result),
 240                   ((lpBaseAddress != NULL && result != lpBaseAddress) ? " <different base!>" : ""));
 241   } else {
 242     PreserveLastError ple;
 243     log_info(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") failed (%u).",
 244                  p2i(lpBaseAddress), dwNumberOfBytesToMap, ple.v);
 245   }
 246   return result;
 247 }
 248 
 249 // Logging wrapper for UnmapViewOfFile
 250 static BOOL unmapViewOfFile(LPCVOID lpBaseAddress) {
 251   BOOL result = ::UnmapViewOfFile(lpBaseAddress);
 252   if (result != FALSE) {
 253     log_trace(os)("UnmapViewOfFile(" PTR_FORMAT ") succeeded", p2i(lpBaseAddress));
 254   } else {
 255     PreserveLastError ple;
 256     log_info(os)("UnmapViewOfFile(" PTR_FORMAT ") failed (%u).",  p2i(lpBaseAddress), ple.v);
 257   }
 258   return result;
 259 }
 260 
 261 char** os::get_environ() { return _environ; }
 262 
 263 // No setuid programs under Windows.
 264 bool os::have_special_privileges() {
 265   return false;
 266 }
 267 
 268 
  269 // This method is a periodic task to check for misbehaving JNI applications
  270 // under CheckJNI; we can add any periodic checks here.
  271 // On Windows it currently does nothing.
 272 void os::run_periodic_checks() {
 273   return;
 274 }
 275 
 276 // previous UnhandledExceptionFilter, if there is one
 277 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 278 
 279 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 280 
 281 void os::init_system_properties_values() {
 282   // sysclasspath, java_home, dll_dir
 283   {
 284     char *home_path;
 285     char *dll_path;
 286     char *pslash;
 287     const char *bin = "\\bin";
 288     char home_dir[MAX_PATH + 1];
 289     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 290 
 291     if (alt_home_dir != NULL)  {
 292       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 293       home_dir[MAX_PATH] = '\0';
 294     } else {
 295       os::jvm_path(home_dir, sizeof(home_dir));
 296       // Found the full path to jvm.dll.
  297       // Now cut the path down to <java_home> if we can.
 298       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 299       pslash = strrchr(home_dir, '\\');
 300       if (pslash != NULL) {
 301         *pslash = '\0';                   // get rid of \{client|server}
 302         pslash = strrchr(home_dir, '\\');
 303         if (pslash != NULL) {
 304           *pslash = '\0';                 // get rid of \bin
 305         }
 306       }
 307     }
 308 
 309     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 310     strcpy(home_path, home_dir);
 311     Arguments::set_java_home(home_path);
 312     FREE_C_HEAP_ARRAY(char, home_path);
 313 
 314     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 315                                 mtInternal);
 316     strcpy(dll_path, home_dir);
 317     strcat(dll_path, bin);
 318     Arguments::set_dll_dir(dll_path);
 319     FREE_C_HEAP_ARRAY(char, dll_path);
 320 
 321     if (!set_boot_path('\\', ';')) {
 322       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 323     }
 324   }
 325 
 326 // library_path
 327 #define EXT_DIR "\\lib\\ext"
 328 #define BIN_DIR "\\bin"
 329 #define PACKAGE_DIR "\\Sun\\Java"
 330   {
  331     // Win32 library search order (see the documentation for LoadLibrary):
  332     //
  333     // 1. The directory from which the application is loaded.
  334     // 2. The system-wide Java Extensions directory (Java only)
  335     // 3. System directory (GetSystemDirectory)
  336     // 4. Windows directory (GetWindowsDirectory)
  337     // 5. The PATH environment variable
  338     // 6. The current directory
 339 
 340     char *library_path;
 341     char tmp[MAX_PATH];
 342     char *path_str = ::getenv("PATH");
 343 
 344     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 345                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 346 
 347     library_path[0] = '\0';
 348 
 349     GetModuleFileName(NULL, tmp, sizeof(tmp));
 350     *(strrchr(tmp, '\\')) = '\0';
 351     strcat(library_path, tmp);
 352 
 353     GetWindowsDirectory(tmp, sizeof(tmp));
 354     strcat(library_path, ";");
 355     strcat(library_path, tmp);
 356     strcat(library_path, PACKAGE_DIR BIN_DIR);
 357 
 358     GetSystemDirectory(tmp, sizeof(tmp));
 359     strcat(library_path, ";");
 360     strcat(library_path, tmp);
 361 
 362     GetWindowsDirectory(tmp, sizeof(tmp));
 363     strcat(library_path, ";");
 364     strcat(library_path, tmp);
 365 
 366     if (path_str) {
 367       strcat(library_path, ";");
 368       strcat(library_path, path_str);
 369     }
 370 
 371     strcat(library_path, ";.");
 372 
 373     Arguments::set_library_path(library_path);
 374     FREE_C_HEAP_ARRAY(char, library_path);
 375   }
 376 
 377   // Default extensions directory
 378   {
 379     char path[MAX_PATH];
 380     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 381     GetWindowsDirectory(path, MAX_PATH);
 382     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 383             path, PACKAGE_DIR, EXT_DIR);
 384     Arguments::set_ext_dirs(buf);
 385   }
 386   #undef EXT_DIR
 387   #undef BIN_DIR
 388   #undef PACKAGE_DIR
 389 
 390 #ifndef _WIN64
 391   // set our UnhandledExceptionFilter and save any previous one
 392   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 393 #endif
 394 
 395   // Done
 396   return;
 397 }
 398 
 399 void os::breakpoint() {
 400   DebugBreak();
 401 }
 402 
 403 // Invoked from the BREAKPOINT Macro
 404 extern "C" void breakpoint() {
 405   os::breakpoint();
 406 }
 407 
  408 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 409 // So far, this method is only used by Native Memory Tracking, which is
 410 // only supported on Windows XP or later.
 411 //
 412 int os::get_native_stack(address* stack, int frames, int toSkip) {
 413   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 414   for (int index = captured; index < frames; index ++) {
 415     stack[index] = NULL;
 416   }
 417   return captured;
 418 }
 419 
 420 // os::current_stack_base()
 421 //
 422 //   Returns the base of the stack, which is the stack's
 423 //   starting address.  This function must be called
 424 //   while running on the stack of the thread being queried.
 425 
 426 address os::current_stack_base() {
 427   MEMORY_BASIC_INFORMATION minfo;
 428   address stack_bottom;
 429   size_t stack_size;
 430 
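        // Query the region containing a local variable (&minfo); since this code runs on
        // the stack of the thread being queried, that region is part of the current stack.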
 431   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 432   stack_bottom =  (address)minfo.AllocationBase;
 433   stack_size = minfo.RegionSize;
 434 
 435   // Add up the sizes of all the regions with the same
 436   // AllocationBase.
 437   while (1) {
 438     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 439     if (stack_bottom == (address)minfo.AllocationBase) {
 440       stack_size += minfo.RegionSize;
 441     } else {
 442       break;
 443     }
 444   }
 445   return stack_bottom + stack_size;
 446 }
 447 
 448 size_t os::current_stack_size() {
 449   size_t sz;
 450   MEMORY_BASIC_INFORMATION minfo;
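        // As in current_stack_base(), query the region containing a local variable to find
        // the allocation base of the current stack.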
 451   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 452   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 453   return sz;
 454 }
 455 
 456 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 457   MEMORY_BASIC_INFORMATION minfo;
 458   committed_start = NULL;
 459   committed_size = 0;
 460   address top = start + size;
 461   const address start_addr = start;
 462   while (start < top) {
 463     VirtualQuery(start, &minfo, sizeof(minfo));
 464     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 465       if (committed_start != NULL) {
 466         break;
 467       }
 468     } else {  // committed
 469       if (committed_start == NULL) {
 470         committed_start = start;
 471       }
 472       size_t offset = start - (address)minfo.BaseAddress;
 473       committed_size += minfo.RegionSize - offset;
 474     }
 475     start = (address)minfo.BaseAddress + minfo.RegionSize;
 476   }
 477 
 478   if (committed_start == NULL) {
 479     assert(committed_size == 0, "Sanity");
 480     return false;
 481   } else {
 482     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 483     // current region may go beyond the limit, trim to the limit
 484     committed_size = MIN2(committed_size, size_t(top - committed_start));
 485     return true;
 486   }
 487 }
 488 
 489 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 490   const struct tm* time_struct_ptr = localtime(clock);
 491   if (time_struct_ptr != NULL) {
 492     *res = *time_struct_ptr;
 493     return res;
 494   }
 495   return NULL;
 496 }
 497 
 498 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 499   const struct tm* time_struct_ptr = gmtime(clock);
 500   if (time_struct_ptr != NULL) {
 501     *res = *time_struct_ptr;
 502     return res;
 503   }
 504   return NULL;
 505 }
 506 
 507 JNIEXPORT
 508 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 509 
 510 // Thread start routine for all newly created threads
 511 static unsigned __stdcall thread_native_entry(Thread* thread) {
 512 
 513   thread->record_stack_base_and_size();
 514   thread->initialize_thread_current();
 515 
 516   OSThread* osthr = thread->osthread();
 517   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 518 
 519   if (UseNUMA) {
 520     int lgrp_id = os::numa_get_group_id();
 521     if (lgrp_id != -1) {
 522       thread->set_lgrp_id(lgrp_id);
 523     }
 524   }
 525 
 526   // Diagnostic code to investigate JDK-6573254
 527   int res = 30115;  // non-java thread
 528   if (thread->is_Java_thread()) {
 529     res = 20115;    // java thread
 530   }
 531 
 532   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 533 
 534 #ifdef USE_VECTORED_EXCEPTION_HANDLING
  535   // Any exception is caught by the Vectored Exception Handler, so the VM can
  536   // generate an error dump when an exception occurs in a non-Java thread
  537   // (e.g. the VM thread).
 538   thread->call_run();
 539 #else
  540   // Install a win32 structured exception handler around every thread created
  541   // by the VM, so the VM can generate an error dump when an exception occurs
  542   // in a non-Java thread (e.g. the VM thread).
 543   __try {
 544     thread->call_run();
 545   } __except(topLevelExceptionFilter(
 546                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 547     // Nothing to do.
 548   }
 549 #endif
 550 
 551   // Note: at this point the thread object may already have deleted itself.
 552   // Do not dereference it from here on out.
 553 
 554   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 555 
  556   // One less thread is executing.
  557   // When the VMThread gets here, the main thread may have already exited,
  558   // which frees the CodeHeap containing the Atomic::add code.
 559   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 560     Atomic::dec(&os::win32::_os_thread_count);
 561   }
 562 
 563   // Thread must not return from exit_process_or_thread(), but if it does,
 564   // let it proceed to exit normally
 565   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 566 }
 567 
 568 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 569                                   int thread_id) {
 570   // Allocate the OSThread object
 571   OSThread* osthread = new OSThread(NULL, NULL);
 572   if (osthread == NULL) return NULL;
 573 
 574   // Initialize the JDK library's interrupt event.
 575   // This should really be done when OSThread is constructed,
 576   // but there is no way for a constructor to report failure to
 577   // allocate the event.
 578   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 579   if (interrupt_event == NULL) {
 580     delete osthread;
 581     return NULL;
 582   }
 583   osthread->set_interrupt_event(interrupt_event);
 584 
 585   // Store info on the Win32 thread into the OSThread
 586   osthread->set_thread_handle(thread_handle);
 587   osthread->set_thread_id(thread_id);
 588 
 589   if (UseNUMA) {
 590     int lgrp_id = os::numa_get_group_id();
 591     if (lgrp_id != -1) {
 592       thread->set_lgrp_id(lgrp_id);
 593     }
 594   }
 595 
 596   // Initial thread state is INITIALIZED, not SUSPENDED
 597   osthread->set_state(INITIALIZED);
 598 
 599   return osthread;
 600 }
 601 
 602 
 603 bool os::create_attached_thread(JavaThread* thread) {
 604 #ifdef ASSERT
 605   thread->verify_not_published();
 606 #endif
 607   HANDLE thread_h;
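        // GetCurrentThread() returns only a pseudo-handle, so duplicate it into a real
        // handle that can be stored in the OSThread and used from other threads.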
 608   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 609                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 610     fatal("DuplicateHandle failed\n");
 611   }
 612   OSThread* osthread = create_os_thread(thread, thread_h,
 613                                         (int)current_thread_id());
 614   if (osthread == NULL) {
 615     return false;
 616   }
 617 
 618   // Initial thread state is RUNNABLE
 619   osthread->set_state(RUNNABLE);
 620 
 621   thread->set_osthread(osthread);
 622 
 623   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 624     os::current_thread_id());
 625 
 626   return true;
 627 }
 628 
 629 bool os::create_main_thread(JavaThread* thread) {
 630 #ifdef ASSERT
 631   thread->verify_not_published();
 632 #endif
 633   if (_starting_thread == NULL) {
 634     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 635     if (_starting_thread == NULL) {
 636       return false;
 637     }
 638   }
 639 
  640   // The primordial thread is runnable from the start.
 641   _starting_thread->set_state(RUNNABLE);
 642 
 643   thread->set_osthread(_starting_thread);
 644   return true;
 645 }
 646 
 647 // Helper function to trace _beginthreadex attributes,
 648 //  similar to os::Posix::describe_pthread_attr()
 649 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 650                                                size_t stacksize, unsigned initflag) {
 651   stringStream ss(buf, buflen);
 652   if (stacksize == 0) {
 653     ss.print("stacksize: default, ");
 654   } else {
 655     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 656   }
 657   ss.print("flags: ");
 658   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 659   #define ALL(X) \
 660     X(CREATE_SUSPENDED) \
 661     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 662   ALL(PRINT_FLAG)
 663   #undef ALL
 664   #undef PRINT_FLAG
 665   return buf;
 666 }
 667 
 668 // Allocate and initialize a new OSThread
 669 bool os::create_thread(Thread* thread, ThreadType thr_type,
 670                        size_t stack_size) {
 671   unsigned thread_id;
 672 
 673   // Allocate the OSThread object
 674   OSThread* osthread = new OSThread(NULL, NULL);
 675   if (osthread == NULL) {
 676     return false;
 677   }
 678 
 679   // Initialize the JDK library's interrupt event.
 680   // This should really be done when OSThread is constructed,
 681   // but there is no way for a constructor to report failure to
 682   // allocate the event.
 683   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 684   if (interrupt_event == NULL) {
 685     delete osthread;
 686     return false;
 687   }
 688   osthread->set_interrupt_event(interrupt_event);
 689   // We don't call set_interrupted(false) as it will trip the assert in there
 690   // as we are not operating on the current thread. We don't need to call it
 691   // because the initial state is already correct.
 692 
 693   thread->set_osthread(osthread);
 694 
 695   if (stack_size == 0) {
 696     switch (thr_type) {
 697     case os::java_thread:
  698       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 699       if (JavaThread::stack_size_at_create() > 0) {
 700         stack_size = JavaThread::stack_size_at_create();
 701       }
 702       break;
 703     case os::compiler_thread:
 704       if (CompilerThreadStackSize > 0) {
 705         stack_size = (size_t)(CompilerThreadStackSize * K);
 706         break;
 707       } // else fall through:
 708         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 709     case os::vm_thread:
 710     case os::gc_thread:
 711     case os::asynclog_thread:
 712     case os::watcher_thread:
 713       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 714       break;
 715     }
 716   }
 717 
 718   // Create the Win32 thread
 719   //
  720   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  721   // does not specify the stack size. Instead, it specifies the size of
  722   // initially committed space. The stack size is determined by the
  723   // PE header in the executable. If the committed "stack_size" is larger
  724   // than the default value in the PE header, the stack is rounded up to the
  725   // nearest multiple of 1MB. For example, if the launcher has a default
  726   // stack size of 320k, specifying any size less than 320k does not
  727   // affect the actual stack size at all; it only affects the initial
  728   // commitment. On the other hand, specifying a 'stack_size' larger than the
  729   // default value may cause a significant increase in memory usage, because
  730   // not only will the stack space be rounded up to a multiple of 1MB, but also the
  731   // entire space is committed upfront.
  732   //
  733   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  734   // for CreateThread() that treats 'stack_size' as the stack size. However, we
  735   // are not supposed to call CreateThread() directly according to the MSDN
  736   // documentation, because the JVM uses the C runtime library. The good news is that the
  737   // flag appears to work with _beginthreadex() as well.
 738 
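        // Create the thread suspended (it is started later, higher up the call chain) and
        // have 'stack_size' treated as the reservation size, as described above.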
 739   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 740   HANDLE thread_handle;
 741   int limit = 3;
 742   do {
 743     thread_handle =
 744       (HANDLE)_beginthreadex(NULL,
 745                              (unsigned)stack_size,
 746                              (unsigned (__stdcall *)(void*)) thread_native_entry,
 747                              thread,
 748                              initflag,
 749                              &thread_id);
 750   } while (thread_handle == NULL && errno == EAGAIN && limit-- > 0);
 751 
 752   ResourceMark rm;
 753   char buf[64];
 754   if (thread_handle != NULL) {
 755     log_info(os, thread)("Thread \"%s\" started (tid: %u, attributes: %s)",
 756                          thread->name(), thread_id,
 757                          describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 758   } else {
 759     log_warning(os, thread)("Failed to start thread \"%s\" - _beginthreadex failed (%s) for attributes: %s.",
 760                             thread->name(), os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 761     // Log some OS information which might explain why creating the thread failed.
 762     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 763     LogStream st(Log(os, thread)::info());
 764     os::print_memory_info(&st);
 765   }
 766 
 767   if (thread_handle == NULL) {
 768     // Need to clean up stuff we've allocated so far
 769     thread->set_osthread(NULL);
 770     delete osthread;
 771     return false;
 772   }
 773 
 774   Atomic::inc(&os::win32::_os_thread_count);
 775 
 776   // Store info on the Win32 thread into the OSThread
 777   osthread->set_thread_handle(thread_handle);
 778   osthread->set_thread_id(thread_id);
 779 
 780   // Initial thread state is INITIALIZED, not SUSPENDED
 781   osthread->set_state(INITIALIZED);
 782 
 783   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 784   return true;
 785 }
 786 
 787 
 788 // Free Win32 resources related to the OSThread
 789 void os::free_thread(OSThread* osthread) {
 790   assert(osthread != NULL, "osthread not set");
 791 
 792   // We are told to free resources of the argument thread,
 793   // but we can only really operate on the current thread.
 794   assert(Thread::current()->osthread() == osthread,
 795          "os::free_thread but not current thread");
 796 
 797   CloseHandle(osthread->thread_handle());
 798   delete osthread;
 799 }
 800 
 801 static jlong first_filetime;
 802 static jlong initial_performance_count;
 803 static jlong performance_frequency;
 804 
 805 
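      // Assemble a jlong from the two 32-bit halves of a LARGE_INTEGER.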
 806 jlong as_long(LARGE_INTEGER x) {
 807   jlong result = 0; // initialization to avoid warning
 808   set_high(&result, x.HighPart);
 809   set_low(&result, x.LowPart);
 810   return result;
 811 }
 812 
 813 
 814 jlong os::elapsed_counter() {
 815   LARGE_INTEGER count;
 816   QueryPerformanceCounter(&count);
 817   return as_long(count) - initial_performance_count;
 818 }
 819 
 820 
 821 jlong os::elapsed_frequency() {
 822   return performance_frequency;
 823 }
 824 
 825 
 826 julong os::available_memory() {
 827   return win32::available_memory();
 828 }
 829 
 830 julong os::win32::available_memory() {
  831   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
  832   // value if total memory is larger than 4GB
 833   MEMORYSTATUSEX ms;
 834   ms.dwLength = sizeof(ms);
 835   GlobalMemoryStatusEx(&ms);
 836 
 837   return (julong)ms.ullAvailPhys;
 838 }
 839 
 840 julong os::physical_memory() {
 841   return win32::physical_memory();
 842 }
 843 
 844 bool os::has_allocatable_memory_limit(size_t* limit) {
 845   MEMORYSTATUSEX ms;
 846   ms.dwLength = sizeof(ms);
 847   GlobalMemoryStatusEx(&ms);
 848 #ifdef _LP64
 849   *limit = (size_t)ms.ullAvailVirtual;
 850   return true;
 851 #else
 852   // Limit to 1400m because of the 2gb address space wall
 853   *limit = MIN2((size_t)1400*M, (size_t)ms.ullAvailVirtual);
 854   return true;
 855 #endif
 856 }
 857 
 858 int os::active_processor_count() {
 859   // User has overridden the number of active processors
 860   if (ActiveProcessorCount > 0) {
 861     log_trace(os)("active_processor_count: "
 862                   "active processor count set by user : %d",
 863                   ActiveProcessorCount);
 864     return ActiveProcessorCount;
 865   }
 866 
 867   DWORD_PTR lpProcessAffinityMask = 0;
 868   DWORD_PTR lpSystemAffinityMask = 0;
 869   int proc_count = processor_count();
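        // The process affinity mask can only describe up to sizeof(UINT_PTR) * 8 processors;
        // for larger systems, fall back to the raw processor count.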
 870   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 871       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
  872     // The number of active processors is the number of set bits in the process affinity mask
 873     int bitcount = 0;
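          // Count the set bits by repeatedly clearing the lowest set bit (Kernighan's method).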
 874     while (lpProcessAffinityMask != 0) {
 875       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 876       bitcount++;
 877     }
 878     return bitcount;
 879   } else {
 880     return proc_count;
 881   }
 882 }
 883 
 884 uint os::processor_id() {
 885   return (uint)GetCurrentProcessorNumber();
 886 }
 887 
 888 // For dynamic lookup of SetThreadDescription API
 889 typedef HRESULT (WINAPI *SetThreadDescriptionFnPtr)(HANDLE, PCWSTR);
 890 typedef HRESULT (WINAPI *GetThreadDescriptionFnPtr)(HANDLE, PWSTR*);
 891 static SetThreadDescriptionFnPtr _SetThreadDescription = NULL;
 892 DEBUG_ONLY(static GetThreadDescriptionFnPtr _GetThreadDescription = NULL;)
 893 
 894 // forward decl.
 895 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path);
 896 
 897 void os::set_native_thread_name(const char *name) {
 898 
  899   // Starting with Windows 10 and Windows Server 2016, there is a direct API
  900   // for setting the thread name/description:
 901   // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
 902 
 903   if (_SetThreadDescription != NULL) {
 904     // SetThreadDescription takes a PCWSTR but we have conversion routines that produce
 905     // LPWSTR. The only difference is that PCWSTR is a pointer to const WCHAR.
 906     LPWSTR unicode_name;
 907     errno_t err = convert_to_unicode(name, &unicode_name);
 908     if (err == ERROR_SUCCESS) {
 909       HANDLE current = GetCurrentThread();
 910       HRESULT hr = _SetThreadDescription(current, unicode_name);
 911       if (FAILED(hr)) {
 912         log_debug(os, thread)("set_native_thread_name: SetThreadDescription failed - falling back to debugger method");
 913         FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
 914       } else {
 915         log_trace(os, thread)("set_native_thread_name: SetThreadDescription succeeded - new name: %s", name);
 916 
 917 #ifdef ASSERT
 918         // For verification purposes in a debug build we read the thread name back and check it.
 919         PWSTR thread_name;
 920         HRESULT hr2 = _GetThreadDescription(current, &thread_name);
 921         if (FAILED(hr2)) {
 922           log_debug(os, thread)("set_native_thread_name: GetThreadDescription failed!");
 923         } else {
 924           int res = CompareStringW(LOCALE_USER_DEFAULT,
 925                                    0, // no special comparison rules
 926                                    unicode_name,
 927                                    -1, // null-terminated
 928                                    thread_name,
 929                                    -1  // null-terminated
 930                                    );
 931           assert(res == CSTR_EQUAL,
 932                  "Name strings were not the same - set: %ls, but read: %ls", unicode_name, thread_name);
 933           LocalFree(thread_name);
 934         }
 935 #endif
 936         FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
 937         return;
 938       }
 939     } else {
 940       log_debug(os, thread)("set_native_thread_name: convert_to_unicode failed - falling back to debugger method");
 941     }
 942   }
 943 
 944   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 945   //
 946   // Note that unfortunately this only works if the process
 947   // is already attached to a debugger; debugger must observe
 948   // the exception below to show the correct name.
 949 
 950   // If there is no debugger attached skip raising the exception
 951   if (!IsDebuggerPresent()) {
 952     log_debug(os, thread)("set_native_thread_name: no debugger present so unable to set thread name");
 953     return;
 954   }
 955 
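        // Magic exception code that Visual Studio debuggers recognize as a "set thread name" request.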
 956   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 957   struct {
 958     DWORD dwType;     // must be 0x1000
 959     LPCSTR szName;    // pointer to name (in user addr space)
 960     DWORD dwThreadID; // thread ID (-1=caller thread)
 961     DWORD dwFlags;    // reserved for future use, must be zero
 962   } info;
 963 
 964   info.dwType = 0x1000;
 965   info.szName = name;
 966   info.dwThreadID = -1;
 967   info.dwFlags = 0;
 968 
 969   __try {
 970     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 971   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 972 }
 973 
 974 void os::win32::initialize_performance_counter() {
 975   LARGE_INTEGER count;
 976   QueryPerformanceFrequency(&count);
 977   performance_frequency = as_long(count);
 978   QueryPerformanceCounter(&count);
 979   initial_performance_count = as_long(count);
 980 }
 981 
 982 
 983 double os::elapsedTime() {
 984   return (double) elapsed_counter() / (double) elapsed_frequency();
 985 }
 986 
 987 
 988 // Windows format:
 989 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 990 // Java format:
 991 //   Java standards require the number of milliseconds since 1/1/1970
 992 
 993 // Constant offset - calculated using offset()
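      // (the number of 100 ns intervals between the Windows epoch, 1601-01-01, and the Java epoch, 1970-01-01)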
 994 static jlong  _offset   = 116444736000000000;
 995 // Fake time counter for reproducible results when debugging
 996 static jlong  fake_time = 0;
 997 
 998 #ifdef ASSERT
 999 // Just to be safe, recalculate the offset in debug mode
1000 static jlong _calculated_offset = 0;
1001 static int   _has_calculated_offset = 0;
1002 
1003 jlong offset() {
1004   if (_has_calculated_offset) return _calculated_offset;
1005   SYSTEMTIME java_origin;
1006   java_origin.wYear          = 1970;
1007   java_origin.wMonth         = 1;
1008   java_origin.wDayOfWeek     = 0; // ignored
1009   java_origin.wDay           = 1;
1010   java_origin.wHour          = 0;
1011   java_origin.wMinute        = 0;
1012   java_origin.wSecond        = 0;
1013   java_origin.wMilliseconds  = 0;
1014   FILETIME jot;
1015   if (!SystemTimeToFileTime(&java_origin, &jot)) {
1016     fatal("Error = %d\nWindows error", GetLastError());
1017   }
1018   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
1019   _has_calculated_offset = 1;
1020   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
1021   return _calculated_offset;
1022 }
1023 #else
1024 jlong offset() {
1025   return _offset;
1026 }
1027 #endif
1028 
1029 jlong windows_to_java_time(FILETIME wt) {
1030   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
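        // Subtract the epoch offset and convert from 100 ns units to milliseconds.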
1031   return (a - offset()) / 10000;
1032 }
1033 
 1034 // Returns time ticks in tenths of microseconds (100 ns units)
1035 jlong windows_to_time_ticks(FILETIME wt) {
1036   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
1037   return (a - offset());
1038 }
1039 
1040 FILETIME java_to_windows_time(jlong l) {
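        // Convert from milliseconds to 100 ns units and add the epoch offset.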
1041   jlong a = (l * 10000) + offset();
1042   FILETIME result;
1043   result.dwHighDateTime = high(a);
1044   result.dwLowDateTime  = low(a);
1045   return result;
1046 }
1047 
1048 bool os::supports_vtime() { return true; }
1049 
1050 double os::elapsedVTime() {
1051   FILETIME created;
1052   FILETIME exited;
1053   FILETIME kernel;
1054   FILETIME user;
1055   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
1056     // the resolution of windows_to_java_time() should be sufficient (ms)
1057     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
1058   } else {
1059     return elapsedTime();
1060   }
1061 }
1062 
1063 jlong os::javaTimeMillis() {
1064   FILETIME wt;
1065   GetSystemTimeAsFileTime(&wt);
1066   return windows_to_java_time(wt);
1067 }
1068 
1069 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1070   FILETIME wt;
1071   GetSystemTimeAsFileTime(&wt);
1072   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
1073   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
1074   seconds = secs;
1075   nanos = jlong(ticks - (secs*10000000)) * 100;
1076 }
1077 
1078 jlong os::javaTimeNanos() {
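          // Scale the raw performance counter by its frequency to obtain elapsed nanoseconds.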
1079     LARGE_INTEGER current_count;
1080     QueryPerformanceCounter(&current_count);
1081     double current = as_long(current_count);
1082     double freq = performance_frequency;
1083     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
1084     return time;
1085 }
1086 
1087 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1088   jlong freq = performance_frequency;
1089   if (freq < NANOSECS_PER_SEC) {
1090     // the performance counter is 64 bits and we will
1091     // be multiplying it -- so no wrap in 64 bits
1092     info_ptr->max_value = ALL_64_BITS;
1093   } else if (freq > NANOSECS_PER_SEC) {
1094     // use the max value the counter can reach to
1095     // determine the max value which could be returned
1096     julong max_counter = (julong)ALL_64_BITS;
1097     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
1098   } else {
1099     // the performance counter is 64 bits and we will
1100     // be using it directly -- so no wrap in 64 bits
1101     info_ptr->max_value = ALL_64_BITS;
1102   }
1103 
1104   // using a counter, so no skipping
1105   info_ptr->may_skip_backward = false;
1106   info_ptr->may_skip_forward = false;
1107 
1108   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
1109 }
1110 
1111 char* os::local_time_string(char *buf, size_t buflen) {
1112   SYSTEMTIME st;
1113   GetLocalTime(&st);
1114   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1115                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
1116   return buf;
1117 }
1118 
1119 bool os::getTimesSecs(double* process_real_time,
1120                       double* process_user_time,
1121                       double* process_system_time) {
1122   HANDLE h_process = GetCurrentProcess();
1123   FILETIME create_time, exit_time, kernel_time, user_time;
1124   BOOL result = GetProcessTimes(h_process,
1125                                 &create_time,
1126                                 &exit_time,
1127                                 &kernel_time,
1128                                 &user_time);
1129   if (result != 0) {
1130     FILETIME wt;
1131     GetSystemTimeAsFileTime(&wt);
1132     jlong rtc_millis = windows_to_java_time(wt);
1133     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1134     *process_user_time =
1135       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1136     *process_system_time =
1137       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1138     return true;
1139   } else {
1140     return false;
1141   }
1142 }
1143 
1144 void os::shutdown() {
1145   // allow PerfMemory to attempt cleanup of any persistent resources
1146   perfMemory_exit();
1147 
1148   // flush buffered output, finish log files
1149   ostream_abort();
1150 
1151   // Check for abort hook
1152   abort_hook_t abort_hook = Arguments::abort_hook();
1153   if (abort_hook != NULL) {
1154     abort_hook();
1155   }
1156 }
1157 
1158 
1159 static HANDLE dumpFile = NULL;
1160 
1161 // Check if dump file can be created.
1162 void os::check_dump_limit(char* buffer, size_t buffsz) {
1163   bool status = true;
1164   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1165     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1166     status = false;
1167   }
1168 
1169 #ifndef ASSERT
1170   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1171     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1172     status = false;
1173   }
1174 #endif
1175 
1176   if (status) {
1177     const char* cwd = get_current_directory(NULL, 0);
1178     int pid = current_process_id();
1179     if (cwd != NULL) {
1180       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1181     } else {
1182       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1183     }
1184 
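          // Open the minidump file now so that any failure can be reported up front.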
1185     if (dumpFile == NULL &&
1186        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1187                  == INVALID_HANDLE_VALUE) {
1188       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1189       status = false;
1190     }
1191   }
1192   VMError::record_coredump_status(buffer, status);
1193 }
1194 
1195 void os::abort(bool dump_core, void* siginfo, const void* context) {
1196   EXCEPTION_POINTERS ep;
1197   MINIDUMP_EXCEPTION_INFORMATION mei;
1198   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1199 
1200   HANDLE hProcess = GetCurrentProcess();
1201   DWORD processId = GetCurrentProcessId();
1202   MINIDUMP_TYPE dumpType;
1203 
1204   shutdown();
1205   if (!dump_core || dumpFile == NULL) {
1206     if (dumpFile != NULL) {
1207       CloseHandle(dumpFile);
1208     }
1209     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1210   }
1211 
1212   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1213     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1214 
1215   if (siginfo != NULL && context != NULL) {
1216     ep.ContextRecord = (PCONTEXT) context;
1217     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1218 
1219     mei.ThreadId = GetCurrentThreadId();
1220     mei.ExceptionPointers = &ep;
1221     pmei = &mei;
1222   } else {
1223     pmei = NULL;
1224   }
1225 
 1226   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
 1227   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1228   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1229       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1230     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1231   }
1232   CloseHandle(dumpFile);
1233   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1234 }
1235 
1236 // Die immediately, no exit hook, no abort hook, no cleanup.
1237 void os::die() {
1238   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1239 }
1240 
1241 const char* os::dll_file_extension() { return ".dll"; }
1242 
1243 void  os::dll_unload(void *lib) {
1244   ::FreeLibrary((HMODULE)lib);
1245 }
1246 
1247 void* os::dll_lookup(void *lib, const char *name) {
1248   return (void*)::GetProcAddress((HMODULE)lib, name);
1249 }
1250 
1251 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1252 //  * dirent_md.c       1.15 00/02/02
1253 //
1254 // The declarations for DIR and struct dirent are in jvm_win32.h.
1255 
1256 // Caller must have already run dirname through JVM_NativePath, which removes
1257 // duplicate slashes and converts all instances of '/' into '\\'.
1258 
1259 DIR * os::opendir(const char *dirname) {
1260   assert(dirname != NULL, "just checking");   // hotspot change
1261   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1262   DWORD fattr;                                // hotspot change
1263   char alt_dirname[4] = { 0, 0, 0, 0 };
1264 
1265   if (dirp == 0) {
1266     errno = ENOMEM;
1267     return 0;
1268   }
1269 
1270   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1271   // as a directory in FindFirstFile().  We detect this case here and
1272   // prepend the current drive name.
1273   //
1274   if (dirname[1] == '\0' && dirname[0] == '\\') {
1275     alt_dirname[0] = _getdrive() + 'A' - 1;
1276     alt_dirname[1] = ':';
1277     alt_dirname[2] = '\\';
1278     alt_dirname[3] = '\0';
1279     dirname = alt_dirname;
1280   }
1281 
1282   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1283   if (dirp->path == 0) {
1284     free(dirp);
1285     errno = ENOMEM;
1286     return 0;
1287   }
1288   strcpy(dirp->path, dirname);
1289 
1290   fattr = GetFileAttributes(dirp->path);
1291   if (fattr == 0xffffffff) {
1292     free(dirp->path);
1293     free(dirp);
1294     errno = ENOENT;
1295     return 0;
1296   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1297     free(dirp->path);
1298     free(dirp);
1299     errno = ENOTDIR;
1300     return 0;
1301   }
1302 
1303   // Append "*.*", or possibly "\\*.*", to path
1304   if (dirp->path[1] == ':' &&
1305       (dirp->path[2] == '\0' ||
1306       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1307     // No '\\' needed for cases like "Z:" or "Z:\"
1308     strcat(dirp->path, "*.*");
1309   } else {
1310     strcat(dirp->path, "\\*.*");
1311   }
1312 
1313   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1314   if (dirp->handle == INVALID_HANDLE_VALUE) {
1315     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1316       free(dirp->path);
1317       free(dirp);
1318       errno = EACCES;
1319       return 0;
1320     }
1321   }
1322   return dirp;
1323 }
1324 
1325 struct dirent * os::readdir(DIR *dirp) {
1326   assert(dirp != NULL, "just checking");      // hotspot change
1327   if (dirp->handle == INVALID_HANDLE_VALUE) {
1328     return NULL;
1329   }
1330 
1331   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1332 
1333   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1334     if (GetLastError() == ERROR_INVALID_HANDLE) {
1335       errno = EBADF;
1336       return NULL;
1337     }
1338     FindClose(dirp->handle);
1339     dirp->handle = INVALID_HANDLE_VALUE;
1340   }
1341 
1342   return &dirp->dirent;
1343 }
1344 
1345 int os::closedir(DIR *dirp) {
1346   assert(dirp != NULL, "just checking");      // hotspot change
1347   if (dirp->handle != INVALID_HANDLE_VALUE) {
1348     if (!FindClose(dirp->handle)) {
1349       errno = EBADF;
1350       return -1;
1351     }
1352     dirp->handle = INVALID_HANDLE_VALUE;
1353   }
1354   free(dirp->path);
1355   free(dirp);
1356   return 0;
1357 }
1358 
 1359 // This must be hard coded because it's the system's temporary
 1360 // directory, not the java application's temp directory (a la java.io.tmpdir).
1361 const char* os::get_temp_directory() {
1362   static char path_buf[MAX_PATH];
1363   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1364     return path_buf;
1365   } else {
1366     path_buf[0] = '\0';
1367     return path_buf;
1368   }
1369 }
1370 
 1371 // Needs to be in the OS-specific directory because Windows requires another
 1372 // header file, <direct.h>
1373 const char* os::get_current_directory(char *buf, size_t buflen) {
1374   int n = static_cast<int>(buflen);
1375   if (buflen > INT_MAX)  n = INT_MAX;
1376   return _getcwd(buf, n);
1377 }
1378 
1379 //-----------------------------------------------------------
1380 // Helper functions for fatal error handler
1381 #ifdef _WIN64
 1382 // Helper routine which returns true if the address is
 1383 // within the NTDLL address space.
 1384 //
1385 static bool _addr_in_ntdll(address addr) {
1386   HMODULE hmod;
1387   MODULEINFO minfo;
1388 
1389   hmod = GetModuleHandle("NTDLL.DLL");
1390   if (hmod == NULL) return false;
1391   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1392                                           &minfo, sizeof(MODULEINFO))) {
1393     return false;
1394   }
1395 
1396   if ((addr >= minfo.lpBaseOfDll) &&
1397       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1398     return true;
1399   } else {
1400     return false;
1401   }
1402 }
1403 #endif
1404 
1405 struct _modinfo {
1406   address addr;
1407   char*   full_path;   // point to a char buffer
1408   int     buflen;      // size of the buffer
1409   address base_addr;
1410 };
1411 
1412 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1413                                   address top_address, void * param) {
1414   struct _modinfo *pmod = (struct _modinfo *)param;
1415   if (!pmod) return -1;
1416 
1417   if (base_addr   <= pmod->addr &&
1418       top_address > pmod->addr) {
1419     // if a buffer is provided, copy path name to the buffer
1420     if (pmod->full_path) {
1421       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1422     }
1423     pmod->base_addr = base_addr;
1424     return 1;
1425   }
1426   return 0;
1427 }
1428 
1429 bool os::dll_address_to_library_name(address addr, char* buf,
1430                                      int buflen, int* offset) {
1431   // buf is not optional, but offset is optional
1432   assert(buf != NULL, "sanity check");
1433 
 1434 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
 1435 //       return the full path to the DLL file; sometimes it returns the path
 1436 //       to the corresponding PDB file (debug info), and sometimes it only
 1437 //       returns a partial path, which makes life painful.
1438 
1439   struct _modinfo mi;
1440   mi.addr      = addr;
1441   mi.full_path = buf;
1442   mi.buflen    = buflen;
1443   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1444     // buf already contains path name
1445     if (offset) *offset = addr - mi.base_addr;
1446     return true;
1447   }
1448 
1449   buf[0] = '\0';
1450   if (offset) *offset = -1;
1451   return false;
1452 }
1453 
1454 bool os::dll_address_to_function_name(address addr, char *buf,
1455                                       int buflen, int *offset,
1456                                       bool demangle) {
1457   // buf is not optional, but offset is optional
1458   assert(buf != NULL, "sanity check");
1459 
1460   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1461     return true;
1462   }
1463   if (offset != NULL)  *offset  = -1;
1464   buf[0] = '\0';
1465   return false;
1466 }
1467 
1468 // save the start and end address of jvm.dll into param[0] and param[1]
1469 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1470                            address top_address, void * param) {
1471   if (!param) return -1;
1472 
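        // The address of this function itself lies within jvm.dll, so the module whose
        // address range contains it is the VM library.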
1473   if (base_addr   <= (address)_locate_jvm_dll &&
1474       top_address > (address)_locate_jvm_dll) {
1475     ((address*)param)[0] = base_addr;
1476     ((address*)param)[1] = top_address;
1477     return 1;
1478   }
1479   return 0;
1480 }
1481 
1482 address vm_lib_location[2];    // start and end address of jvm.dll
1483 
1484 // check if addr is inside jvm.dll
1485 bool os::address_is_in_vm(address addr) {
1486   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1487     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1488       assert(false, "Can't find jvm module.");
1489       return false;
1490     }
1491   }
1492 
1493   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1494 }
1495 
1496 // print module info; param is outputStream*
1497 static int _print_module(const char* fname, address base_address,
1498                          address top_address, void* param) {
1499   if (!param) return -1;
1500 
1501   outputStream* st = (outputStream*)param;
1502 
1503   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1504   return 0;
1505 }
1506 
 1507 // Loads a .dll/.so and,
 1508 // in case of error, checks whether the .dll/.so was built for the
 1509 // same architecture as the one HotSpot is running on.
1510 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1511   log_info(os)("attempting shared library load of %s", name);
1512 
1513   void * result = LoadLibrary(name);
1514   if (result != NULL) {
1515     Events::log(NULL, "Loaded shared library %s", name);
1516     // Recalculate pdb search path if a DLL was loaded successfully.
1517     SymbolEngine::recalc_search_path();
1518     log_info(os)("shared library load of %s was successful", name);
1519     return result;
1520   }
1521   DWORD errcode = GetLastError();
1522   // Read system error message into ebuf
1523   // It may or may not be overwritten below (in the for loop and just above)
1524   lasterror(ebuf, (size_t) ebuflen);
1525   ebuf[ebuflen - 1] = '\0';
1526   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1527   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1528 
1529   if (errcode == ERROR_MOD_NOT_FOUND) {
1530     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1531     ebuf[ebuflen - 1] = '\0';
1532     return NULL;
1533   }
1534 
1535   // Parsing the dll below:
1536   // If we can read the dll-info and find that the dll was built
1537   // for an architecture other than the one Hotspot is running on,
1538   // then print "DLL was built for a different architecture" into the buffer;
1539   // otherwise call os::lasterror to obtain the system error message.
1540   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1541   if (fd < 0) {
1542     return NULL;
1543   }
1544 
1545   uint32_t signature_offset;
1546   uint16_t lib_arch = 0;
1547   bool failed_to_get_lib_arch =
1548     ( // Go to position 0x3c in the dll
1549      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1550      ||
1551      // Read location of signature
1552      (sizeof(signature_offset) !=
1553      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1554      ||
1555      // Go to COFF File Header in dll
1556      // that is located after "signature" (4 bytes long)
1557      (os::seek_to_file_offset(fd,
1558      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1559      ||
1560      // Read field that contains code of architecture
1561      // that dll was built for
1562      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1563     );
1564 
1565   ::close(fd);
1566   if (failed_to_get_lib_arch) {
1567     // file i/o error - report os::lasterror(...) msg
1568     return NULL;
1569   }
1570 
1571   typedef struct {
1572     uint16_t arch_code;
1573     char* arch_name;
1574   } arch_t;
1575 
1576   static const arch_t arch_array[] = {
1577     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1578     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1579     {IMAGE_FILE_MACHINE_ARM64,     (char*)"ARM 64"}
1580   };
1581 #if (defined _M_ARM64)
1582   static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
1583 #elif (defined _M_AMD64)
1584   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1585 #elif (defined _M_IX86)
1586   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1587 #else
1588   #error Method os::dll_load requires that one of the following \
1589          is defined: _M_AMD64, _M_IX86 or _M_ARM64
1590 #endif
1591 
1592 
1593   // Obtain strings for the printf operations below:
1594   // lib_arch_str shall contain the name of the platform this .dll was built for,
1595   // running_arch_str the name of the platform Hotspot itself was built for.
1596   char *running_arch_str = NULL, *lib_arch_str = NULL;
1597   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1598     if (lib_arch == arch_array[i].arch_code) {
1599       lib_arch_str = arch_array[i].arch_name;
1600     }
1601     if (running_arch == arch_array[i].arch_code) {
1602       running_arch_str = arch_array[i].arch_name;
1603     }
1604   }
1605 
1606   assert(running_arch_str,
1607          "Didn't find running architecture code in arch_array");
1608 
1609   // If the architecture is right
1610   // but some other error took place - report os::lasterror(...) msg
1611   if (lib_arch == running_arch) {
1612     return NULL;
1613   }
1614 
1615   if (lib_arch_str != NULL) {
1616     ::_snprintf(ebuf, ebuflen - 1,
1617                 "Can't load %s-bit .dll on a %s-bit platform",
1618                 lib_arch_str, running_arch_str);
1619   } else {
1620     // don't know what architecture this dll was built for
1621     ::_snprintf(ebuf, ebuflen - 1,
1622                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1623                 lib_arch, running_arch_str);
1624   }
1625 
1626   return NULL;
1627 }
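
// Orientation sketch of the PE fields walked above (the authoritative
// definitions live in <winnt.h>):
//
//   offset 0x3c (IMAGE_FILE_PTR_TO_SIGNATURE) : DWORD -> file offset of the signature
//   offset signature_offset                   : 4-byte signature "PE\0\0"
//   offset signature_offset + 4               : IMAGE_FILE_HEADER, whose first
//                                               WORD is Machine (read as lib_arch)
//
// i.e. the code reads the signature offset stored at 0x3c, skips the 4-byte
// signature (IMAGE_FILE_SIGNATURE_LENGTH) and reads the 16-bit machine code.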
1628 
1629 void os::print_dll_info(outputStream *st) {
1630   st->print_cr("Dynamic libraries:");
1631   get_loaded_modules_info(_print_module, (void *)st);
1632 }
1633 
1634 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1635   HANDLE   hProcess;
1636 
1637 # define MAX_NUM_MODULES 128
1638   HMODULE     modules[MAX_NUM_MODULES];
1639   static char filename[MAX_PATH];
1640   int         result = 0;
1641 
1642   int pid = os::current_process_id();
1643   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1644                          FALSE, pid);
1645   if (hProcess == NULL) return 0;
1646 
1647   DWORD size_needed;
1648   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1649     CloseHandle(hProcess);
1650     return 0;
1651   }
1652 
1653   // number of modules that are currently loaded
1654   int num_modules = size_needed / sizeof(HMODULE);
1655 
1656   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1657     // Get Full pathname:
1658     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1659       filename[0] = '\0';
1660     }
1661 
1662     MODULEINFO modinfo;
1663     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1664       modinfo.lpBaseOfDll = NULL;
1665       modinfo.SizeOfImage = 0;
1666     }
1667 
1668     // Invoke callback function
1669     result = callback(filename, (address)modinfo.lpBaseOfDll,
1670                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1671     if (result) break;
1672   }
1673 
1674   CloseHandle(hProcess);
1675   return result;
1676 }
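
// Usage sketch (hypothetical, not part of this file): callbacks passed to
// get_loaded_modules_info() are invoked once per module and return 0 to keep
// walking or non-zero to stop; the last callback result is what
// get_loaded_modules_info() itself returns.
//
//   static int _count_modules(const char* name, address base,
//                             address top, void* param) {
//     int* count = (int*)param;   // caller-supplied accumulator
//     (*count)++;
//     return 0;                   // keep enumerating
//   }
//   ...
//   int n = 0;
//   os::get_loaded_modules_info(_count_modules, (void*)&n);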
1677 
1678 bool os::get_host_name(char* buf, size_t buflen) {
1679   DWORD size = (DWORD)buflen;
1680   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1681 }
1682 
1683 void os::get_summary_os_info(char* buf, size_t buflen) {
1684   stringStream sst(buf, buflen);
1685   os::win32::print_windows_version(&sst);
1686   // chop off newline character
1687   char* nl = strchr(buf, '\n');
1688   if (nl != NULL) *nl = '\0';
1689 }
1690 
1691 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1692 #if _MSC_VER >= 1900
1693   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1694   int result = ::vsnprintf(buf, len, fmt, args);
1695   // If an encoding error occurred (result < 0) then it's not clear
1696   // whether the buffer is NUL terminated, so ensure it is.
1697   if ((result < 0) && (len > 0)) {
1698     buf[len - 1] = '\0';
1699   }
1700   return result;
1701 #else
1702   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1703   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1704   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1705   // go straight to _vscprintf.  The output is going to be truncated in
1706   // that case, except in the unusual case of empty output.  More
1707   // importantly, the documentation for various versions of Visual Studio
1708   // is inconsistent about the behavior of _vsnprintf when len == 0,
1709   // including it possibly being an error.
1710   int result = -1;
1711   if (len > 0) {
1712     result = _vsnprintf(buf, len, fmt, args);
1713     // If output (including NUL terminator) is truncated, the buffer
1714     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1715     if ((result < 0) || ((size_t)result >= len)) {
1716       buf[len - 1] = '\0';
1717     }
1718   }
1719   if (result < 0) {
1720     result = _vscprintf(fmt, args);
1721   }
1722   return result;
1723 #endif // _MSC_VER dispatch
1724 }
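
// Contract relied on above (C99 vsnprintf semantics), sketched with a
// hypothetical helper that forwards its varargs into os::vsnprintf and a
// 4-byte buffer:
//
//   char small[4];
//   int n = format_into(small, sizeof(small), "%s", "abcdef");
//   // n     == 6      (length the complete output would have required)
//   // small == "abc"  (truncated but still NUL-terminated)
//
// The pre-VS2015 branch emulates this with _vsnprintf for the truncated copy
// plus _vscprintf for the would-be length.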
1725 
1726 static inline time_t get_mtime(const char* filename) {
1727   struct stat st;
1728   int ret = os::stat(filename, &st);
1729   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1730   return st.st_mtime;
1731 }
1732 
1733 int os::compare_file_modified_times(const char* file1, const char* file2) {
1734   time_t t1 = get_mtime(file1);
1735   time_t t2 = get_mtime(file2);
1736   return t1 - t2;
1737 }
1738 
1739 void os::print_os_info_brief(outputStream* st) {
1740   os::print_os_info(st);
1741 }
1742 
1743 void os::win32::print_uptime_info(outputStream* st) {
1744   unsigned long long ticks = GetTickCount64();
1745   os::print_dhm(st, "OS uptime:", ticks/1000);
1746 }
1747 
1748 void os::print_os_info(outputStream* st) {
1749 #ifdef ASSERT
1750   char buffer[1024];
1751   st->print("HostName: ");
1752   if (get_host_name(buffer, sizeof(buffer))) {
1753     st->print_cr("%s", buffer);
1754   } else {
1755     st->print_cr("N/A");
1756   }
1757 #endif
1758   st->print_cr("OS:");
1759   os::win32::print_windows_version(st);
1760 
1761   os::win32::print_uptime_info(st);
1762 
1763   VM_Version::print_platform_virtualization_info(st);
1764 }
1765 
1766 void os::win32::print_windows_version(outputStream* st) {
1767   OSVERSIONINFOEX osvi;
1768   VS_FIXEDFILEINFO *file_info;
1769   TCHAR kernel32_path[MAX_PATH];
1770   UINT len, ret;
1771 
1772   // Use the GetVersionEx information to see if we're on a server or
1773   // workstation edition of Windows. Starting with Windows 8.1 we can't
1774   // trust the OS version information returned by this API.
1775   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1776   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1777   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1778     st->print_cr("Call to GetVersionEx failed");
1779     return;
1780   }
1781   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1782 
1783   // Get the full path to \Windows\System32\kernel32.dll and use that for
1784   // determining what version of Windows we're running on.
1785   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1786   ret = GetSystemDirectory(kernel32_path, len);
1787   if (ret == 0 || ret > len) {
1788     st->print_cr("Call to GetSystemDirectory failed");
1789     return;
1790   }
1791   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1792 
1793   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1794   if (version_size == 0) {
1795     st->print_cr("Call to GetFileVersionInfoSize failed");
1796     return;
1797   }
1798 
1799   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1800   if (version_info == NULL) {
1801     st->print_cr("Failed to allocate version_info");
1802     return;
1803   }
1804 
1805   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1806     os::free(version_info);
1807     st->print_cr("Call to GetFileVersionInfo failed");
1808     return;
1809   }
1810 
1811   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1812     os::free(version_info);
1813     st->print_cr("Call to VerQueryValue failed");
1814     return;
1815   }
1816 
1817   int major_version = HIWORD(file_info->dwProductVersionMS);
1818   int minor_version = LOWORD(file_info->dwProductVersionMS);
1819   int build_number = HIWORD(file_info->dwProductVersionLS);
1820   int build_minor = LOWORD(file_info->dwProductVersionLS);
1821   int os_vers = major_version * 1000 + minor_version;
1822   os::free(version_info);
1823 
1824   st->print(" Windows ");
1825   switch (os_vers) {
1826 
1827   case 6000:
1828     if (is_workstation) {
1829       st->print("Vista");
1830     } else {
1831       st->print("Server 2008");
1832     }
1833     break;
1834 
1835   case 6001:
1836     if (is_workstation) {
1837       st->print("7");
1838     } else {
1839       st->print("Server 2008 R2");
1840     }
1841     break;
1842 
1843   case 6002:
1844     if (is_workstation) {
1845       st->print("8");
1846     } else {
1847       st->print("Server 2012");
1848     }
1849     break;
1850 
1851   case 6003:
1852     if (is_workstation) {
1853       st->print("8.1");
1854     } else {
1855       st->print("Server 2012 R2");
1856     }
1857     break;
1858 
1859   case 10000:
1860     if (is_workstation) {
1861       if (build_number >= 22000) {
1862         st->print("11");
1863       } else {
1864         st->print("10");
1865       }
1866     } else {
1867       // distinguish Windows Server by build number
1868       // - 2016 GA 10/2016 build: 14393
1869       // - 2019 GA 11/2018 build: 17763
1870       // - 2022 GA 08/2021 build: 20348
1871       if (build_number > 20347) {
1872         st->print("Server 2022");
1873       } else if (build_number > 17762) {
1874         st->print("Server 2019");
1875       } else {
1876         st->print("Server 2016");
1877       }
1878     }
1879     break;
1880 
1881   default:
1882     // Unrecognized windows, print out its major and minor versions
1883     st->print("%d.%d", major_version, minor_version);
1884     break;
1885   }
1886 
1887   // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
1888   // find out whether we are running on 64 bit processor or not
1889   SYSTEM_INFO si;
1890   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1891   GetNativeSystemInfo(&si);
1892   if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
1893       (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
1894     st->print(" , 64 bit");
1895   }
1896 
1897   st->print(" Build %d", build_number);
1898   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1899   st->cr();
1900 }
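
// Decoding sketch for the version words used above: a hypothetical
// kernel32.dll product version 10.0.22000.194 arrives as
//
//   HIWORD(dwProductVersionMS) == 10      // major_version
//   LOWORD(dwProductVersionMS) == 0       // minor_version
//   HIWORD(dwProductVersionLS) == 22000   // build_number  -> printed as "11"
//   LOWORD(dwProductVersionLS) == 194     // build_minor
//
// so os_vers == 10 * 1000 + 0 == 10000 and the workstation branch prints
// "Windows 11 , 64 bit Build 22000 (10.0.22000.194)" on a 64-bit machine.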
1901 
1902 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1903   // Nothing to do for now.
1904 }
1905 
1906 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1907   HKEY key;
1908   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1909                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1910   if (status == ERROR_SUCCESS) {
1911     DWORD size = (DWORD)buflen;
1912     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1913     if (status != ERROR_SUCCESS) {
1914         strncpy(buf, "## __CPU__", buflen);
1915     }
1916     RegCloseKey(key);
1917   } else {
1918     // Put generic CPU info into the buffer to return
1919     strncpy(buf, "## __CPU__", buflen);
1920   }
1921 }
1922 
1923 void os::print_memory_info(outputStream* st) {
1924   st->print("Memory:");
1925   st->print(" %dk page", os::vm_page_size()>>10);
1926 
1927   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1928   // value if total memory is larger than 4GB
1929   MEMORYSTATUSEX ms;
1930   ms.dwLength = sizeof(ms);
1931   int r1 = GlobalMemoryStatusEx(&ms);
1932 
1933   if (r1 != 0) {
1934     st->print(", system-wide physical " INT64_FORMAT "M ",
1935              (int64_t) ms.ullTotalPhys >> 20);
1936     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1937 
1938     st->print("TotalPageFile size " INT64_FORMAT "M ",
1939              (int64_t) ms.ullTotalPageFile >> 20);
1940     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1941              (int64_t) ms.ullAvailPageFile >> 20);
1942 
1943     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1944 #if defined(_M_IX86)
1945     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1946              (int64_t) ms.ullTotalVirtual >> 20);
1947     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1948 #endif
1949   } else {
1950     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1951   }
1952 
1953   // extended memory statistics for a process
1954   PROCESS_MEMORY_COUNTERS_EX pmex;
1955   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1956   pmex.cb = sizeof(pmex);
1957   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1958 
1959   if (r2 != 0) {
1960     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1961              (int64_t) pmex.WorkingSetSize >> 20);
1962     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1963 
1964     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1965              (int64_t) pmex.PrivateUsage >> 20);
1966     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1967   } else {
1968     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1969   }
1970 
1971   st->cr();
1972 }
1973 
1974 bool os::signal_sent_by_kill(const void* siginfo) {
1975   // TODO: Is this possible?
1976   return false;
1977 }
1978 
1979 void os::print_siginfo(outputStream *st, const void* siginfo) {
1980   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1981   st->print("siginfo:");
1982 
1983   char tmp[64];
1984   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1985     strcpy(tmp, "EXCEPTION_??");
1986   }
1987   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1988 
1989   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1990        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1991        er->NumberParameters >= 2) {
1992     switch (er->ExceptionInformation[0]) {
1993     case 0: st->print(", reading address"); break;
1994     case 1: st->print(", writing address"); break;
1995     case 8: st->print(", data execution prevention violation at address"); break;
1996     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1997                        er->ExceptionInformation[0]);
1998     }
1999     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
2000   } else {
2001     int num = er->NumberParameters;
2002     if (num > 0) {
2003       st->print(", ExceptionInformation=");
2004       for (int i = 0; i < num; i++) {
2005         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
2006       }
2007     }
2008   }
2009   st->cr();
2010 }
2011 
2012 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
2013   // TODO: Can we kill thread?
2014   return false;
2015 }
2016 
2017 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2018   // do nothing
2019 }
2020 
2021 static char saved_jvm_path[MAX_PATH] = {0};
2022 
2023 // Find the full path to the current module, jvm.dll
2024 void os::jvm_path(char *buf, jint buflen) {
2025   // Error checking.
2026   if (buflen < MAX_PATH) {
2027     assert(false, "must use a large-enough buffer");
2028     buf[0] = '\0';
2029     return;
2030   }
2031   // Lazy resolve the path to current module.
2032   if (saved_jvm_path[0] != 0) {
2033     strcpy(buf, saved_jvm_path);
2034     return;
2035   }
2036 
2037   buf[0] = '\0';
2038   if (Arguments::sun_java_launcher_is_altjvm()) {
2039     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
2040     // for a JAVA_HOME environment variable and fix up the path so it
2041     // looks like jvm.dll is installed there (append a fake suffix
2042     // hotspot/jvm.dll).
2043     char* java_home_var = ::getenv("JAVA_HOME");
2044     if (java_home_var != NULL && java_home_var[0] != 0 &&
2045         strlen(java_home_var) < (size_t)buflen) {
2046       strncpy(buf, java_home_var, buflen);
2047 
2048       // determine if this is a legacy image or modules image
2049       // modules image doesn't have "jre" subdirectory
2050       size_t len = strlen(buf);
2051       char* jrebin_p = buf + len;
2052       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
2053       if (0 != _access(buf, 0)) {
2054         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
2055       }
2056       len = strlen(buf);
2057       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
2058     }
2059   }
2060 
2061   if (buf[0] == '\0') {
2062     GetModuleFileName(vm_lib_handle, buf, buflen);
2063   }
2064   strncpy(saved_jvm_path, buf, MAX_PATH);
2065   saved_jvm_path[MAX_PATH - 1] = '\0';
2066 }
2067 
2068 
2069 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2070 #ifndef _WIN64
2071   st->print("_");
2072 #endif
2073 }
2074 
2075 
2076 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2077 #ifndef _WIN64
2078   st->print("@%d", args_size  * sizeof(int));
2079 #endif
2080 }
2081 
2082 // This method is a copy of JDK's sysGetLastErrorString
2083 // from src/windows/hpi/src/system_md.c
2084 
2085 size_t os::lasterror(char* buf, size_t len) {
2086   DWORD errval;
2087 
2088   if ((errval = GetLastError()) != 0) {
2089     // DOS error
2090     size_t n = (size_t)FormatMessage(
2091                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
2092                                      NULL,
2093                                      errval,
2094                                      0,
2095                                      buf,
2096                                      (DWORD)len,
2097                                      NULL);
2098     if (n > 3) {
2099       // Drop final '.', CR, LF
2100       if (buf[n - 1] == '\n') n--;
2101       if (buf[n - 1] == '\r') n--;
2102       if (buf[n - 1] == '.') n--;
2103       buf[n] = '\0';
2104     }
2105     return n;
2106   }
2107 
2108   if (errno != 0) {
2109     // C runtime error that has no corresponding DOS error code
2110     const char* s = os::strerror(errno);
2111     size_t n = strlen(s);
2112     if (n >= len) n = len - 1;
2113     strncpy(buf, s, n);
2114     buf[n] = '\0';
2115     return n;
2116   }
2117 
2118   return 0;
2119 }
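
// Usage sketch (hypothetical): after a failing Win32 call, lasterror() turns
// the thread's last error code (or errno for CRT-level failures) into
// readable text.
//
//   if (!DeleteFile("C:\\no\\such\\file")) {
//     char msg[256];
//     if (os::lasterror(msg, sizeof(msg)) > 0) {
//       log_info(os)("DeleteFile failed: %s", msg);
//     }
//   }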
2120 
2121 int os::get_last_error() {
2122   DWORD error = GetLastError();
2123   if (error == 0) {
2124     error = errno;
2125   }
2126   return (int)error;
2127 }
2128 
2129 // sun.misc.Signal
2130 // NOTE that this is a workaround for an apparent kernel bug where if
2131 // a signal handler for SIGBREAK is installed then that signal handler
2132 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
2133 // See bug 4416763.
2134 static void (*sigbreakHandler)(int) = NULL;
2135 
2136 static void UserHandler(int sig, void *siginfo, void *context) {
2137   os::signal_notify(sig);
2138   // We need to reinstate the signal handler each time...
2139   os::signal(sig, (void*)UserHandler);
2140 }
2141 
2142 void* os::user_handler() {
2143   return (void*) UserHandler;
2144 }
2145 
2146 void* os::signal(int signal_number, void* handler) {
2147   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
2148     void (*oldHandler)(int) = sigbreakHandler;
2149     sigbreakHandler = (void (*)(int)) handler;
2150     return (void*) oldHandler;
2151   } else {
2152     return (void*)::signal(signal_number, (void (*)(int))handler);
2153   }
2154 }
2155 
2156 void os::signal_raise(int signal_number) {
2157   raise(signal_number);
2158 }
2159 
2160 // The Win32 C runtime library maps all console control events other than ^C
2161 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2162 // logoff, and shutdown events.  We therefore install our own console handler
2163 // that raises SIGTERM for the latter cases.
2164 //
2165 static BOOL WINAPI consoleHandler(DWORD event) {
2166   switch (event) {
2167   case CTRL_C_EVENT:
2168     if (VMError::is_error_reported()) {
2169       // Ctrl-C is pressed during error reporting, likely because the error
2170       // handler fails to abort. Let VM die immediately.
2171       os::die();
2172     }
2173 
2174     os::signal_raise(SIGINT);
2175     return TRUE;
2176     break;
2177   case CTRL_BREAK_EVENT:
2178     if (sigbreakHandler != NULL) {
2179       (*sigbreakHandler)(SIGBREAK);
2180     }
2181     return TRUE;
2182     break;
2183   case CTRL_LOGOFF_EVENT: {
2184     // Don't terminate JVM if it is running in a non-interactive session,
2185     // such as a service process.
2186     USEROBJECTFLAGS flags;
2187     HANDLE handle = GetProcessWindowStation();
2188     if (handle != NULL &&
2189         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2190         sizeof(USEROBJECTFLAGS), NULL)) {
2191       // If it is a non-interactive session, let the next handler
2192       // deal with it.
2193       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2194         return FALSE;
2195       }
2196     }
2197   }
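  // Fall through for interactive sessions: logoff is treated like
  // close/shutdown below and raises SIGTERM.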
2198   case CTRL_CLOSE_EVENT:
2199   case CTRL_SHUTDOWN_EVENT:
2200     os::signal_raise(SIGTERM);
2201     return TRUE;
2202     break;
2203   default:
2204     break;
2205   }
2206   return FALSE;
2207 }
2208 
2209 // The following code is moved from os.cpp for making this
2210 // code platform specific, which it is by its very nature.
2211 
2212 // Return maximum OS signal used + 1 for internal use only
2213 // Used as exit signal for signal_thread
2214 int os::sigexitnum_pd() {
2215   return NSIG;
2216 }
2217 
2218 // a counter for each possible signal value, including signal_thread exit signal
2219 static volatile jint pending_signals[NSIG+1] = { 0 };
2220 static Semaphore* sig_sem = NULL;
2221 
2222 static void jdk_misc_signal_init() {
2223   // Initialize signal structures
2224   memset((void*)pending_signals, 0, sizeof(pending_signals));
2225 
2226   // Initialize signal semaphore
2227   sig_sem = new Semaphore();
2228 
2229   // Programs embedding the VM do not want it to attempt to receive
2230   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2231   // shutdown hooks mechanism introduced in 1.3.  For example, when
2232   // the VM is run as part of a Windows NT service (i.e., a servlet
2233   // engine in a web server), the correct behavior is for any console
2234   // control handler to return FALSE, not TRUE, because the OS's
2235   // "final" handler for such events allows the process to continue if
2236   // it is a service (while terminating it if it is not a service).
2237   // To make this behavior uniform and the mechanism simpler, we
2238   // completely disable the VM's usage of these console events if -Xrs
2239   // (=ReduceSignalUsage) is specified.  This means, for example, that
2240   // the CTRL-BREAK thread dump mechanism is also disabled in this
2241   // case.  See bugs 4323062, 4345157, and related bugs.
2242 
2243   // Add a CTRL-C handler
2244   SetConsoleCtrlHandler(consoleHandler, TRUE);
2245 }
2246 
2247 void os::signal_notify(int sig) {
2248   if (sig_sem != NULL) {
2249     Atomic::inc(&pending_signals[sig]);
2250     sig_sem->signal();
2251   } else {
2252     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
2253     // initialization isn't called.
2254     assert(ReduceSignalUsage, "signal semaphore should be created");
2255   }
2256 }
2257 
2258 static int check_pending_signals() {
2259   while (true) {
2260     for (int i = 0; i < NSIG + 1; i++) {
2261       jint n = pending_signals[i];
2262       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2263         return i;
2264       }
2265     }
2266     sig_sem->wait_with_safepoint_check(JavaThread::current());
2267   }
2268   ShouldNotReachHere();
2269   return 0; // Satisfy compiler
2270 }
2271 
2272 int os::signal_wait() {
2273   return check_pending_signals();
2274 }
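
// Hand-off sketch for the machinery above: UserHandler() -> os::signal_notify()
// bumps pending_signals[sig] and posts sig_sem; the JDK signal dispatcher
// thread sits in os::signal_wait(), where check_pending_signals() claims one
// pending occurrence via the cmpxchg decrement (n -> n - 1) and returns the
// signal number, blocking on the semaphore when nothing is pending.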
2275 
2276 // Implicit OS exception handling
2277 
2278 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2279                       address handler) {
2280   Thread* thread = Thread::current_or_null();
2281 
2282 #if defined(_M_ARM64)
2283   #define PC_NAME Pc
2284 #elif defined(_M_AMD64)
2285   #define PC_NAME Rip
2286 #elif defined(_M_IX86)
2287   #define PC_NAME Eip
2288 #else
2289   #error unknown architecture
2290 #endif
2291 
2292   // Save pc in thread
2293   if (thread != nullptr && thread->is_Java_thread()) {
2294     JavaThread::cast(thread)->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->PC_NAME);
2295   }
2296 
2297   // Set pc to handler
2298   exceptionInfo->ContextRecord->PC_NAME = (DWORD64)handler;
2299 
2300   // Continue the execution
2301   return EXCEPTION_CONTINUE_EXECUTION;
2302 }
2303 
2304 
2305 // Used for PostMortemDump
2306 extern "C" void safepoints();
2307 extern "C" void find(int x);
2308 extern "C" void events();
2309 
2310 // According to Windows API documentation, an illegal instruction sequence should generate
2311 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2312 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2313 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2314 
2315 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2316 
2317 // From "Execution Protection in the Windows Operating System" draft 0.35
2318 // Once a system header becomes available, the "real" define should be
2319 // included or copied here.
2320 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2321 
2322 // Windows Vista/2008 heap corruption check
2323 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2324 
2325 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2326 // C++ compiler contain this error code. Because this is a compiler-generated
2327 // error, the code is not listed in the Win32 API header files.
2328 // The code is actually a cryptic mnemonic device, with the initial "E"
2329 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2330 // ASCII values of "msc".
2331 
2332 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
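
// Quick check of that mnemonic: 'm' == 0x6D, 's' == 0x73, 'c' == 0x63, so
//   0xE0000000 | ('m' << 16) | ('s' << 8) | 'c'  ==  0xE06D7363
// which is exactly the value defined above.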
2333 
2334 #define def_excpt(val) { #val, (val) }
2335 
2336 static const struct { const char* name; uint number; } exceptlabels[] = {
2337     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2338     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2339     def_excpt(EXCEPTION_BREAKPOINT),
2340     def_excpt(EXCEPTION_SINGLE_STEP),
2341     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2342     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2343     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2344     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2345     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2346     def_excpt(EXCEPTION_FLT_OVERFLOW),
2347     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2348     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2349     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2350     def_excpt(EXCEPTION_INT_OVERFLOW),
2351     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2352     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2353     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2354     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2355     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2356     def_excpt(EXCEPTION_STACK_OVERFLOW),
2357     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2358     def_excpt(EXCEPTION_GUARD_PAGE),
2359     def_excpt(EXCEPTION_INVALID_HANDLE),
2360     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2361     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2362 };
2363 
2364 #undef def_excpt
2365 
2366 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2367   uint code = static_cast<uint>(exception_code);
2368   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2369     if (exceptlabels[i].number == code) {
2370       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2371       return buf;
2372     }
2373   }
2374 
2375   return NULL;
2376 }
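
// Usage sketch (hypothetical): look up a printable name for an exception
// code; NULL is returned for codes missing from exceptlabels[].
//
//   char name[64];
//   if (os::exception_name(EXCEPTION_ACCESS_VIOLATION, name, sizeof(name)) != NULL) {
//     // name == "EXCEPTION_ACCESS_VIOLATION"
//   }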
2377 
2378 //-----------------------------------------------------------------------------
2379 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2380   // handle exception caused by idiv; should only happen for -MinInt/-1
2381   // (division by zero is handled explicitly)
2382 #if defined(_M_ARM64)
2383   PCONTEXT ctx = exceptionInfo->ContextRecord;
2384   address pc = (address)ctx->Sp;
2385   assert(pc[0] == 0x83, "not an sdiv opcode"); // FIXME: did I get the right opcode?
2386   assert(ctx->X4 == min_jint, "unexpected idiv exception");
2387   // set correct result values and continue after idiv instruction
2388   ctx->Pc = (uint64_t)pc + 4;        // idiv reg, reg, reg  is 4 bytes
2389   ctx->X4 = (uint64_t)min_jint;      // result
2390   ctx->X5 = (uint64_t)0;             // remainder
2391   // Continue the execution
2392 #elif defined(_M_AMD64)
2393   PCONTEXT ctx = exceptionInfo->ContextRecord;
2394   address pc = (address)ctx->Rip;
2395   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2396   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2397   if (pc[0] == 0xF7) {
2398     // set correct result values and continue after idiv instruction
2399     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2400   } else {
2401     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2402   }
2403   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2404   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2405   // idiv opcode (0xF7).
2406   ctx->Rdx = (DWORD)0;             // remainder
2407   // Continue the execution
2408 #else
2409   PCONTEXT ctx = exceptionInfo->ContextRecord;
2410   address pc = (address)ctx->Eip;
2411   assert(pc[0] == 0xF7, "not an idiv opcode");
2412   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2413   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2414   // set correct result values and continue after idiv instruction
2415   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2416   ctx->Eax = (DWORD)min_jint;      // result
2417   ctx->Edx = (DWORD)0;             // remainder
2418   // Continue the execution
2419 #endif
2420   return EXCEPTION_CONTINUE_EXECUTION;
2421 }
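
// Background sketch: the only overflowing two's-complement division is
// min_jint / -1 (and min_jlong / -1 for 64-bit operands), because the true
// quotient 2^31 (resp. 2^63) is not representable.  The handler above
// materializes the Java-mandated wrap-around result
//
//   min_jint / -1  ==  min_jint   // quotient left (or written) in the dividend register
//   min_jint % -1  ==  0          // remainder register cleared
//
// and resumes execution just past the idiv/sdiv instruction.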
2422 
2423 #if defined(_M_AMD64) || defined(_M_IX86)
2424 //-----------------------------------------------------------------------------
2425 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2426   PCONTEXT ctx = exceptionInfo->ContextRecord;
2427 #ifndef  _WIN64
2428   // handle exception caused by native method modifying control word
2429   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2430 
2431   switch (exception_code) {
2432   case EXCEPTION_FLT_DENORMAL_OPERAND:
2433   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2434   case EXCEPTION_FLT_INEXACT_RESULT:
2435   case EXCEPTION_FLT_INVALID_OPERATION:
2436   case EXCEPTION_FLT_OVERFLOW:
2437   case EXCEPTION_FLT_STACK_CHECK:
2438   case EXCEPTION_FLT_UNDERFLOW:
2439     jint fp_control_word = (* (jint*) StubRoutines::x86::addr_fpu_cntrl_wrd_std());
2440     if (fp_control_word != ctx->FloatSave.ControlWord) {
2441       // Restore FPCW and mask out FLT exceptions
2442       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2443       // Mask out pending FLT exceptions
2444       ctx->FloatSave.StatusWord &=  0xffffff00;
2445       return EXCEPTION_CONTINUE_EXECUTION;
2446     }
2447   }
2448 
2449   if (prev_uef_handler != NULL) {
2450     // We didn't handle this exception so pass it to the previous
2451     // UnhandledExceptionFilter.
2452     return (prev_uef_handler)(exceptionInfo);
2453   }
2454 #else // !_WIN64
2455   // On Windows, the mxcsr control bits are non-volatile across calls
2456   // See also CR 6192333
2457   //
2458   jint MxCsr = INITIAL_MXCSR;
2459   // we can't use StubRoutines::x86::addr_mxcsr_std()
2460   // because in Win64 mxcsr is not saved there
2461   if (MxCsr != ctx->MxCsr) {
2462     ctx->MxCsr = MxCsr;
2463     return EXCEPTION_CONTINUE_EXECUTION;
2464   }
2465 #endif // !_WIN64
2466 
2467   return EXCEPTION_CONTINUE_SEARCH;
2468 }
2469 #endif
2470 
2471 static inline void report_error(Thread* t, DWORD exception_code,
2472                                 address addr, void* siginfo, void* context) {
2473   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2474 
2475   // If UseOSErrorReporting, this will return here and save the error file
2476   // somewhere where we can find it in the minidump.
2477 }
2478 
2479 //-----------------------------------------------------------------------------
2480 JNIEXPORT
2481 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2482   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2483   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2484   DWORD exception_code = exception_record->ExceptionCode;
2485 #if defined(_M_ARM64)
2486   address pc = (address) exceptionInfo->ContextRecord->Pc;
2487 #elif defined(_M_AMD64)
2488   address pc = (address) exceptionInfo->ContextRecord->Rip;
2489 #else
2490   address pc = (address) exceptionInfo->ContextRecord->Eip;
2491 #endif
2492   Thread* t = Thread::current_or_null_safe();
2493 
2494   // Handle SafeFetch32 and SafeFetchN exceptions.
2495   if (StubRoutines::is_safefetch_fault(pc)) {
2496     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2497   }
2498 
2499 #ifndef _WIN64
2500   // Execution protection violation - win32 running on AMD64 only
2501   // Handled first to avoid misdiagnosis as a "normal" access violation;
2502   // This is safe to do because we have a new/unique ExceptionInformation
2503   // code for this condition.
2504   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2505     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2506     address addr = (address) exception_record->ExceptionInformation[1];
2507 
2508     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2509       int page_size = os::vm_page_size();
2510 
2511       // Make sure the pc and the faulting address are sane.
2512       //
2513       // If an instruction spans a page boundary, and the page containing
2514       // the beginning of the instruction is executable but the following
2515       // page is not, the pc and the faulting address might be slightly
2516       // different - we still want to unguard the 2nd page in this case.
2517       //
2518       // 15 bytes seems to be a (very) safe value for max instruction size.
2519       bool pc_is_near_addr =
2520         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2521       bool instr_spans_page_boundary =
2522         (align_down((intptr_t) pc ^ (intptr_t) addr,
2523                          (intptr_t) page_size) > 0);
2524 
2525       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2526         static volatile address last_addr =
2527           (address) os::non_memory_address_word();
2528 
2529         // In conservative mode, don't unguard unless the address is in the VM
2530         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2531             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2532 
2533           // Set memory to RWX and retry
2534           address page_start = align_down(addr, page_size);
2535           bool res = os::protect_memory((char*) page_start, page_size,
2536                                         os::MEM_PROT_RWX);
2537 
2538           log_debug(os)("Execution protection violation "
2539                         "at " INTPTR_FORMAT
2540                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2541                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2542 
2543           // Set last_addr so if we fault again at the same address, we don't
2544           // end up in an endless loop.
2545           //
2546           // There are two potential complications here.  Two threads trapping
2547           // at the same address at the same time could cause one of the
2548           // threads to think it already unguarded, and abort the VM.  Likely
2549           // very rare.
2550           //
2551           // The other race involves two threads alternately trapping at
2552           // different addresses and failing to unguard the page, resulting in
2553           // an endless loop.  This condition is probably even more unlikely
2554           // than the first.
2555           //
2556           // Although both cases could be avoided by using locks or thread
2557           // local last_addr, these solutions are unnecessary complication:
2558           // this handler is a best-effort safety net, not a complete solution.
2559           // It is disabled by default and should only be used as a workaround
2560           // in case we missed any no-execute-unsafe VM code.
2561 
2562           last_addr = addr;
2563 
2564           return EXCEPTION_CONTINUE_EXECUTION;
2565         }
2566       }
2567 
2568       // Last unguard failed or not unguarding
2569       tty->print_raw_cr("Execution protection violation");
2570 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2571       report_error(t, exception_code, addr, exception_record,
2572                    exceptionInfo->ContextRecord);
2573 #endif
2574       return EXCEPTION_CONTINUE_SEARCH;
2575     }
2576   }
2577 #endif // _WIN64
2578 
2579 #if defined(_M_AMD64) || defined(_M_IX86)
2580   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2581       VM_Version::is_cpuinfo_segv_addr(pc)) {
2582     // Verify that the OS saves/restores AVX registers.
2583     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2584   }
2585 #endif
2586 
2587   if (t != NULL && t->is_Java_thread()) {
2588     JavaThread* thread = JavaThread::cast(t);
2589     bool in_java = thread->thread_state() == _thread_in_Java;
2590     bool in_native = thread->thread_state() == _thread_in_native;
2591     bool in_vm = thread->thread_state() == _thread_in_vm;
2592 
2593     // Handle potential stack overflows up front.
2594     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2595       StackOverflow* overflow_state = thread->stack_overflow_state();
2596       if (overflow_state->stack_guards_enabled()) {
2597         if (in_java) {
2598           frame fr;
2599           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2600             assert(fr.is_java_frame(), "Must be a Java frame");
2601             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2602           }
2603         }
2604         // Yellow zone violation.  The o/s has unprotected the first yellow
2605         // zone page for us.  Note: we must call disable_stack_yellow_reserved_zone()
2606         // to update the enabled status, even if the zone contains only one page.
2607         assert(!in_vm, "Undersized StackShadowPages");
2608         overflow_state->disable_stack_yellow_reserved_zone();
2609         // If not in java code, return and hope for the best.
2610         return in_java
2611             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2612             :  EXCEPTION_CONTINUE_EXECUTION;
2613       } else {
2614         // Fatal red zone violation.
2615         overflow_state->disable_stack_red_zone();
2616         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2617 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2618         report_error(t, exception_code, pc, exception_record,
2619                       exceptionInfo->ContextRecord);
2620 #endif
2621         return EXCEPTION_CONTINUE_SEARCH;
2622       }
2623     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2624       if (in_java) {
2625         // Either stack overflow or null pointer exception.
2626         address addr = (address) exception_record->ExceptionInformation[1];
2627         address stack_end = thread->stack_end();
2628         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2629           // Stack overflow.
2630           assert(!os::uses_stack_guard_pages(),
2631                  "should be caught by red zone code above.");
2632           return Handle_Exception(exceptionInfo,
2633                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2634         }
2635         // Check for safepoint polling and implicit null
2636         // We only expect null pointers in the stubs (vtable);
2637         // the rest are checked explicitly now.
2638         CodeBlob* cb = CodeCache::find_blob(pc);
2639         if (cb != NULL) {
2640           if (SafepointMechanism::is_poll_address(addr)) {
2641             address stub = SharedRuntime::get_poll_stub(pc);
2642             return Handle_Exception(exceptionInfo, stub);
2643           }
2644         }
2645 #ifdef _WIN64
2646         // If it's a legal stack address, map the entire region in
2647         if (thread->is_in_usable_stack(addr)) {
2648           addr = (address)((uintptr_t)addr &
2649                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2650           os::commit_memory((char *)addr, thread->stack_base() - addr,
2651                             !ExecMem);
2652           return EXCEPTION_CONTINUE_EXECUTION;
2653         }
2654 #endif
2655         // Null pointer exception.
2656         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2657           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2658           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2659         }
2660         report_error(t, exception_code, pc, exception_record,
2661                       exceptionInfo->ContextRecord);
2662         return EXCEPTION_CONTINUE_SEARCH;
2663       }
2664 
2665 #ifdef _WIN64
2666       // Special care for fast JNI field accessors.
2667       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2668       // in and the heap gets shrunk before the field access.
2669       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2670       if (slowcase_pc != (address)-1) {
2671         return Handle_Exception(exceptionInfo, slowcase_pc);
2672       }
2673 #endif
2674 
2675       // Stack overflow or null pointer exception in native code.
2676 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2677       report_error(t, exception_code, pc, exception_record,
2678                    exceptionInfo->ContextRecord);
2679 #endif
2680       return EXCEPTION_CONTINUE_SEARCH;
2681     } // /EXCEPTION_ACCESS_VIOLATION
2682     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2683 
2684     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2685       CompiledMethod* nm = NULL;
2686       if (in_java) {
2687         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2688         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2689       }
2690 
2691       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2692       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2693           (nm != NULL && nm->has_unsafe_access())) {
2694         address next_pc =  Assembler::locate_next_instruction(pc);
2695         if (is_unsafe_arraycopy) {
2696           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2697         }
2698         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2699       }
2700     }
2701 
2702 #ifdef _M_ARM64
2703     if (in_java &&
2704         (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2705           exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2706       if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
2707         if (TraceTraps) {
2708           tty->print_cr("trap: zombie_not_entrant");
2709         }
2710         return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
2711       }
2712     }
2713 #endif
2714 
2715     if (in_java) {
2716       switch (exception_code) {
2717       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2718         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2719 
2720       case EXCEPTION_INT_OVERFLOW:
2721         return Handle_IDiv_Exception(exceptionInfo);
2722 
2723       } // switch
2724     }
2725 
2726 #if defined(_M_AMD64) || defined(_M_IX86)
2727     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2728       LONG result=Handle_FLT_Exception(exceptionInfo);
2729       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2730     }
2731 #endif
2732   }
2733 
2734 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2735   if (exception_code != EXCEPTION_BREAKPOINT) {
2736     report_error(t, exception_code, pc, exception_record,
2737                  exceptionInfo->ContextRecord);
2738   }
2739 #endif
2740   return EXCEPTION_CONTINUE_SEARCH;
2741 }
2742 
2743 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
2744 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2745   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2746 #if defined(_M_ARM64)
2747   address pc = (address) exceptionInfo->ContextRecord->Pc;
2748 #elif defined(_M_AMD64)
2749   address pc = (address) exceptionInfo->ContextRecord->Rip;
2750 #else
2751   address pc = (address) exceptionInfo->ContextRecord->Eip;
2752 #endif
2753 
2754   // Fast path for code part of the code cache
2755   if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
2756     return topLevelExceptionFilter(exceptionInfo);
2757   }
2758 
2759   // If the exception occurred in the codeCache, pass control
2760   // to our normal exception handler.
2761   CodeBlob* cb = CodeCache::find_blob(pc);
2762   if (cb != NULL) {
2763     return topLevelExceptionFilter(exceptionInfo);
2764   }
2765 
2766   return EXCEPTION_CONTINUE_SEARCH;
2767 }
2768 #endif
2769 
2770 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
2771 LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2772   if (InterceptOSException) goto exit;
2773   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2774 #if defined(_M_ARM64)
2775   address pc = (address)exceptionInfo->ContextRecord->Pc;
2776 #elif defined(_M_AMD64)
2777   address pc = (address) exceptionInfo->ContextRecord->Rip;
2778 #else
2779   address pc = (address) exceptionInfo->ContextRecord->Eip;
2780 #endif
2781   Thread* t = Thread::current_or_null_safe();
2782 
2783   if (exception_code != EXCEPTION_BREAKPOINT) {
2784     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2785                 exceptionInfo->ContextRecord);
2786   }
2787 exit:
2788   return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
2789 }
2790 #endif
2791 
2792 #ifndef _WIN64
2793 // Special care for fast JNI accessors.
2794 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2795 // the heap gets shrunk before the field access.
2796 // Need to install our own structured exception handler since native code may
2797 // install its own.
2798 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2799   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2800   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2801     address pc = (address) exceptionInfo->ContextRecord->Eip;
2802     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2803     if (addr != (address)-1) {
2804       return Handle_Exception(exceptionInfo, addr);
2805     }
2806   }
2807   return EXCEPTION_CONTINUE_SEARCH;
2808 }
2809 
2810 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2811   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2812                                                      jobject obj,           \
2813                                                      jfieldID fieldID) {    \
2814     __try {                                                                 \
2815       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2816                                                                  obj,       \
2817                                                                  fieldID);  \
2818     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2819                                               _exception_info())) {         \
2820     }                                                                       \
2821     return 0;                                                               \
2822   }
2823 
2824 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2825 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2826 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2827 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2828 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2829 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2830 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2831 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2832 
2833 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2834   switch (type) {
2835   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2836   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2837   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2838   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2839   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2840   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2841   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2842   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2843   default:        ShouldNotReachHere();
2844   }
2845   return (address)-1;
2846 }
2847 #endif
2848 
2849 // Virtual Memory
2850 
2851 int os::vm_page_size() { return os::win32::vm_page_size(); }
2852 int os::vm_allocation_granularity() {
2853   return os::win32::vm_allocation_granularity();
2854 }
2855 
2856 // Windows large page support is available on Windows 2003. In order to use
2857 // large page memory, the administrator must first assign additional privilege
2858 // to the user:
2859 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2860 //   + select Local Policies -> User Rights Assignment
2861 //   + double click "Lock pages in memory", add users and/or groups
2862 //   + reboot
2863 // Note the above steps are needed for administrator as well, as administrators
2864 // by default do not have the privilege to lock pages in memory.
2865 //
2866 // Note about Windows 2003: although the API supports committing large page
2867 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2868 // scenario, I found through experiment that it only uses large pages if the entire
2869 // memory region is reserved and committed in a single VirtualAlloc() call.
2870 // This makes Windows large page support more or less like Solaris ISM, in
2871 // that the entire heap must be committed upfront. This probably will change
2872 // in the future, if so the code below needs to be revisited.
2873 
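// Illustrative single-call reserve+commit as described above (hypothetical
// size variable "bytes"; the VM's own allocation helpers follow below):
//
//   // bytes must be a multiple of the minimum large-page size
//   char* base = (char*) VirtualAlloc(NULL, bytes,
//                                     MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                                     PAGE_READWRITE);
//   // base is NULL if the "Lock pages in memory" privilege is missing or if
//   // enough contiguous physical memory for large pages cannot be found.
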
2874 #ifndef MEM_LARGE_PAGES
2875   #define MEM_LARGE_PAGES 0x20000000
2876 #endif
2877 
2878 // Container for NUMA node list info
2879 class NUMANodeListHolder {
2880  private:
2881   int *_numa_used_node_list;  // allocated below
2882   int _numa_used_node_count;
2883 
2884   void free_node_list() {
2885     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2886   }
2887 
2888  public:
2889   NUMANodeListHolder() {
2890     _numa_used_node_count = 0;
2891     _numa_used_node_list = NULL;
2892     // do rest of initialization in build routine (after function pointers are set up)
2893   }
2894 
2895   ~NUMANodeListHolder() {
2896     free_node_list();
2897   }
2898 
2899   bool build() {
2900     DWORD_PTR proc_aff_mask;
2901     DWORD_PTR sys_aff_mask;
2902     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2903     ULONG highest_node_number;
2904     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2905     free_node_list();
2906     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2907     for (unsigned int i = 0; i <= highest_node_number; i++) {
2908       ULONGLONG proc_mask_numa_node;
2909       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2910       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2911         _numa_used_node_list[_numa_used_node_count++] = i;
2912       }
2913     }
2914     return (_numa_used_node_count > 1);
2915   }
2916 
2917   int get_count() { return _numa_used_node_count; }
2918   int get_node_list_entry(int n) {
2919     // for indexes out of range, returns -1
2920     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2921   }
2922 
2923 } numa_node_list_holder;
2924 
2925 static size_t _large_page_size = 0;
2926 
2927 static bool request_lock_memory_privilege() {
2928   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2929                                 os::current_process_id());
2930 
2931   bool success = false;
2932   HANDLE hToken = NULL;
2933   LUID luid;
2934   if (hProcess != NULL &&
2935       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2936       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2937 
2938     TOKEN_PRIVILEGES tp;
2939     tp.PrivilegeCount = 1;
2940     tp.Privileges[0].Luid = luid;
2941     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2942 
2943     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2944     // privilege. Check GetLastError() too. See MSDN document.
2945     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2946         (GetLastError() == ERROR_SUCCESS)) {
2947       success = true;
2948     }
2949   }
2950 
2951   // Cleanup
2952   if (hProcess != NULL) {
2953     CloseHandle(hProcess);
2954   }
2955   if (hToken != NULL) {
2956     CloseHandle(hToken);
2957   }
2958 
2959   return success;
2960 }
2961 
2962 static bool numa_interleaving_init() {
2963   bool success = false;
2964 
2965   // print a warning if UseNUMAInterleaving flag is specified on command line
2966   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2967 
2968 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2969 
2970   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2971   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2972   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2973 
2974   if (!numa_node_list_holder.build()) {
2975     WARN("Process does not cover multiple NUMA nodes.");
2976     WARN("...Ignoring UseNUMAInterleaving flag.");
2977     return false;
2978   }
2979 
2980   if (log_is_enabled(Debug, os, cpu)) {
2981     Log(os, cpu) log;
2982     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2983     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2984       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2985     }
2986   }
2987 
2988 #undef WARN
2989 
2990   return true;
2991 }
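
// Worked example of the granularity handling above (illustrative numbers): with
// UseLargePages off and a typical vm_allocation_granularity() of 64K, a requested
// NUMAInterleaveGranularity of, say, 100K is rounded up by align_up() to 128K
// before any chunked allocation is attempted.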
2992 
2993 // This routine is used whenever we need to reserve a contiguous VA range
2994 // but must make separate VirtualAlloc calls for each piece of the range.
2995 // Reasons for doing this:
2996 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set explicitly)
2997 //  * UseNUMAInterleaving requires a separate node for each piece
2998 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2999                                          DWORD prot,
3000                                          bool should_inject_error = false) {
3001   char * p_buf;
3002   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
3003   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
3004   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
3005 
3006   // First reserve enough address space in advance, since we want to be
3007   // able to break a single contiguous virtual address range into multiple
3008   // large page commits, but WS2003 does not allow reserving large page space.
3009   // So we just use 4K pages for the reserve; this gives us a legal contiguous
3010   // address space. Then we deallocate that reservation and re-allocate
3011   // using large pages.
3012   const size_t size_of_reserve = bytes + chunk_size;
3013   if (bytes > size_of_reserve) {
3014     // Overflowed.
3015     return NULL;
3016   }
3017   p_buf = (char *) virtualAlloc(addr,
3018                                 size_of_reserve,  // size of Reserve
3019                                 MEM_RESERVE,
3020                                 PAGE_READWRITE);
3021   // If reservation failed, return NULL
3022   if (p_buf == NULL) return NULL;
3023   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
3024   os::release_memory(p_buf, bytes + chunk_size);
3025 
3026   // We still need to round up to a page boundary (in case we are using large pages),
3027   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
3028   // instead we handle this in the bytes_to_rq computation below.
3029   p_buf = align_up(p_buf, page_size);
3030 
3031   // now go through and allocate one chunk at a time until all bytes are
3032   // allocated
3033   size_t  bytes_remaining = bytes;
3034   // An overflow of align_up() would have been caught above
3035   // in the calculation of size_of_reserve.
3036   char * next_alloc_addr = p_buf;
3037   HANDLE hProc = GetCurrentProcess();
3038 
3039 #ifdef ASSERT
3040   // Variable for the failure injection
3041   int ran_num = os::random();
3042   size_t fail_after = ran_num % bytes;
3043 #endif
3044 
3045   int count=0;
3046   while (bytes_remaining) {
3047     // select bytes_to_rq to get to the next chunk_size boundary
3048 
3049     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3050     // Note allocate and commit
3051     char * p_new;
3052 
3053 #ifdef ASSERT
3054     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3055 #else
3056     const bool inject_error_now = false;
3057 #endif
3058 
3059     if (inject_error_now) {
3060       p_new = NULL;
3061     } else {
3062       if (!UseNUMAInterleaving) {
3063         p_new = (char *) virtualAlloc(next_alloc_addr,
3064                                       bytes_to_rq,
3065                                       flags,
3066                                       prot);
3067       } else {
3068         // get the next node to use from the used_node_list
3069         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3070         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3071         p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3072       }
3073     }
3074 
3075     if (p_new == NULL) {
3076       // Free any allocated pages
3077       if (next_alloc_addr > p_buf) {
3078         // Some memory was committed so release it.
3079         size_t bytes_to_release = bytes - bytes_remaining;
3080         // NMT has yet to record any individual blocks, so it
3081         // needs to create a dummy 'reserve' record to match
3082         // the release.
3083         MemTracker::record_virtual_memory_reserve((address)p_buf,
3084                                                   bytes_to_release, CALLER_PC);
3085         os::release_memory(p_buf, bytes_to_release);
3086       }
3087 #ifdef ASSERT
3088       if (should_inject_error) {
3089         log_develop_debug(pagesize)("Reserving pages individually failed.");
3090       }
3091 #endif
3092       return NULL;
3093     }
3094 
3095     bytes_remaining -= bytes_to_rq;
3096     next_alloc_addr += bytes_to_rq;
3097     count++;
3098   }
3099   // Although the memory is allocated individually, it is returned as one.
3100   // NMT records it as one block.
3101   if ((flags & MEM_COMMIT) != 0) {
3102     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3103   } else {
3104     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3105   }
3106 
3107   // made it this far, success
3108   return p_buf;
3109 }
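
// Worked example of the chunking above (illustrative numbers): with a chunk_size of
// 2M and a starting address that lies 1.5M past a chunk boundary, the first iteration
// requests only 0.5M (chunk_size minus the offset within the chunk) to reach the next
// boundary; subsequent iterations then request full 2M chunks until bytes_remaining
// is exhausted.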
3110 
3111 static size_t large_page_init_decide_size() {
3112   // print a warning if any large page related flag is specified on command line
3113   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3114                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3115 
3116 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3117 
3118   if (!request_lock_memory_privilege()) {
3119     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3120     return 0;
3121   }
3122 
3123   size_t size = GetLargePageMinimum();
3124   if (size == 0) {
3125     WARN("Large pages are not supported by the processor.");
3126     return 0;
3127   }
3128 
3129 #if defined(IA32) || defined(AMD64)
3130   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3131     WARN("JVM cannot use large pages bigger than 4MB.");
3132     return 0;
3133   }
3134 #endif
3135 
3136   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3137     size = LargePageSizeInBytes;
3138   }
3139 
3140 #undef WARN
3141 
3142   return size;
3143 }
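
// Worked example (illustrative numbers): if GetLargePageMinimum() reports 2M and the
// user passes -XX:LargePageSizeInBytes=4M, 4M is a multiple of 2M and becomes the
// large page size; a value such as 3M is not a multiple and is silently ignored, so
// the 2M minimum is kept.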
3144 
3145 void os::large_page_init() {
3146   if (!UseLargePages) {
3147     return;
3148   }
3149 
3150   _large_page_size = large_page_init_decide_size();
3151   const size_t default_page_size = (size_t) vm_page_size();
3152   if (_large_page_size > default_page_size) {
3153     _page_sizes.add(_large_page_size);
3154   }
3155 
3156   UseLargePages = _large_page_size != 0;
3157 }
3158 
3159 int os::create_file_for_heap(const char* dir) {
3160 
3161   const char name_template[] = "/jvmheap.XXXXXX";
3162 
3163   size_t fullname_len = strlen(dir) + strlen(name_template);
3164   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3165   if (fullname == NULL) {
3166     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3167     return -1;
3168   }
3169   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3170   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3171 
3172   os::native_path(fullname);
3173 
3174   char *path = _mktemp(fullname);
3175   if (path == NULL) {
3176     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3177     os::free(fullname);
3178     return -1;
3179   }
3180 
3181   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3182 
3183   os::free(fullname);
3184   if (fd < 0) {
3185     warning("Problem opening file for heap (%s)", os::strerror(errno));
3186     return -1;
3187   }
3188   return fd;
3189 }
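
// Example of the resulting name (illustrative): for dir "C:\tmp" the template expands
// to "C:\tmp/jvmheap.XXXXXX"; os::native_path() converts the '/' to '\' and _mktemp()
// replaces the XXXXXX suffix with a unique value before the file is opened with
// O_TEMPORARY, so it is removed once the last handle to it is closed.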
3190 
3191 // If 'base' is not NULL, the function will return NULL if it cannot map the memory at 'base'.
3192 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3193   assert(fd != -1, "File descriptor is not valid");
3194 
3195   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3196 #ifdef _LP64
3197   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3198     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3199 #else
3200   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3201     0, (DWORD)size, NULL);
3202 #endif
3203   if (fileMapping == NULL) {
3204     if (GetLastError() == ERROR_DISK_FULL) {
3205       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3206     }
3207     else {
3208       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3209     }
3210 
3211     return NULL;
3212   }
3213 
3214   LPVOID addr = mapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3215 
3216   CloseHandle(fileMapping);
3217 
3218   return (char*)addr;
3219 }
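
// Minimal usage sketch (illustrative only, error handling elided; the directory name
// is hypothetical):
//   int fd = os::create_file_for_heap("C:\\tmp");
//   char* base = os::map_memory_to_file(NULL, 64 * M, fd);  // let the OS pick the address
// Passing a non-NULL 'base' instead asks MapViewOfFileEx() for that exact address and
// yields NULL if it cannot be honored.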
3220 
3221 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3222   assert(fd != -1, "File descriptor is not valid");
3223   assert(base != NULL, "Base address cannot be NULL");
3224 
3225   release_memory(base, size);
3226   return map_memory_to_file(base, size, fd);
3227 }
3228 
3229 // Multiple threads can race in this code, but it's not possible to unmap small sections of
3230 // virtual space to get the requested alignment, as on POSIX-like OSes.
3231 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3232 static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3233   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3234          "Alignment must be a multiple of the allocation granularity");
3235   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3236 
3237   size_t extra_size = size + alignment;
3238   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3239 
3240   char* aligned_base = NULL;
3241   static const int max_attempts = 20;
3242 
3243   for (int attempt = 0; attempt < max_attempts && aligned_base == NULL; attempt ++) {
3244     char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc) :
3245                                          os::reserve_memory(extra_size);
3246     if (extra_base == NULL) {
3247       return NULL;
3248     }
3249     // Do manual alignment
3250     aligned_base = align_up(extra_base, alignment);
3251 
3252     bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
3253                                   os::release_memory(extra_base, extra_size);
3254     assert(rc, "release failed");
3255     if (!rc) {
3256       return NULL;
3257     }
3258 
3259     // Attempt to map, into the just vacated space, the slightly smaller aligned area.
3260     // Which may fail, hence the loop.
3261     aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc) :
3262                                      os::attempt_reserve_memory_at(aligned_base, size);
3263   }
3264 
3265   assert(aligned_base != NULL, "Did not manage to re-map after %d attempts?", max_attempts);
3266 
3267   return aligned_base;
3268 }
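
// Worked example (illustrative numbers): for size = 32M and alignment = 16M the loop
// reserves 48M, computes the first 16M-aligned address inside that range, releases the
// whole 48M reservation, and then tries to re-reserve exactly 32M at the aligned
// address. If another thread grabs that range in the meantime, the attempt returns
// NULL and the loop retries, up to max_attempts times.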
3269 
3270 char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
3271   // exec can be ignored
3272   return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
3273 }
3274 
3275 char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd) {
3276   return map_or_reserve_memory_aligned(size, alignment, fd);
3277 }
3278 
3279 char* os::pd_reserve_memory(size_t bytes, bool exec) {
3280   return pd_attempt_reserve_memory_at(NULL /* addr */, bytes, exec);
3281 }
3282 
3283 // Reserve memory at an arbitrary address, only if that area is
3284 // available (and not reserved for something else).
3285 char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
3286   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3287          "reserve alignment");
3288   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3289   char* res;
3290   // Note that if UseLargePages is on, all the areas that require interleaving
3291   // will go through reserve_memory_special rather than through here.
3292   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3293   if (!use_individual) {
3294     res = (char*)virtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3295   } else {
3296     elapsedTimer reserveTimer;
3297     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3298     // in numa interleaving, we have to allocate pages individually
3299     // (well really chunks of NUMAInterleaveGranularity size)
3300     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3301     if (res == NULL) {
3302       warning("NUMA page allocation failed");
3303     }
3304     if (Verbose && PrintMiscellaneous) {
3305       reserveTimer.stop();
3306       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3307                     reserveTimer.milliseconds(), reserveTimer.ticks());
3308     }
3309   }
3310   assert(res == NULL || addr == NULL || addr == res,
3311          "Unexpected address from reserve.");
3312 
3313   return res;
3314 }
3315 
3316 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
3317   assert(file_desc >= 0, "file_desc is not valid");
3318   return map_memory_to_file(requested_addr, bytes, file_desc);
3319 }
3320 
3321 size_t os::large_page_size() {
3322   return _large_page_size;
3323 }
3324 
3325 bool os::can_commit_large_page_memory() {
3326   // Windows only uses large page memory when the entire region is reserved
3327   // and committed in a single VirtualAlloc() call. This may change in the
3328   // future, but with Windows 2003 it's not possible to commit on demand.
3329   return false;
3330 }
3331 
3332 bool os::can_execute_large_page_memory() {
3333   return true;
3334 }
3335 
3336 static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec) {
3337   log_debug(pagesize)("Reserving large pages individually.");
3338 
3339   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3340   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3341 
3342   char * p_buf = allocate_pages_individually(size, req_addr, flags, prot, LargePagesIndividualAllocationInjectError);
3343   if (p_buf == NULL) {
3344     // give an appropriate warning message
3345     if (UseNUMAInterleaving) {
3346       warning("NUMA large page allocation failed, UseLargePages flag ignored");
3347     }
3348     if (UseLargePagesIndividualAllocation) {
3349       warning("Individually allocated large pages failed, "
3350               "use -XX:-UseLargePagesIndividualAllocation to turn off");
3351     }
3352     return NULL;
3353   }
3354   return p_buf;
3355 }
3356 
3357 static char* reserve_large_pages_single_range(size_t size, char* req_addr, bool exec) {
3358   log_debug(pagesize)("Reserving large pages in a single large chunk.");
3359 
3360   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3361   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3362 
3363   return (char *) virtualAlloc(req_addr, size, flags, prot);
3364 }
3365 
3366 static char* reserve_large_pages(size_t size, char* req_addr, bool exec) {
3367   // with large pages, there are two cases where we need to use Individual Allocation
3368   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3369   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3370   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3371     return reserve_large_pages_individually(size, req_addr, exec);
3372   }
3373   return reserve_large_pages_single_range(size, req_addr, exec);
3374 }
3375 
3376 static char* find_aligned_address(size_t size, size_t alignment) {
3377   // Temporarily reserve memory large enough to ensure we can get the requested
3378   // alignment and still fit the reservation.
3379   char* addr = (char*) virtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
3380   // Align the address to the requested alignment.
3381   char* aligned_addr = align_up(addr, alignment);
3382   // Free the temporary reservation.
3383   virtualFree(addr, 0, MEM_RELEASE);
3384 
3385   return aligned_addr;
3386 }
3387 
3388 static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec) {
3389   log_debug(pagesize)("Reserving large pages at an aligned address, alignment=" SIZE_FORMAT "%s",
3390                       byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));
3391 
3392   // Will try to find a suitable address at most 20 times. The reason we need to try
3393   // multiple times is that between finding the aligned address and trying to commit
3394   // the large pages another thread might have reserved an overlapping region.
3395   const int attempts_limit = 20;
3396   for (int attempts = 0; attempts < attempts_limit; attempts++)  {
3397     // Find aligned address.
3398     char* aligned_address = find_aligned_address(size, alignment);
3399 
3400     // Try to do the large page reservation using the aligned address.
3401     aligned_address = reserve_large_pages(size, aligned_address, exec);
3402     if (aligned_address != NULL) {
3403       // Reservation at the aligned address succeeded.
3404       guarantee(is_aligned(aligned_address, alignment), "Must be aligned");
3405       return aligned_address;
3406     }
3407   }
3408 
3409   log_debug(pagesize)("Failed reserving large pages at aligned address");
3410   return NULL;
3411 }
3412 
3413 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* addr,
3414                                     bool exec) {
3415   assert(UseLargePages, "only for large pages");
3416   assert(page_size == os::large_page_size(), "Currently only support one large page size on Windows");
3417   assert(is_aligned(addr, alignment), "Must be");
3418   assert(is_aligned(addr, page_size), "Must be");
3419 
3420   if (!is_aligned(bytes, page_size)) {
3421     // Fallback to small pages, Windows does not support mixed mappings.
3422     return NULL;
3423   }
3424 
3425   // The requested alignment can be larger than the page size, for example with G1
3426   // the alignment is bound to the heap region size. So this reservation needs to
3427   // ensure that the requested alignment is met. When there is a requested address
3428   // this resolves itself, since the address must already be properly aligned.
3429   if (addr == NULL && alignment > page_size) {
3430     return reserve_large_pages_aligned(bytes, alignment, exec);
3431   }
3432 
3433   // No additional requirements, just reserve the large pages.
3434   return reserve_large_pages(bytes, addr, exec);
3435 }
3436 
3437 bool os::pd_release_memory_special(char* base, size_t bytes) {
3438   assert(base != NULL, "Sanity check");
3439   return pd_release_memory(base, bytes);
3440 }
3441 
3442 void os::print_statistics() {
3443 }
3444 
3445 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3446   int err = os::get_last_error();
3447   char buf[256];
3448   size_t buf_len = os::lasterror(buf, sizeof(buf));
3449   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3450           ", %d) failed; error='%s' (DOS error/errno=%d)", p2i(addr), bytes,
3451           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3452 }
3453 
3454 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3455   if (bytes == 0) {
3456     // Don't bother the OS with noops.
3457     return true;
3458   }
3459   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3460   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3461   // Don't attempt to print anything if the OS call fails. We're
3462   // probably low on resources, so the print itself may cause crashes.
3463 
3464   // Unless we have NUMAInterleaving enabled, the range of a commit
3465   // is always within a reserve covered by a single VirtualAlloc;
3466   // in that case we can just do a single commit for the requested size.
3467   if (!UseNUMAInterleaving) {
3468     if (virtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3469       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3470       return false;
3471     }
3472     if (exec) {
3473       DWORD oldprot;
3474       // Windows doc says to use VirtualProtect to get execute permissions
3475       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3476         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3477         return false;
3478       }
3479     }
3480     return true;
3481   } else {
3482 
3483     // when NUMAInterleaving is enabled, the commit might cover a range that
3484     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3485     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3486     // returns represents the number of bytes that can be committed in one step.
3487     size_t bytes_remaining = bytes;
3488     char * next_alloc_addr = addr;
3489     while (bytes_remaining > 0) {
3490       MEMORY_BASIC_INFORMATION alloc_info;
3491       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3492       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3493       if (virtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3494                        PAGE_READWRITE) == NULL) {
3495         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3496                                             exec);)
3497         return false;
3498       }
3499       if (exec) {
3500         DWORD oldprot;
3501         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3502                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3503           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3504                                               exec);)
3505           return false;
3506         }
3507       }
3508       bytes_remaining -= bytes_to_rq;
3509       next_alloc_addr += bytes_to_rq;
3510     }
3511   }
3512   // if we made it this far, return true
3513   return true;
3514 }
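
// Illustrative walk of the NUMA-interleaved branch above: committing 5M starting at an
// address whose underlying reservations (as reported by VirtualQuery's RegionSize) are
// 2M each proceeds as three commits of 2M, 2M and 1M, each through its own
// virtualAlloc(MEM_COMMIT) call.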
3515 
3516 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3517                           bool exec) {
3518   // alignment_hint is ignored on this OS
3519   return pd_commit_memory(addr, size, exec);
3520 }
3521 
3522 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3523                                   const char* mesg) {
3524   assert(mesg != NULL, "mesg must be specified");
3525   if (!pd_commit_memory(addr, size, exec)) {
3526     warn_fail_commit_memory(addr, size, exec);
3527     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3528   }
3529 }
3530 
3531 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3532                                   size_t alignment_hint, bool exec,
3533                                   const char* mesg) {
3534   // alignment_hint is ignored on this OS
3535   pd_commit_memory_or_exit(addr, size, exec, mesg);
3536 }
3537 
3538 bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
3539   if (bytes == 0) {
3540     // Don't bother the OS with noops.
3541     return true;
3542   }
3543   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3544   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3545   return (virtualFree(addr, bytes, MEM_DECOMMIT) == TRUE);
3546 }
3547 
3548 bool os::pd_release_memory(char* addr, size_t bytes) {
3549   // Given a range we are to release, we require a mapping to start at the beginning of that range;
3550   //  if NUMA or LP we allow the range to contain multiple mappings, which have to cover the range
3551   //  completely; otherwise the range must match an OS mapping exactly.
3552   address start = (address)addr;
3553   address end = start + bytes;
3554   os::win32::mapping_info_t mi;
3555   const bool multiple_mappings_allowed = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
3556   address p = start;
3557   bool first_mapping = true;
3558 
3559   do {
3560     // Find mapping and check it
3561     const char* err = NULL;
3562     if (!os::win32::find_mapping(p, &mi)) {
3563       err = "no mapping found";
3564     } else {
3565       if (first_mapping) {
3566         if (mi.base != start) {
3567           err = "base address mismatch";
3568         }
3569         if (multiple_mappings_allowed ? (mi.size > bytes) : (mi.size != bytes)) {
3570           err = "size mismatch";
3571         }
3572       } else {
3573         assert(p == mi.base && mi.size > 0, "Sanity");
3574         if (mi.base + mi.size > end) {
3575           err = "mapping overlaps end";
3576         }
3577         if (mi.size == 0) {
3578           err = "zero length mapping?"; // Should never happen; just to prevent endlessly looping in release.
3579         }
3580       }
3581     }
3582     // Handle mapping error. We assert in debug, unconditionally print a warning in release.
3583     if (err != NULL) {
3584       log_warning(os)("bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
3585 #ifdef ASSERT
3586       os::print_memory_mappings((char*)start, bytes, tty);
3587       assert(false, "bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
3588 #endif
3589       return false;
3590     }
3591     // Free this range
3592     if (virtualFree(p, 0, MEM_RELEASE) == FALSE) {
3593       return false;
3594     }
3595     first_mapping = false;
3596     p = mi.base + mi.size;
3597   } while (p < end);
3598 
3599   return true;
3600 }
3601 
3602 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3603   return os::commit_memory(addr, size, !ExecMem);
3604 }
3605 
3606 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3607   return os::uncommit_memory(addr, size);
3608 }
3609 
3610 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3611   uint count = 0;
3612   bool ret = false;
3613   size_t bytes_remaining = bytes;
3614   char * next_protect_addr = addr;
3615 
3616   // Use VirtualQuery() to get the chunk size.
3617   while (bytes_remaining) {
3618     MEMORY_BASIC_INFORMATION alloc_info;
3619     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3620       return false;
3621     }
3622 
3623     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3624     // We used different APIs in allocate_pages_individually() based on UseNUMAInterleaving,
3625     // but we don't distinguish here as both cases are protected by the same API.
3626     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3627     if (!ret) {
3628       warning("Failed protecting pages individually for chunk #%u", count);
3629       return false;
3630     }
3631 
3632     bytes_remaining -= bytes_to_protect;
3633     next_protect_addr += bytes_to_protect;
3634     count++;
3635   }
3636   return ret;
3637 }
3638 
3639 // Set protections specified
3640 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3641                         bool is_committed) {
3642   unsigned int p = 0;
3643   switch (prot) {
3644   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3645   case MEM_PROT_READ: p = PAGE_READONLY; break;
3646   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3647   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3648   default:
3649     ShouldNotReachHere();
3650   }
3651 
3652   DWORD old_status;
3653 
3654   // Strangely enough, on Win32 one can change protection only for committed
3655   // memory; not a big deal anyway, as 'bytes' is at most 64K here.
3656   if (!is_committed) {
3657     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3658                           "cannot commit protection page");
3659   }
3660   // One cannot use os::guard_memory() here, as on Win32 guard pages
3661   // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3662   //
3663   // Pages in the region become guard pages. Any attempt to access a guard page
3664   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3665   // the guard page status. Guard pages thus act as a one-time access alarm.
3666   bool ret;
3667   if (UseNUMAInterleaving) {
3668     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3669     // so we must protect the chunks individually.
3670     ret = protect_pages_individually(addr, bytes, p, &old_status);
3671   } else {
3672     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3673   }
3674 #ifdef ASSERT
3675   if (!ret) {
3676     int err = os::get_last_error();
3677     char buf[256];
3678     size_t buf_len = os::lasterror(buf, sizeof(buf));
3679     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3680           ") failed; error='%s' (DOS error/errno=%d)", p2i(addr), bytes,
3681           buf_len != 0 ? buf : "<no_error_string>", err);
3682   }
3683 #endif
3684   return ret;
3685 }
3686 
3687 bool os::guard_memory(char* addr, size_t bytes) {
3688   DWORD old_status;
3689   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3690 }
3691 
3692 bool os::unguard_memory(char* addr, size_t bytes) {
3693   DWORD old_status;
3694   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3695 }
3696 
3697 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3698 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3699 void os::numa_make_global(char *addr, size_t bytes)    { }
3700 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3701 bool os::numa_topology_changed()                       { return false; }
3702 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3703 int os::numa_get_group_id()                            { return 0; }
3704 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3705   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3706     // Provide an answer for UMA systems
3707     ids[0] = 0;
3708     return 1;
3709   } else {
3710     // check for size bigger than actual groups_num
3711     size = MIN2(size, numa_get_groups_num());
3712     for (int i = 0; i < (int)size; i++) {
3713       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3714     }
3715     return size;
3716   }
3717 }
3718 
3719 int os::numa_get_group_id_for_address(const void* address) {
3720   return 0;
3721 }
3722 
3723 bool os::get_page_info(char *start, page_info* info) {
3724   return false;
3725 }
3726 
3727 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3728                      page_info* page_found) {
3729   return end;
3730 }
3731 
3732 char* os::non_memory_address_word() {
3733   // Must never look like an address returned by reserve_memory,
3734   // even in its subfields (as defined by the CPU immediate fields,
3735   // if the CPU splits constants across multiple instructions).
3736 #ifdef _M_ARM64
3737   // AArch64 has a maximum addressable space of 48 bits
3738   return (char*)((1ull << 48) - 1);
3739 #else
3740   return (char*)-1;
3741 #endif
3742 }
3743 
3744 #define MAX_ERROR_COUNT 100
3745 #define SYS_THREAD_ERROR 0xffffffffUL
3746 
3747 void os::pd_start_thread(Thread* thread) {
3748   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3749   // Returns previous suspend state:
3750   // 0:  Thread was not suspended
3751   // 1:  Thread is running now
3752   // >1: Thread is still suspended.
3753   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3754 }
3755 
3756 
3757 // Short sleep, direct OS call.
3758 //
3759 // ms = 0, means allow others (if any) to run.
3760 //
3761 void os::naked_short_sleep(jlong ms) {
3762   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3763   Sleep(ms);
3764 }
3765 
3766 // Windows does not provide sleep functionality with nanosecond resolution, so we
3767 // try to approximate this with spinning combined with yielding if another thread
3768 // is ready to run on the current processor.
3769 void os::naked_short_nanosleep(jlong ns) {
3770   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3771 
3772   int64_t start = os::javaTimeNanos();
3773   do {
3774     if (SwitchToThread() == 0) {
3775       // Nothing else is ready to run on this cpu, spin a little
3776       SpinPause();
3777     }
3778   } while (os::javaTimeNanos() - start < ns);
3779 }
3780 
3781 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3782 void os::infinite_sleep() {
3783   while (true) {    // sleep forever ...
3784     Sleep(100000);  // ... 100 seconds at a time
3785   }
3786 }
3787 
3788 typedef BOOL (WINAPI * STTSignature)(void);
3789 
3790 void os::naked_yield() {
3791   // Consider passing back the return value from SwitchToThread().
3792   SwitchToThread();
3793 }
3794 
3795 // Win32 only gives you access to seven real priorities at a time,
3796 // so we compress Java's ten down to seven.  It would be better
3797 // if we dynamically adjusted relative priorities.
3798 
3799 int os::java_to_os_priority[CriticalPriority + 1] = {
3800   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3801   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3802   THREAD_PRIORITY_LOWEST,                       // 2
3803   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3804   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3805   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3806   THREAD_PRIORITY_NORMAL,                       // 6
3807   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3808   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3809   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3810   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3811   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3812 };
3813 
3814 int prio_policy1[CriticalPriority + 1] = {
3815   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3816   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3817   THREAD_PRIORITY_LOWEST,                       // 2
3818   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3819   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3820   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3821   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3822   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3823   THREAD_PRIORITY_HIGHEST,                      // 8
3824   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3825   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3826   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3827 };
3828 
3829 static int prio_init() {
3830   // If ThreadPriorityPolicy is 1, switch tables
3831   if (ThreadPriorityPolicy == 1) {
3832     int i;
3833     for (i = 0; i < CriticalPriority + 1; i++) {
3834       os::java_to_os_priority[i] = prio_policy1[i];
3835     }
3836   }
3837   if (UseCriticalJavaThreadPriority) {
3838     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3839   }
3840   return 0;
3841 }
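
// Worked example: with the default table, Java priorities 9 and 10 both map to
// THREAD_PRIORITY_HIGHEST. Running with -XX:ThreadPriorityPolicy=1 switches to the
// more aggressive table above, so MaxPriority (10) maps to
// THREAD_PRIORITY_TIME_CRITICAL instead. -XX:+UseCriticalJavaThreadPriority
// additionally copies the CriticalPriority mapping into the MaxPriority slot.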
3842 
3843 OSReturn os::set_native_priority(Thread* thread, int priority) {
3844   if (!UseThreadPriorities) return OS_OK;
3845   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3846   return ret ? OS_OK : OS_ERR;
3847 }
3848 
3849 OSReturn os::get_native_priority(const Thread* const thread,
3850                                  int* priority_ptr) {
3851   if (!UseThreadPriorities) {
3852     *priority_ptr = java_to_os_priority[NormPriority];
3853     return OS_OK;
3854   }
3855   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3856   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3857     assert(false, "GetThreadPriority failed");
3858     return OS_ERR;
3859   }
3860   *priority_ptr = os_prio;
3861   return OS_OK;
3862 }
3863 
3864 // GetCurrentThreadId() returns DWORD
3865 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3866 
3867 static int _initial_pid = 0;
3868 
3869 int os::current_process_id() {
3870   return (_initial_pid ? _initial_pid : _getpid());
3871 }
3872 
3873 int    os::win32::_vm_page_size              = 0;
3874 int    os::win32::_vm_allocation_granularity = 0;
3875 int    os::win32::_processor_type            = 0;
3876 // Processor level is not available on non-NT systems, use vm_version instead
3877 int    os::win32::_processor_level           = 0;
3878 julong os::win32::_physical_memory           = 0;
3879 size_t os::win32::_default_stack_size        = 0;
3880 
3881 intx          os::win32::_os_thread_limit    = 0;
3882 volatile intx os::win32::_os_thread_count    = 0;
3883 
3884 bool   os::win32::_is_windows_server         = false;
3885 
3886 // 6573254
3887 // Currently, the bug is observed across all the supported Windows releases,
3888 // including the latest one (as of this writing - Windows Server 2012 R2)
3889 bool   os::win32::_has_exit_bug              = true;
3890 
3891 void os::win32::initialize_system_info() {
3892   SYSTEM_INFO si;
3893   GetSystemInfo(&si);
3894   _vm_page_size    = si.dwPageSize;
3895   _vm_allocation_granularity = si.dwAllocationGranularity;
3896   _processor_type  = si.dwProcessorType;
3897   _processor_level = si.wProcessorLevel;
3898   set_processor_count(si.dwNumberOfProcessors);
3899 
3900   MEMORYSTATUSEX ms;
3901   ms.dwLength = sizeof(ms);
3902 
3903   // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3904   // and dwMemoryLoad (% of memory in use)
3905   GlobalMemoryStatusEx(&ms);
3906   _physical_memory = ms.ullTotalPhys;
3907 
3908   if (FLAG_IS_DEFAULT(MaxRAM)) {
3909     // Adjust MaxRAM according to the maximum virtual address space available.
3910     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3911   }
3912 
3913   OSVERSIONINFOEX oi;
3914   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3915   GetVersionEx((OSVERSIONINFO*)&oi);
3916   switch (oi.dwPlatformId) {
3917   case VER_PLATFORM_WIN32_NT:
3918     {
3919       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3920       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3921           oi.wProductType == VER_NT_SERVER) {
3922         _is_windows_server = true;
3923       }
3924     }
3925     break;
3926   default: fatal("Unknown platform");
3927   }
3928 
3929   _default_stack_size = os::current_stack_size();
3930   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3931   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3932          "stack size not a multiple of page size");
3933 
3934   initialize_performance_counter();
3935 }
3936 
3937 
3938 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3939                                       int ebuflen) {
3940   char path[MAX_PATH];
3941   DWORD size;
3942   DWORD pathLen = (DWORD)sizeof(path);
3943   HINSTANCE result = NULL;
3944 
3945   // only allow library name without path component
3946   assert(strchr(name, '\\') == NULL, "path not allowed");
3947   assert(strchr(name, ':') == NULL, "path not allowed");
3948   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3949     jio_snprintf(ebuf, ebuflen,
3950                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3951     return NULL;
3952   }
3953 
3954   // search system directory
3955   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3956     if (size >= pathLen) {
3957       return NULL; // truncated
3958     }
3959     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3960       return NULL; // truncated
3961     }
3962     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3963       return result;
3964     }
3965   }
3966 
3967   // try Windows directory
3968   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3969     if (size >= pathLen) {
3970       return NULL; // truncated
3971     }
3972     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3973       return NULL; // truncated
3974     }
3975     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3976       return result;
3977     }
3978   }
3979 
3980   jio_snprintf(ebuf, ebuflen,
3981                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3982   return NULL;
3983 }
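
// Usage sketch (illustrative; "psapi.dll" is just an example name): callers pass a bare
// library name and get back either a handle or an error string in ebuf:
//   char ebuf[1024];
//   HINSTANCE h = os::win32::load_Windows_dll("psapi.dll", ebuf, (int)sizeof(ebuf));
//   if (h == NULL) { /* ebuf explains why the system/Windows directory lookup failed */ }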
3984 
3985 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3986 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3987 
3988 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3989   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3990   return TRUE;
3991 }
3992 
3993 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3994   // Basic approach:
3995   //  - Each exiting thread registers its intent to exit and then does so.
3996   //  - A thread trying to terminate the process must wait for all
3997   //    threads currently exiting to complete their exit.
3998 
3999   if (os::win32::has_exit_bug()) {
4000     // The array holds handles of the threads that have started exiting by calling
4001     // _endthreadex().
4002     // Should be large enough to avoid blocking the exiting thread due to lack of
4003     // a free slot.
4004     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
4005     static int handle_count = 0;
4006 
4007     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
4008     static CRITICAL_SECTION crit_sect;
4009     static volatile DWORD process_exiting = 0;
4010     int i, j;
4011     DWORD res;
4012     HANDLE hproc, hthr;
4013 
4014     // We only attempt to register threads until a process exiting
4015     // thread manages to set the process_exiting flag. Any threads
4016     // that come through here after the process_exiting flag is set
4017     // are unregistered and will be caught in the SuspendThread()
4018     // infinite loop below.
4019     bool registered = false;
4020 
4021     // The first thread that reaches this point initializes the critical section.
4022     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
4023       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
4024     } else if (Atomic::load_acquire(&process_exiting) == 0) {
4025       if (what != EPT_THREAD) {
4026         // Atomically set process_exiting before the critical section
4027         // to increase the visibility between racing threads.
4028         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
4029       }
4030       EnterCriticalSection(&crit_sect);
4031 
4032       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
4033         // Remove from the array those handles of the threads that have completed exiting.
4034         for (i = 0, j = 0; i < handle_count; ++i) {
4035           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
4036           if (res == WAIT_TIMEOUT) {
4037             handles[j++] = handles[i];
4038           } else {
4039             if (res == WAIT_FAILED) {
4040               warning("WaitForSingleObject failed (%u) in %s: %d\n",
4041                       GetLastError(), __FILE__, __LINE__);
4042             }
4043             // Don't keep the handle, if we failed waiting for it.
4044             CloseHandle(handles[i]);
4045           }
4046         }
4047 
4048         // If there's no free slot in the array of the kept handles, we'll have to
4049         // wait until at least one thread completes exiting.
4050         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
4051           // Raise the priority of the oldest exiting thread to increase its chances
4052           // to complete sooner.
4053           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
4054           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
4055           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
4056             i = (res - WAIT_OBJECT_0);
4057             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
4058             for (; i < handle_count; ++i) {
4059               handles[i] = handles[i + 1];
4060             }
4061           } else {
4062             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4063                     (res == WAIT_FAILED ? "failed" : "timed out"),
4064                     GetLastError(), __FILE__, __LINE__);
4065             // Don't keep handles, if we failed waiting for them.
4066             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
4067               CloseHandle(handles[i]);
4068             }
4069             handle_count = 0;
4070           }
4071         }
4072 
4073         // Store a duplicate of the current thread handle in the array of handles.
4074         hproc = GetCurrentProcess();
4075         hthr = GetCurrentThread();
4076         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
4077                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
4078           warning("DuplicateHandle failed (%u) in %s: %d\n",
4079                   GetLastError(), __FILE__, __LINE__);
4080 
4081           // We can't register this thread (no more handles) so this thread
4082           // may be racing with a thread that is calling exit(). If the thread
4083           // that is calling exit() has managed to set the process_exiting
4084           // flag, then this thread will be caught in the SuspendThread()
4085           // infinite loop below which closes that race. A small timing
4086           // window remains before the process_exiting flag is set, but it
4087           // is only exposed when we are out of handles.
4088         } else {
4089           ++handle_count;
4090           registered = true;
4091 
4092           // The current exiting thread has stored its handle in the array, and now
4093           // should leave the critical section before calling _endthreadex().
4094         }
4095 
4096       } else if (what != EPT_THREAD && handle_count > 0) {
4097         jlong start_time, finish_time, timeout_left;
4098         // Before ending the process, make sure all the threads that had called
4099         // _endthreadex() completed.
4100 
4101         // Set the priority level of the current thread to the same value as
4102         // the priority level of exiting threads.
4103         // This is to ensure it will be given a fair chance to execute if
4104         // the timeout expires.
4105         hthr = GetCurrentThread();
4106         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4107         start_time = os::javaTimeNanos();
4108         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4109         for (i = 0; ; ) {
4110           int portion_count = handle_count - i;
4111           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4112             portion_count = MAXIMUM_WAIT_OBJECTS;
4113           }
4114           for (j = 0; j < portion_count; ++j) {
4115             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4116           }
4117           timeout_left = (finish_time - start_time) / 1000000L;
4118           if (timeout_left < 0) {
4119             timeout_left = 0;
4120           }
4121           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4122           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4123             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4124                     (res == WAIT_FAILED ? "failed" : "timed out"),
4125                     GetLastError(), __FILE__, __LINE__);
4126             // Reset portion_count so we close the remaining
4127             // handles due to this error.
4128             portion_count = handle_count - i;
4129           }
4130           for (j = 0; j < portion_count; ++j) {
4131             CloseHandle(handles[i + j]);
4132           }
4133           if ((i += portion_count) >= handle_count) {
4134             break;
4135           }
4136           start_time = os::javaTimeNanos();
4137         }
4138         handle_count = 0;
4139       }
4140 
4141       LeaveCriticalSection(&crit_sect);
4142     }
4143 
4144     if (!registered &&
4145         Atomic::load_acquire(&process_exiting) != 0 &&
4146         process_exiting != GetCurrentThreadId()) {
4147       // Some other thread is about to call exit(), so we don't let
4148       // the current unregistered thread proceed to exit() or _endthreadex()
4149       while (true) {
4150         SuspendThread(GetCurrentThread());
4151         // Avoid busy-wait loop, if SuspendThread() failed.
4152         Sleep(EXIT_TIMEOUT);
4153       }
4154     }
4155   }
4156 
4157   // We are here if either
4158   // - there's no 'race at exit' bug on this OS release;
4159   // - initialization of the critical section failed (unlikely);
4160   // - the current thread has registered itself and left the critical section;
4161   // - the process-exiting thread has raised the flag and left the critical section.
4162   if (what == EPT_THREAD) {
4163     _endthreadex((unsigned)exit_code);
4164   } else if (what == EPT_PROCESS) {
4165     ::exit(exit_code);
4166   } else { // EPT_PROCESS_DIE
4167     ::_exit(exit_code);
4168   }
4169 
4170   // Should not reach here
4171   return exit_code;
4172 }
4173 
4174 #undef EXIT_TIMEOUT
4175 
4176 void os::win32::setmode_streams() {
4177   _setmode(_fileno(stdin), _O_BINARY);
4178   _setmode(_fileno(stdout), _O_BINARY);
4179   _setmode(_fileno(stderr), _O_BINARY);
4180 }
4181 
4182 void os::wait_for_keypress_at_exit(void) {
4183   if (PauseAtExit) {
4184     fprintf(stderr, "Press any key to continue...\n");
4185     fgetc(stdin);
4186   }
4187 }
4188 
4189 
4190 bool os::message_box(const char* title, const char* message) {
4191   int result = MessageBox(NULL, message, title,
4192                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4193   return result == IDYES;
4194 }
4195 
4196 #ifndef PRODUCT
4197 #ifndef _WIN64
4198 // Helpers to check whether NX protection is enabled
4199 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4200   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4201       pex->ExceptionRecord->NumberParameters > 0 &&
4202       pex->ExceptionRecord->ExceptionInformation[0] ==
4203       EXCEPTION_INFO_EXEC_VIOLATION) {
4204     return EXCEPTION_EXECUTE_HANDLER;
4205   }
4206   return EXCEPTION_CONTINUE_SEARCH;
4207 }
4208 
4209 void nx_check_protection() {
4210   // If NX is enabled we'll get an exception calling into code on the stack
4211   char code[] = { (char)0xC3 }; // ret
4212   void *code_ptr = (void *)code;
4213   __try {
4214     __asm call code_ptr
4215   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4216     tty->print_raw_cr("NX protection detected.");
4217   }
4218 }
4219 #endif // _WIN64
4220 #endif // PRODUCT
4221 
4222 // This is called _before_ the global arguments have been parsed
4223 void os::init(void) {
4224   _initial_pid = _getpid();
4225 
4226   win32::initialize_system_info();
4227   win32::setmode_streams();
4228   _page_sizes.add(win32::vm_page_size());
4229 
4230   // This may be overridden later when argument processing is done.
4231   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4232 
4233   // Initialize main_process and main_thread
4234   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4235   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4236                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4237     fatal("DuplicateHandle failed\n");
4238   }
4239   main_thread_id = (int) GetCurrentThreadId();
4240 
4241   // initialize fast thread access - only used for 32-bit
4242   win32::initialize_thread_ptr_offset();
4243 }
4244 
4245 // To install functions for atexit processing
4246 extern "C" {
4247   static void perfMemory_exit_helper() {
4248     perfMemory_exit();
4249   }
4250 }
4251 
4252 static jint initSock();
4253 
4254 
4255 // this is called _after_ the global arguments have been parsed
4256 jint os::init_2(void) {
4257 
4258   // This could be set at any time, but all platforms
4259   // have to set it the same way, so we mirror Solaris.
4260   DEBUG_ONLY(os::set_mutex_init_done();)
4261 
4262   // Setup Windows Exceptions
4263 
4264 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
4265   topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
4266   previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
4267 #endif
4268 
4269   // for debugging float code generation bugs
4270 #if defined(ASSERT) && !defined(_WIN64)
4271   static long fp_control_word = 0;
4272   __asm { fstcw fp_control_word }
4273   // see Intel PPro Manual, Vol. 2, p 7-16
4274   const long invalid   = 0x01;
4275   fp_control_word |= invalid;
4276   __asm { fldcw fp_control_word }
4277 #endif
4278 
4279   // If stack_commit_size is 0, Windows will reserve the default size,
4280   // but only commit a small portion of it.
4281   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4282   size_t default_reserve_size = os::win32::default_stack_size();
4283   size_t actual_reserve_size = stack_commit_size;
4284   if (stack_commit_size < default_reserve_size) {
4285     // If stack_commit_size == 0, we want this too
4286     actual_reserve_size = default_reserve_size;
4287   }
4288 
4289   // Check minimum allowable stack size for thread creation and to initialize
4290   // the java system classes, including StackOverflowError - depends on page
4291   // size.  Add two 4K pages for compiler2 recursion in main thread.
4292   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4293   // class initialization depending on 32 or 64 bit VM.
4294   size_t min_stack_allowed =
4295             (size_t)(StackOverflow::stack_guard_zone_size() +
4296                      StackOverflow::stack_shadow_zone_size() +
4297                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4298 
4299   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4300 
4301   if (actual_reserve_size < min_stack_allowed) {
4302   tty->print_cr("\nThe Java thread stack size specified is too small. "
4303                 "Specify at least " SIZE_FORMAT "k",
4304                 min_stack_allowed / K);
4305     return JNI_ERR;
4306   }
4307 
4308   JavaThread::set_stack_size_at_create(stack_commit_size);
4309 
4310   // Calculate the theoretical max. number of threads to guard against artificial
4311   // out-of-memory situations, where all available address-space has been
4312   // reserved by thread stacks.
4313   assert(actual_reserve_size != 0, "Must have a stack");
4314 
4315   // Calculate the thread limit when we should start doing Virtual Memory
4316   // banging. Currently when the threads will have used all but 200Mb of space.
4317   //
4318   // TODO: consider performing a similar calculation for commit size instead
4319   // as reserve size, since on a 64-bit platform we'll run into that more
4320   // often than running out of virtual memory space.  We can use the
4321   // lower value of the two calculations as the os_thread_limit.
4322   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4323   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4324 
4325   // atexit functions are called in the reverse order of their registration.
4326   // There is no limit to the number of functions registered. atexit does
4327   // not set errno.
4328 
4329   if (PerfAllowAtExitRegistration) {
4330     // only register atexit functions if PerfAllowAtExitRegistration is set.
4331     // atexit functions can be delayed until process exit time, which
4332     // can be problematic for embedded VM situations. Embedded VMs should
4333     // call DestroyJavaVM() to assure that VM resources are released.
4334 
4335     // note: perfMemory_exit_helper atexit function may be removed in
4336     // the future if the appropriate cleanup code can be added to the
4337     // VM_Exit VMOperation's doit method.
4338     if (atexit(perfMemory_exit_helper) != 0) {
4339       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4340     }
4341   }
4342 
4343 #ifndef _WIN64
4344   // Print something if NX is enabled (win32 on AMD64)
4345   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4346 #endif
4347 
4348   // initialize thread priority policy
4349   prio_init();
4350 
4351   UseNUMA = false; // We don't fully support this yet
4352 
4353   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4354     if (!numa_interleaving_init()) {
4355       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4356     } else if (!UseNUMAInterleaving) {
4357       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4358       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4359     }
4360   }
4361 
4362   if (initSock() != JNI_OK) {
4363     return JNI_ERR;
4364   }
4365 
4366   SymbolEngine::recalc_search_path();
4367 
4368   // Initialize data for jdk.internal.misc.Signal
4369   if (!ReduceSignalUsage) {
4370     jdk_misc_signal_init();
4371   }
4372 
4373   // Look up SetThreadDescription - the docs state we must use runtime linking of
4374   // kernelbase.dll, so that is what we do.
4375   HINSTANCE _kernelbase = LoadLibrary(TEXT("kernelbase.dll"));
4376   if (_kernelbase != NULL) {
4377     _SetThreadDescription =
4378       reinterpret_cast<SetThreadDescriptionFnPtr>(
4379                                                   GetProcAddress(_kernelbase,
4380                                                                  "SetThreadDescription"));
4381 #ifdef ASSERT
4382     _GetThreadDescription =
4383       reinterpret_cast<GetThreadDescriptionFnPtr>(
4384                                                   GetProcAddress(_kernelbase,
4385                                                                  "GetThreadDescription"));
4386 #endif
4387   }
4388   log_info(os, thread)("The SetThreadDescription API is%s available.", _SetThreadDescription == NULL ? " not" : "");
4389 
4390 
4391   return JNI_OK;
4392 }
4393 
4394 // combine the high and low DWORD into a ULONGLONG
4395 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4396   ULONGLONG value = high_word;
4397   value <<= sizeof(high_word) * 8;
4398   value |= low_word;
4399   return value;
4400 }
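     // For example, make_double_word(0x1, 0x2) yields 0x100000002. It is used
     // below to reassemble the high/low halves of file sizes and FILETIMEs.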
4401 
4402 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4403 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4404   ::memset((void*)sbuf, 0, sizeof(struct stat));
4405   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4406   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4407                                   file_data.ftLastWriteTime.dwLowDateTime);
4408   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4409                                   file_data.ftCreationTime.dwLowDateTime);
4410   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4411                                   file_data.ftLastAccessTime.dwLowDateTime);
4412   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4413     sbuf->st_mode |= S_IFDIR;
4414   } else {
4415     sbuf->st_mode |= S_IFREG;
4416   }
4417 }
4418 
4419 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4420   // Get required buffer size to convert to Unicode
4421   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4422                                              MB_ERR_INVALID_CHARS,
4423                                              char_path, -1,
4424                                              NULL, 0);
4425   if (unicode_path_len == 0) {
4426     return EINVAL;
4427   }
4428 
4429   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4430 
4431   int result = MultiByteToWideChar(CP_ACP,
4432                                    MB_ERR_INVALID_CHARS,
4433                                    char_path, -1,
4434                                    *unicode_path, unicode_path_len);
4435   assert(result == unicode_path_len, "length already checked above");
4436 
4437   return ERROR_SUCCESS;
4438 }
4439 
4440 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4441   // Get required buffer size to convert to full path. The return
4442   // value INCLUDES the terminating null character.
4443   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4444   if (full_path_len == 0) {
4445     return EINVAL;
4446   }
4447 
4448   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4449 
4450   // When the buffer has sufficient size, the return value EXCLUDES the
4451   // terminating null character
4452   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4453   assert(result <= full_path_len, "length already checked above");
4454 
4455   return ERROR_SUCCESS;
4456 }
4457 
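     // Selects the long-path prefix that wide_abs_unc_path() below prepends:
     //   "C:\..."       -> "\\?\"
     //   "\\?\..."      -> ""         (already prefixed; GetFullPathNameW is skipped)
     //   "\\server\..." -> "\\?\UNC"  (the leading '\' is overwritten by the prefix)
     //   anything else  -> "\\?\"
     // All but the already-prefixed case are first made absolute via GetFullPathNameW.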
4458 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4459   *prefix_off = 0;
4460   *needs_fullpath = true;
4461 
4462   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4463     *prefix = L"\\\\?\\";
4464   } else if (buf[0] == '\\' && buf[1] == '\\') {
4465     if (buf[2] == '?' && buf[3] == '\\') {
4466       *prefix = L"";
4467       *needs_fullpath = false;
4468     } else {
4469       *prefix = L"\\\\?\\UNC";
4470       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4471     }
4472   } else {
4473     *prefix = L"\\\\?\\";
4474   }
4475 }
4476 
4477 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4478 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4479 // additional_space is the size of space, in wchar_t, the function will additionally add to
4480 // the allocation of return buffer (such that the size of the returned buffer is at least
4481 // wcslen(buf) + 1 + additional_space).
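     // For example (illustrative): "C:\dir\file" becomes L"\\?\C:\dir\file", and
     // "\\server\share\file" becomes L"\\?\UNC\server\share\file".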
4482 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4483   if ((path == NULL) || (path[0] == '\0')) {
4484     err = ENOENT;
4485     return NULL;
4486   }
4487 
4488   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4489   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4490   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4491   strncpy(buf, path, buf_len);
4492   os::native_path(buf);
4493 
4494   LPWSTR prefix = NULL;
4495   int prefix_off = 0;
4496   bool needs_fullpath = true;
4497   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4498 
4499   LPWSTR unicode_path = NULL;
4500   err = convert_to_unicode(buf, &unicode_path);
4501   FREE_C_HEAP_ARRAY(char, buf);
4502   if (err != ERROR_SUCCESS) {
4503     return NULL;
4504   }
4505 
4506   LPWSTR converted_path = NULL;
4507   if (needs_fullpath) {
4508     err = get_full_path(unicode_path, &converted_path);
4509   } else {
4510     converted_path = unicode_path;
4511   }
4512 
4513   LPWSTR result = NULL;
4514   if (converted_path != NULL) {
4515     size_t prefix_len = wcslen(prefix);
4516     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4517     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4518     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4519 
4520     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4521     result_len = wcslen(result);
4522     if ((result[result_len - 1] == L'\\') &&
4523         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4524       result[result_len - 1] = L'\0';
4525     }
4526   }
4527 
4528   if (converted_path != unicode_path) {
4529     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4530   }
4531   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4532 
4533   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4534 }
4535 
4536 int os::stat(const char *path, struct stat *sbuf) {
4537   errno_t err;
4538   wchar_t* wide_path = wide_abs_unc_path(path, err);
4539 
4540   if (wide_path == NULL) {
4541     errno = err;
4542     return -1;
4543   }
4544 
4545   WIN32_FILE_ATTRIBUTE_DATA file_data;
4546   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4547   os::free(wide_path);
4548 
4549   if (!bret) {
4550     errno = ::GetLastError();
4551     return -1;
4552   }
4553 
4554   file_attribute_data_to_stat(sbuf, file_data);
4555   return 0;
4556 }
4557 
4558 static HANDLE create_read_only_file_handle(const char* file) {
4559   errno_t err;
4560   wchar_t* wide_path = wide_abs_unc_path(file, err);
4561 
4562   if (wide_path == NULL) {
4563     errno = err;
4564     return INVALID_HANDLE_VALUE;
4565   }
4566 
4567   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4568                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4569   os::free(wide_path);
4570 
4571   return handle;
4572 }
4573 
4574 bool os::same_files(const char* file1, const char* file2) {
4575 
4576   if (file1 == NULL && file2 == NULL) {
4577     return true;
4578   }
4579 
4580   if (file1 == NULL || file2 == NULL) {
4581     return false;
4582   }
4583 
4584   if (strcmp(file1, file2) == 0) {
4585     return true;
4586   }
4587 
4588   char* native_file1 = os::strdup_check_oom(file1);
4589   native_file1 = os::native_path(native_file1);
4590   char* native_file2 = os::strdup_check_oom(file2);
4591   native_file2 = os::native_path(native_file2);
4592   if (strcmp(native_file1, native_file2) == 0) {
4593     os::free(native_file1);
4594     os::free(native_file2);
4595     return true;
4596   }
4597 
4598   HANDLE handle1 = create_read_only_file_handle(native_file1);
4599   HANDLE handle2 = create_read_only_file_handle(native_file2);
4600   bool result = false;
4601 
4602   // if we could open both paths...
4603   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4604     BY_HANDLE_FILE_INFORMATION fileInfo1;
4605     BY_HANDLE_FILE_INFORMATION fileInfo2;
4606     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4607       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4608       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4609       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4610         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4611         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4612         result = true;
4613       }
4614     }
4615   }
4616 
4617   //free the handles
4618   if (handle1 != INVALID_HANDLE_VALUE) {
4619     ::CloseHandle(handle1);
4620   }
4621 
4622   if (handle2 != INVALID_HANDLE_VALUE) {
4623     ::CloseHandle(handle2);
4624   }
4625 
4626   os::free(native_file1);
4627   os::free(native_file2);
4628 
4629   return result;
4630 }
4631 
4632 #define FT2INT64(ft) \
4633   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
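     // FILETIME values are expressed in 100-nanosecond units, so the CPU-time
     // functions below multiply FT2INT64(...) by 100 to convert to nanoseconds.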
4634 
4635 
4636 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4637 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4638 // of a thread.
4639 //
4640 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4641 // the fast estimate available on the platform.
4642 
4643 // current_thread_cpu_time() is not optimized for Windows yet
4644 jlong os::current_thread_cpu_time() {
4645   // return user + sys since the cost is the same
4646   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4647 }
4648 
4649 jlong os::thread_cpu_time(Thread* thread) {
4650   // consistent with what current_thread_cpu_time() returns.
4651   return os::thread_cpu_time(thread, true /* user+sys */);
4652 }
4653 
4654 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4655   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4656 }
4657 
4658 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4659   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4660   // If this function changes, os::is_thread_cpu_time_supported() should too
4661   FILETIME CreationTime;
4662   FILETIME ExitTime;
4663   FILETIME KernelTime;
4664   FILETIME UserTime;
4665 
4666   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4667                       &ExitTime, &KernelTime, &UserTime) == 0) {
4668     return -1;
4669   } else if (user_sys_cpu_time) {
4670     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4671   } else {
4672     return FT2INT64(UserTime) * 100;
4673   }
4674 }
4675 
4676 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4677   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4678   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4679   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4680   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4681 }
4682 
4683 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4684   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4685   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4686   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4687   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4688 }
4689 
4690 bool os::is_thread_cpu_time_supported() {
4691   // see os::thread_cpu_time
4692   FILETIME CreationTime;
4693   FILETIME ExitTime;
4694   FILETIME KernelTime;
4695   FILETIME UserTime;
4696 
4697   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4698                       &KernelTime, &UserTime) == 0) {
4699     return false;
4700   } else {
4701     return true;
4702   }
4703 }
4704 
4705 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4706 // It does have primitives (PDH API) to get CPU usage and run queue length.
4707 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4708 // If we wanted to implement loadavg on Windows, we have a few options:
4709 //
4710 // a) Query CPU usage and run queue length and "fake" an answer by
4711 //    returning the CPU usage if it's under 100%, and the run queue
4712 //    length otherwise.  It turns out that querying is pretty slow
4713 //    on Windows, on the order of 200 microseconds on a fast machine.
4714 //    Note that on Windows the CPU usage value is the % usage
4715 //    since the last time the API was called (and the first call
4716 //    returns 100%), so we'd have to deal with that as well.
4717 //
4718 // b) Sample the "fake" answer using a sampling thread and store
4719 //    the answer in a global variable.  The call to loadavg would
4720 //    just return the value of the global, avoiding the slow query.
4721 //
4722 // c) Sample a better answer using exponential decay to smooth the
4723 //    value.  This is basically the algorithm used by UNIX kernels.
4724 //
4725 // Note that sampling thread starvation could affect both (b) and (c).
4726 int os::loadavg(double loadavg[], int nelem) {
4727   return -1;
4728 }
4729 
4730 
4731 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4732 bool os::dont_yield() {
4733   return DontYieldALot;
4734 }
4735 
4736 int os::open(const char *path, int oflag, int mode) {
4737   errno_t err;
4738   wchar_t* wide_path = wide_abs_unc_path(path, err);
4739 
4740   if (wide_path == NULL) {
4741     errno = err;
4742     return -1;
4743   }
4744   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4745   os::free(wide_path);
4746 
4747   if (fd == -1) {
4748     errno = ::GetLastError();
4749   }
4750 
4751   return fd;
4752 }
4753 
4754 FILE* os::open(int fd, const char* mode) {
4755   return ::_fdopen(fd, mode);
4756 }
4757 
4758 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
4759   return ::write(fd, buf, nBytes);
4760 }
4761 
4762 int os::close(int fd) {
4763   return ::close(fd);
4764 }
4765 
4766 void os::exit(int num) {
4767   win32::exit_process_or_thread(win32::EPT_PROCESS, num);
4768 }
4769 
4770 void os::_exit(int num) {
4771   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, num);
4772 }
4773 
4774 // Is a (classpath) directory empty?
4775 bool os::dir_is_empty(const char* path) {
4776   errno_t err;
4777   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4778 
4779   if (wide_path == NULL) {
4780     errno = err;
4781     return false;
4782   }
4783 
4784   // Make sure we end with "\\*"
4785   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4786     wcscat(wide_path, L"*");
4787   } else {
4788     wcscat(wide_path, L"\\*");
4789   }
4790 
4791   WIN32_FIND_DATAW fd;
4792   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4793   os::free(wide_path);
4794   bool is_empty = true;
4795 
4796   if (f != INVALID_HANDLE_VALUE) {
4797     while (is_empty && ::FindNextFileW(f, &fd)) {
4798       // An empty directory contains only the current directory file
4799       // and the previous directory file.
4800       if ((wcscmp(fd.cFileName, L".") != 0) &&
4801           (wcscmp(fd.cFileName, L"..") != 0)) {
4802         is_empty = false;
4803       }
4804     }
4805     FindClose(f);
4806   } else {
4807     errno = ::GetLastError();
4808   }
4809 
4810   return is_empty;
4811 }
4812 
4813 // create binary file, rewriting existing file if required
4814 int os::create_binary_file(const char* path, bool rewrite_existing) {
4815   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4816   oflags |= rewrite_existing ? _O_TRUNC : _O_EXCL;
4817   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4818 }
4819 
4820 // return current position of file pointer
4821 jlong os::current_file_offset(int fd) {
4822   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4823 }
4824 
4825 // move file pointer to the specified offset
4826 jlong os::seek_to_file_offset(int fd, jlong offset) {
4827   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4828 }
4829 
4830 
4831 jlong os::lseek(int fd, jlong offset, int whence) {
4832   return (jlong) ::_lseeki64(fd, offset, whence);
4833 }
4834 
4835 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4836   OVERLAPPED ov;
4837   DWORD nread;
4838   BOOL result;
4839 
4840   ZeroMemory(&ov, sizeof(ov));
4841   ov.Offset = (DWORD)offset;
4842   ov.OffsetHigh = (DWORD)(offset >> 32);
4843 
4844   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4845 
4846   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4847 
4848   return result ? nread : 0;
4849 }
4850 
4851 
4852 // This method is a slightly reworked copy of JDK's sysNativePath
4853 // from src/windows/hpi/src/path_md.c
4854 
4855 // Convert a pathname to native format.  On win32, this involves forcing all
4856 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4857 // sometimes rejects '/') and removing redundant separators.  The input path is
4858 // assumed to have been converted into the character encoding used by the local
4859 // system.  Because this might be a double-byte encoding, care is taken to
4860 // treat double-byte lead characters correctly.
4861 //
4862 // This procedure modifies the given path in place, as the result is never
4863 // longer than the original.  There is no error return; this operation always
4864 // succeeds.
4865 char * os::native_path(char *path) {
4866   char *src = path, *dst = path, *end = path;
4867   char *colon = NULL;  // If a drive specifier is found, this will
4868                        // point to the colon following the drive letter
4869 
4870   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4871   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4872           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4873 
4874   // Check for leading separators
4875 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4876   while (isfilesep(*src)) {
4877     src++;
4878   }
4879 
4880   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4881     // Remove leading separators if followed by drive specifier.  This
4882     // hack is necessary to support file URLs containing drive
4883     // specifiers (e.g., "file://c:/path").  As a side effect,
4884     // "/c:/path" can be used as an alternative to "c:/path".
4885     *dst++ = *src++;
4886     colon = dst;
4887     *dst++ = ':';
4888     src++;
4889   } else {
4890     src = path;
4891     if (isfilesep(src[0]) && isfilesep(src[1])) {
4892       // UNC pathname: Retain first separator; leave src pointed at
4893       // second separator so that further separators will be collapsed
4894       // into the second separator.  The result will be a pathname
4895       // beginning with "\\\\" followed (most likely) by a host name.
4896       src = dst = path + 1;
4897       path[0] = '\\';     // Force first separator to '\\'
4898     }
4899   }
4900 
4901   end = dst;
4902 
4903   // Remove redundant separators from remainder of path, forcing all
4904   // separators to be '\\' rather than '/'. Also, single byte space
4905   // characters are removed from the end of the path because those
4906   // are not legal ending characters on this operating system.
4907   //
4908   while (*src != '\0') {
4909     if (isfilesep(*src)) {
4910       *dst++ = '\\'; src++;
4911       while (isfilesep(*src)) src++;
4912       if (*src == '\0') {
4913         // Check for trailing separator
4914         end = dst;
4915         if (colon == dst - 2) break;  // "z:\\"
4916         if (dst == path + 1) break;   // "\\"
4917         if (dst == path + 2 && isfilesep(path[0])) {
4918           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4919           // beginning of a UNC pathname.  Even though it is not, by
4920           // itself, a valid UNC pathname, we leave it as is in order
4921           // to be consistent with the path canonicalizer as well
4922           // as the win32 APIs, which treat this case as an invalid
4923           // UNC pathname rather than as an alias for the root
4924           // directory of the current drive.
4925           break;
4926         }
4927         end = --dst;  // Path does not denote a root directory, so
4928                       // remove trailing separator
4929         break;
4930       }
4931       end = dst;
4932     } else {
4933       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4934         *dst++ = *src++;
4935         if (*src) *dst++ = *src++;
4936         end = dst;
4937       } else {  // Copy a single-byte character
4938         char c = *src++;
4939         *dst++ = c;
4940         // Space is not a legal ending character
4941         if (c != ' ') end = dst;
4942       }
4943     }
4944   }
4945 
4946   *end = '\0';
4947 
4948   // For "z:", add "." to work around a bug in the C runtime library
4949   if (colon == dst - 1) {
4950     path[2] = '.';
4951     path[3] = '\0';
4952   }
4953 
4954   return path;
4955 }
4956 
4957 // This code is a copy of JDK's sysSetLength
4958 // from src/windows/hpi/src/sys_api_md.c
4959 
4960 int os::ftruncate(int fd, jlong length) {
4961   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4962   long high = (long)(length >> 32);
4963   DWORD ret;
4964 
4965   if (h == (HANDLE)(-1)) {
4966     return -1;
4967   }
4968 
4969   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4970   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4971     return -1;
4972   }
4973 
4974   if (::SetEndOfFile(h) == FALSE) {
4975     return -1;
4976   }
4977 
4978   return 0;
4979 }
4980 
4981 int os::get_fileno(FILE* fp) {
4982   return _fileno(fp);
4983 }
4984 
4985 // This code is a copy of JDK's sysSync
4986 // from src/windows/hpi/src/sys_api_md.c
4987 // except for the legacy workaround for a bug in Win 98
4988 
4989 int os::fsync(int fd) {
4990   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4991 
4992   if ((!::FlushFileBuffers(handle)) &&
4993       (GetLastError() != ERROR_ACCESS_DENIED)) {
4994     // from winerror.h
4995     return -1;
4996   }
4997   return 0;
4998 }
4999 
5000 static int nonSeekAvailable(int, long *);
5001 static int stdinAvailable(int, long *);
5002 
5003 // This code is a copy of JDK's sysAvailable
5004 // from src/windows/hpi/src/sys_api_md.c
5005 
5006 int os::available(int fd, jlong *bytes) {
5007   jlong cur, end;
5008   struct _stati64 stbuf64;
5009 
5010   if (::_fstati64(fd, &stbuf64) >= 0) {
5011     int mode = stbuf64.st_mode;
5012     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
5013       int ret;
5014       long lpbytes;
5015       if (fd == 0) {
5016         ret = stdinAvailable(fd, &lpbytes);
5017       } else {
5018         ret = nonSeekAvailable(fd, &lpbytes);
5019       }
5020       (*bytes) = (jlong)(lpbytes);
5021       return ret;
5022     }
5023     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
5024       return FALSE;
5025     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
5026       return FALSE;
5027     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
5028       return FALSE;
5029     }
5030     *bytes = end - cur;
5031     return TRUE;
5032   } else {
5033     return FALSE;
5034   }
5035 }
5036 
5037 void os::flockfile(FILE* fp) {
5038   _lock_file(fp);
5039 }
5040 
5041 void os::funlockfile(FILE* fp) {
5042   _unlock_file(fp);
5043 }
5044 
5045 // This code is a copy of JDK's nonSeekAvailable
5046 // from src/windows/hpi/src/sys_api_md.c
5047 
5048 static int nonSeekAvailable(int fd, long *pbytes) {
5049   // This is used for available on non-seekable devices
5050   // (like both named and anonymous pipes, such as pipes
5051   //  connected to an exec'd process).
5052   // Standard Input is a special case.
5053   HANDLE han;
5054 
5055   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
5056     return FALSE;
5057   }
5058 
5059   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
5060     // PeekNamedPipe fails when at EOF.  In that case we
5061     // simply make *pbytes = 0 which is consistent with the
5062     // behavior we get on Solaris when an fd is at EOF.
5063     // The only alternative is to raise an Exception,
5064     // which isn't really warranted.
5065     //
5066     if (::GetLastError() != ERROR_BROKEN_PIPE) {
5067       return FALSE;
5068     }
5069     *pbytes = 0;
5070   }
5071   return TRUE;
5072 }
5073 
5074 #define MAX_INPUT_EVENTS 2000
5075 
5076 // This code is a copy of JDK's stdinAvailable
5077 // from src/windows/hpi/src/sys_api_md.c
5078 
5079 static int stdinAvailable(int fd, long *pbytes) {
5080   HANDLE han;
5081   DWORD numEventsRead = 0;  // Number of events read from buffer
5082   DWORD numEvents = 0;      // Number of events in buffer
5083   DWORD i = 0;              // Loop index
5084   DWORD curLength = 0;      // Position marker
5085   DWORD actualLength = 0;   // Number of bytes readable
5086   BOOL error = FALSE;       // Error holder
5087   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
5088 
5089   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
5090     return FALSE;
5091   }
5092 
5093   // Construct an array of input records in the console buffer
5094   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
5095   if (error == 0) {
5096     return nonSeekAvailable(fd, pbytes);
5097   }
5098 
5099   // lpBuffer must fit into 64K or else PeekConsoleInput fails
5100   if (numEvents > MAX_INPUT_EVENTS) {
5101     numEvents = MAX_INPUT_EVENTS;
5102   }
5103 
5104   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
5105   if (lpBuffer == NULL) {
5106     return FALSE;
5107   }
5108 
5109   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
5110   if (error == 0) {
5111     os::free(lpBuffer);
5112     return FALSE;
5113   }
5114 
5115   // Examine input records for the number of bytes available
5116   for (i=0; i<numEvents; i++) {
5117     if (lpBuffer[i].EventType == KEY_EVENT) {
5118 
5119       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
5120                                       &(lpBuffer[i].Event);
5121       if (keyRecord->bKeyDown == TRUE) {
5122         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
5123         curLength++;
5124         if (*keyPressed == '\r') {
5125           actualLength = curLength;
5126         }
5127       }
5128     }
5129   }
5130 
5131   if (lpBuffer != NULL) {
5132     os::free(lpBuffer);
5133   }
5134 
5135   *pbytes = (long) actualLength;
5136   return TRUE;
5137 }
5138 
5139 // Map a block of memory.
5140 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5141                         char *addr, size_t bytes, bool read_only,
5142                         bool allow_exec) {
5143 
5144   errno_t err;
5145   wchar_t* wide_path = wide_abs_unc_path(file_name, err);
5146 
5147   if (wide_path == NULL) {
5148     return NULL;
5149   }
5150 
5151   HANDLE hFile;
5152   char* base;
5153 
5154   hFile = CreateFileW(wide_path, GENERIC_READ, FILE_SHARE_READ, NULL,
5155                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
5156   if (hFile == INVALID_HANDLE_VALUE) {
5157     log_info(os)("CreateFileW() failed: GetLastError->%ld.", GetLastError());
5158     os::free(wide_path);
5159     return NULL;
5160   }
5161   os::free(wide_path);
5162 
5163   if (allow_exec) {
5164     // CreateFileMapping/MapViewOfFileEx can't map executable memory
5165     // unless it comes from a PE image (which the shared archive is not.)
5166     // Even VirtualProtect refuses to give execute access to mapped memory
5167     // that was not previously executable.
5168     //
5169     // Instead, stick the executable region in anonymous memory.  Yuck.
5170     // Penalty is that ~4 pages will not be shareable - in the future
5171     // we might consider DLLizing the shared archive with a proper PE
5172     // header so that mapping executable + sharing is possible.
5173 
5174     base = (char*) virtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5175                                 PAGE_READWRITE);
5176     if (base == NULL) {
5177       CloseHandle(hFile);
5178       return NULL;
5179     }
5180 
5181     // Record virtual memory allocation
5182     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5183 
5184     DWORD bytes_read;
5185     OVERLAPPED overlapped;
5186     overlapped.Offset = (DWORD)file_offset;
5187     overlapped.OffsetHigh = 0;
5188     overlapped.hEvent = NULL;
5189     // ReadFile guarantees that if the return value is true, the requested
5190     // number of bytes were read before returning.
5191     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5192     if (!res) {
5193       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5194       release_memory(base, bytes);
5195       CloseHandle(hFile);
5196       return NULL;
5197     }
5198   } else {
5199     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5200                                     NULL /* file_name */);
5201     if (hMap == NULL) {
5202       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5203       CloseHandle(hFile);
5204       return NULL;
5205     }
5206 
5207     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5208     base = (char*)mapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5209                                   (DWORD)bytes, addr);
5210     if (base == NULL) {
5211       CloseHandle(hMap);
5212       CloseHandle(hFile);
5213       return NULL;
5214     }
5215 
5216     if (CloseHandle(hMap) == 0) {
5217       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5218       CloseHandle(hFile);
5219       return base;
5220     }
5221   }
5222 
5223   if (allow_exec) {
5224     DWORD old_protect;
5225     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5226     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5227 
5228     if (!res) {
5229       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5230       // Don't consider this a hard error: on IA32, even if the
5231       // VirtualProtect fails, we should still be able to execute
5232       CloseHandle(hFile);
5233       return base;
5234     }
5235   }
5236 
5237   if (CloseHandle(hFile) == 0) {
5238     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5239     return base;
5240   }
5241 
5242   return base;
5243 }
5244 
5245 
5246 // Remap a block of memory.
5247 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5248                           char *addr, size_t bytes, bool read_only,
5249                           bool allow_exec) {
5250   // This OS does not allow existing memory maps to be remapped so we
5251   // would have to unmap the memory before we remap it.
5252 
5253   // Because there is a small window between unmapping memory and mapping
5254   // it in again with different protections, CDS archives are mapped RW
5255   // on windows, so this function isn't called.
5256   ShouldNotReachHere();
5257   return NULL;
5258 }
5259 
5260 
5261 // Unmap a block of memory.
5262 // Returns true=success, otherwise false.
5263 
5264 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5265   MEMORY_BASIC_INFORMATION mem_info;
5266   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5267     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5268     return false;
5269   }
5270 
5271   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5272   // Instead, the executable region was allocated using VirtualAlloc(). See
5273   // pd_map_memory() above.
5274   //
5275   // The following flags should match the 'exec_access' flags used for
5276   // VirtualProtect() in pd_map_memory().
5277   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5278       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5279     return pd_release_memory(addr, bytes);
5280   }
5281 
5282   BOOL result = unmapViewOfFile(addr);
5283   if (result == 0) {
5284     return false;
5285   }
5286   return true;
5287 }
5288 
5289 void os::pause() {
5290   char filename[MAX_PATH];
5291   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5292     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5293   } else {
5294     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5295   }
5296 
5297   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5298   if (fd != -1) {
5299     struct stat buf;
5300     ::close(fd);
5301     while (::stat(filename, &buf) == 0) {
5302       Sleep(100);
5303     }
5304   } else {
5305     jio_fprintf(stderr,
5306                 "Could not open pause file '%s', continuing immediately.\n", filename);
5307   }
5308 }
5309 
5310 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5311 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5312 
5313 os::ThreadCrashProtection::ThreadCrashProtection() {
5314   _protected_thread = Thread::current();
5315   assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
5316 }
5317 
5318 // See the caveats for this class in os_windows.hpp
5319 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5320 // into this method, which then returns false. If no OS EXCEPTION was raised,
5321 // returns true.
5322 // The callback is supposed to provide the method that should be protected.
5323 //
5324 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5325   bool success = true;
5326   __try {
5327     _crash_protection = this;
5328     cb.call();
5329   } __except(EXCEPTION_EXECUTE_HANDLER) {
5330     // only for protection, nothing to do
5331     success = false;
5332   }
5333   _crash_protection = NULL;
5334   _protected_thread = NULL;
5335   return success;
5336 }
5337 
5338 
5339 class HighResolutionInterval : public CHeapObj<mtThread> {
5340   // The default timer resolution seems to be 10 milliseconds.
5341   // (Where is this written down?)
5342   // If someone wants to sleep for only a fraction of the default,
5343   // then we set the timer resolution down to 1 millisecond for
5344   // the duration of their interval.
5345   // We carefully set the resolution back, since otherwise we
5346   // seem to incur an overhead (3%?) that we don't need.
5347   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5348   // But if ms is large, say 500 or 503, we should avoid the call to timeBeginPeriod().
5349   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5350   // timeBeginPeriod() if the relative error exceeded some threshold.
5351   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5352   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5353   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5354   // resolution timers running.
5355  private:
5356   jlong resolution;
5357  public:
5358   HighResolutionInterval(jlong ms) {
5359     resolution = ms % 10L;
5360     if (resolution != 0) {
5361       MMRESULT result = timeBeginPeriod(1L);
5362     }
5363   }
5364   ~HighResolutionInterval() {
5365     if (resolution != 0) {
5366       MMRESULT result = timeEndPeriod(1L);
5367     }
5368     resolution = 0L;
5369   }
5370 };
5371 
5372 // An Event wraps a win32 "CreateEvent" kernel handle.
5373 //
5374 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5375 //
5376 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5377 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5378 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5379 //     In addition, an unpark() operation might fetch the handle field, but the
5380 //     event could recycle between the fetch and the SetEvent() operation.
5381 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5382 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5383 //     on an stale but recycled handle would be harmless, but in practice this might
5384 //     confuse other non-Sun code, so it's not a viable approach.
5385 //
5386 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5387 //     with the Event.  The event handle is never closed.  This could be construed
5388 //     as handle leakage, but only up to the maximum # of threads that have been extant
5389 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5390 //     permit a process to have hundreds of thousands of open handles.
5391 //
5392 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5393 //     and release unused handles.
5394 //
5395 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5396 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5397 //
5398 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5399 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5400 //
5401 // We use (2).
5402 //
5403 // TODO-FIXME:
5404 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5405 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5406 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5407 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5408 //     into a single win32 CreateEvent() handle.
5409 //
5410 // Assumption:
5411 //    Only one parker can exist on an event, which is why we allocate
5412 //    them per-thread. Multiple unparkers can coexist.
5413 //
5414 // _Event transitions in park()
5415 //   -1 => -1 : illegal
5416 //    1 =>  0 : pass - return immediately
5417 //    0 => -1 : block; then set _Event to 0 before returning
5418 //
5419 // _Event transitions in unpark()
5420 //    0 => 1 : just return
5421 //    1 => 1 : just return
5422 //   -1 => either 0 or 1; must signal target thread
5423 //         That is, we can safely transition _Event from -1 to either
5424 //         0 or 1.
5425 //
5426 // _Event serves as a restricted-range semaphore.
5427 //   -1 : thread is blocked, i.e. there is a waiter
5428 //    0 : neutral: thread is running or ready,
5429 //        could have been signaled after a wait started
5430 //    1 : signaled - thread is running or ready
5431 //
5432 // Another possible encoding of _Event would be with
5433 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5434 //
5435 
5436 int os::PlatformEvent::park(jlong Millis) {
5437   // Transitions for _Event:
5438   //   -1 => -1 : illegal
5439   //    1 =>  0 : pass - return immediately
5440   //    0 => -1 : block; then set _Event to 0 before returning
5441 
5442   guarantee(_ParkHandle != NULL , "Invariant");
5443   guarantee(Millis > 0          , "Invariant");
5444 
5445   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5446   // the initial park() operation.
5447   // Consider: use atomic decrement instead of CAS-loop
5448 
5449   int v;
5450   for (;;) {
5451     v = _Event;
5452     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5453   }
5454   guarantee((v == 0) || (v == 1), "invariant");
5455   if (v != 0) return OS_OK;
5456 
5457   // Do this the hard way by blocking ...
5458   // TODO: consider a brief spin here, gated on the success of recent
5459   // spin attempts by this thread.
5460   //
5461   // We decompose long timeouts into series of shorter timed waits.
5462   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5463   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5464   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5465   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5466   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5467   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5468   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5469   // for the already waited time.  This policy does not admit any new outcomes.
5470   // In the future, however, we might want to track the accumulated wait time and
5471   // adjust Millis accordingly if we encounter a spurious wakeup.
5472 
5473   const int MAXTIMEOUT = 0x10000000;
5474   DWORD rv = WAIT_TIMEOUT;
5475   while (_Event < 0 && Millis > 0) {
5476     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5477     if (Millis > MAXTIMEOUT) {
5478       prd = MAXTIMEOUT;
5479     }
5480     HighResolutionInterval *phri = NULL;
5481     if (!ForceTimeHighResolution) {
5482       phri = new HighResolutionInterval(prd);
5483     }
5484     rv = ::WaitForSingleObject(_ParkHandle, prd);
5485     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5486     if (rv == WAIT_TIMEOUT) {
5487       Millis -= prd;
5488     }
5489     delete phri; // if it is NULL, harmless
5490   }
5491   v = _Event;
5492   _Event = 0;
5493   // see comment at end of os::PlatformEvent::park() below:
5494   OrderAccess::fence();
5495   // If we encounter a nearly simultaneous timeout expiry and unpark()
5496   // we return OS_OK indicating we awoke via unpark().
5497   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5498   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5499 }
5500 
5501 void os::PlatformEvent::park() {
5502   // Transitions for _Event:
5503   //   -1 => -1 : illegal
5504   //    1 =>  0 : pass - return immediately
5505   //    0 => -1 : block; then set _Event to 0 before returning
5506 
5507   guarantee(_ParkHandle != NULL, "Invariant");
5508   // Invariant: Only the thread associated with the Event/PlatformEvent
5509   // may call park().
5510   // Consider: use atomic decrement instead of CAS-loop
5511   int v;
5512   for (;;) {
5513     v = _Event;
5514     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5515   }
5516   guarantee((v == 0) || (v == 1), "invariant");
5517   if (v != 0) return;
5518 
5519   // Do this the hard way by blocking ...
5520   // TODO: consider a brief spin here, gated on the success of recent
5521   // spin attempts by this thread.
5522   while (_Event < 0) {
5523     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5524     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5525   }
5526 
5527   // Usually we'll find _Event == 0 at this point, but as
5528   // an optional optimization we clear it, just in case
5529   // multiple unpark() operations drove _Event up to 1.
5530   _Event = 0;
5531   OrderAccess::fence();
5532   guarantee(_Event >= 0, "invariant");
5533 }
5534 
5535 void os::PlatformEvent::unpark() {
5536   guarantee(_ParkHandle != NULL, "Invariant");
5537 
5538   // Transitions for _Event:
5539   //    0 => 1 : just return
5540   //    1 => 1 : just return
5541   //   -1 => either 0 or 1; must signal target thread
5542   //         That is, we can safely transition _Event from -1 to either
5543   //         0 or 1.
5544   // See also: "Semaphores in Plan 9" by Mullender & Cox
5545   //
5546   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5547   // that it will take two back-to-back park() calls for the owning
5548   // thread to block. This has the benefit of forcing a spurious return
5549   // from the first park() call after an unpark() call which will help
5550   // shake out uses of park() and unpark() without condition variables.
5551 
5552   if (Atomic::xchg(&_Event, 1) >= 0) return;
5553 
5554   ::SetEvent(_ParkHandle);
5555 }
5556 
5557 
5558 // JSR166
5559 // -------------------------------------------------------
5560 
5561 // The Windows implementation of Park is very straightforward: Basic
5562 // operations on Win32 Events turn out to have the right semantics to
5563 // use them directly.
5564 
5565 void Parker::park(bool isAbsolute, jlong time) {
5566   guarantee(_ParkHandle != NULL, "invariant");
5567   // First, demultiplex/decode time arguments
5568   if (time < 0) { // don't wait
5569     return;
5570   } else if (time == 0 && !isAbsolute) {
5571     time = INFINITE;
5572   } else if (isAbsolute) {
5573     time -= os::javaTimeMillis(); // convert to relative time
5574     if (time <= 0) {  // already elapsed
5575       return;
5576     }
5577   } else { // relative
5578     time /= 1000000;  // Must coarsen from nanos to millis
5579     if (time == 0) {  // Wait for the minimal time unit if zero
5580       time = 1;
5581     }
5582   }
5583 
5584   JavaThread* thread = JavaThread::current();
5585 
5586   // Don't wait if interrupted or already triggered
5587   if (thread->is_interrupted(false) ||
5588       WaitForSingleObject(_ParkHandle, 0) == WAIT_OBJECT_0) {
5589     ResetEvent(_ParkHandle);
5590     return;
5591   } else {
5592     ThreadBlockInVM tbivm(thread);
5593     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5594 
5595     WaitForSingleObject(_ParkHandle, time);
5596     ResetEvent(_ParkHandle);
5597   }
5598 }
5599 
5600 void Parker::unpark() {
5601   guarantee(_ParkHandle != NULL, "invariant");
5602   SetEvent(_ParkHandle);
5603 }
5604 
5605 // Platform Monitor implementation
5606 
5607 // Must already be locked
5608 int os::PlatformMonitor::wait(jlong millis) {
5609   assert(millis >= 0, "negative timeout");
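       // A timeout of 0 means wait without a timeout (mapped to INFINITE below).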
5610   int ret = OS_TIMEOUT;
5611   int status = SleepConditionVariableCS(&_cond, &_mutex,
5612                                         millis == 0 ? INFINITE : millis);
5613   if (status != 0) {
5614     ret = OS_OK;
5615   }
5616   #ifndef PRODUCT
5617   else {
5618     DWORD err = GetLastError();
5619     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5620   }
5621   #endif
5622   return ret;
5623 }
5624 
5625 // Run the specified command in a separate process. Return its exit value,
5626 // or -1 on failure (e.g. can't create a new process).
5627 int os::fork_and_exec(const char* cmd) {
5628   STARTUPINFO si;
5629   PROCESS_INFORMATION pi;
5630   DWORD exit_code;
5631 
5632   char * cmd_string;
5633   const char * cmd_prefix = "cmd /C ";
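       // "cmd /C <string>" runs <string> via the Windows command interpreter and
       // exits; the '\n' -> '&' rewrite below chains multiple commands together.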
5634   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5635   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5636   if (cmd_string == NULL) {
5637     return -1;
5638   }
5639   cmd_string[0] = '\0';
5640   strcat(cmd_string, cmd_prefix);
5641   strcat(cmd_string, cmd);
5642 
5643   // now replace all '\n' with '&'
5644   char * substring = cmd_string;
5645   while ((substring = strchr(substring, '\n')) != NULL) {
5646     substring[0] = '&';
5647     substring++;
5648   }
5649   memset(&si, 0, sizeof(si));
5650   si.cb = sizeof(si);
5651   memset(&pi, 0, sizeof(pi));
5652   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5653                             cmd_string,    // command line
5654                             NULL,   // process security attribute
5655                             NULL,   // thread security attribute
5656                             TRUE,   // inherits system handles
5657                             0,      // no creation flags
5658                             NULL,   // use parent's environment block
5659                             NULL,   // use parent's starting directory
5660                             &si,    // (in) startup information
5661                             &pi);   // (out) process information
5662 
5663   if (rslt) {
5664     // Wait until child process exits.
5665     WaitForSingleObject(pi.hProcess, INFINITE);
5666 
5667     GetExitCodeProcess(pi.hProcess, &exit_code);
5668 
5669     // Close process and thread handles.
5670     CloseHandle(pi.hProcess);
5671     CloseHandle(pi.hThread);
5672   } else {
5673     exit_code = -1;
5674   }
5675 
5676   FREE_C_HEAP_ARRAY(char, cmd_string);
5677   return (int)exit_code;
5678 }
5679 
5680 bool os::find(address addr, outputStream* st) {
5681   int offset = -1;
5682   bool result = false;
5683   char buf[256];
5684   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5685     st->print(PTR_FORMAT " ", addr);
5686     if (strlen(buf) < sizeof(buf) - 1) {
5687       char* p = strrchr(buf, '\\');
5688       if (p) {
5689         st->print("%s", p + 1);
5690       } else {
5691         st->print("%s", buf);
5692       }
5693     } else {
5694         // The library name is probably truncated. Let's omit the library name.
5695         // See also JDK-8147512.
5696     }
5697     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5698       st->print("::%s + 0x%x", buf, offset);
5699     }
5700     st->cr();
5701     result = true;
5702   }
5703   return result;
5704 }
5705 
5706 static jint initSock() {
5707   WSADATA wsadata;
5708 
5709   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5710     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5711                 ::GetLastError());
5712     return JNI_ERR;
5713   }
5714   return JNI_OK;
5715 }
5716 
5717 struct hostent* os::get_host_by_name(char* name) {
5718   return (struct hostent*)gethostbyname(name);
5719 }
5720 
5721 int os::socket_close(int fd) {
5722   return ::closesocket(fd);
5723 }
5724 
5725 int os::socket(int domain, int type, int protocol) {
5726   return ::socket(domain, type, protocol);
5727 }
5728 
5729 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5730   return ::connect(fd, him, len);
5731 }
5732 
5733 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5734   return ::recv(fd, buf, (int)nBytes, flags);
5735 }
5736 
5737 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5738   return ::send(fd, buf, (int)nBytes, flags);
5739 }
5740 
5741 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5742   return ::send(fd, buf, (int)nBytes, flags);
5743 }
5744 
5745 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5746 #if defined(IA32)
5747   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5748 #elif defined(AMD64) || defined(_M_ARM64)
5749   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5750 #endif
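     // These flags select which register sets GetThreadContext() captures when a
     // thread is sampled (see os::SuspendedThreadTask::internal_do_task below).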
5751 
5752 // returns true if thread could be suspended,
5753 // false otherwise
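     // Note: SuspendThread() returns the thread's previous suspend count on
     // success and (DWORD)-1 on failure, hence the comparison against ~0 below.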
5754 static bool do_suspend(HANDLE* h) {
5755   if (h != NULL) {
5756     if (SuspendThread(*h) != ~0) {
5757       return true;
5758     }
5759   }
5760   return false;
5761 }
5762 
5763 // resume the thread
5764 // calling resume on an active thread is a no-op
5765 static void do_resume(HANDLE* h) {
5766   if (h != NULL) {
5767     ResumeThread(*h);
5768   }
5769 }
5770 
5771 // retrieve a suspend/resume context capable handle
5772 // from the tid. Caller validates handle return value.
5773 void get_thread_handle_for_extended_context(HANDLE* h,
5774                                             OSThread::thread_id_t tid) {
5775   if (h != NULL) {
5776     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5777   }
5778 }
5779 
5780 // Thread sampling implementation
5781 //
5782 void os::SuspendedThreadTask::internal_do_task() {
5783   CONTEXT    ctxt;
5784   HANDLE     h = NULL;
5785 
5786   // get context capable handle for thread
5787   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5788 
5789   // sanity
5790   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5791     return;
5792   }
5793 
5794   // suspend the thread
5795   if (do_suspend(&h)) {
5796     ctxt.ContextFlags = sampling_context_flags;
5797     // get thread context
5798     GetThreadContext(h, &ctxt);
5799     SuspendedThreadTaskContext context(_thread, &ctxt);
5800     // pass context to Thread Sampling impl
5801     do_task(context);
5802     // resume thread
5803     do_resume(&h);
5804   }
5805 
5806   // close handle
5807   CloseHandle(h);
5808 }
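// Illustrative sketch of the same suspend -> GetThreadContext -> resume pattern
// as a standalone Win32 snippet, to make the flow above easier to follow.
// 'tid' is a hypothetical thread id; the context flags correspond to
// sampling_context_flags defined above.
//
//   void sample_thread_once(DWORD tid) {
//     HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
//                           THREAD_QUERY_INFORMATION, FALSE, tid);
//     if (h == NULL) return;
//     if (SuspendThread(h) != (DWORD)-1) {          // suspend succeeded
//       CONTEXT ctxt;
//       ctxt.ContextFlags = CONTEXT_FULL;           // request control/integer registers
//       if (GetThreadContext(h, &ctxt)) {
//         // ... inspect ctxt while the thread is stopped (e.g. ctxt.Rip on x64) ...
//       }
//       ResumeThread(h);                            // always pair with the SuspendThread
//     }
//     CloseHandle(h);
//   }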
5809 
5810 bool os::start_debugging(char *buf, int buflen) {
5811   int len = (int)strlen(buf);
5812   char *p = &buf[len];
5813 
5814   jio_snprintf(p, buflen-len,
5815              "\n\n"
5816              "Do you want to debug the problem?\n\n"
5817              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5818              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5819              "Otherwise, select 'No' to abort...",
5820              os::current_process_id(), os::current_thread_id());
5821 
5822   bool yes = os::message_box("Unexpected Error", buf);
5823 
5824   if (yes) {
5825     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5826     // exception. If VM is running inside a debugger, the debugger will
5827     // catch the exception. Otherwise, the breakpoint exception will reach
5828     // the default windows exception handler, which can spawn a debugger and
5829     // automatically attach to the dying VM.
5830     os::breakpoint();
5831     yes = false;
5832   }
5833   return yes;
5834 }
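// Illustrative sketch: the Win32 behavior the comment above relies on. A plain
// DebugBreak() raises EXCEPTION_BREAKPOINT; an attached debugger catches it.
//
//   if (IsDebuggerPresent()) {
//     DebugBreak();   // caught by the attached debugger
//   }
//
// os::breakpoint() deliberately breaks unconditionally: without an attached
// debugger the breakpoint exception falls through to the default handler, which
// may spawn the registered just-in-time debugger and attach it to the dying VM.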
5835 
5836 void* os::get_default_process_handle() {
5837   return (void*)GetModuleHandle(NULL);
5838 }
5839 
5840 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5841 // which is used to find statically linked-in agents.
5842 // Additionally, on Windows, __stdcall-decorated names (such as _Agent_OnLoad@XX) are taken into account; see the examples after this function.
5843 // Parameters:
5844 //            sym_name: Symbol in library we are looking for
5845 //            lib_name: Name of library to look in, NULL for shared libs.
5846 //            is_absolute_path == true if lib_name is absolute path to agent
5847 //                                     such as "C:/a/b/L.dll"
5848 //            == false if only the base name of the library is passed in
5849 //               such as "L"
5850 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5851                                     bool is_absolute_path) {
5852   char *agent_entry_name;
5853   size_t len;
5854   size_t name_len;
5855   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5856   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5857   const char *start;
5858 
5859   if (lib_name != NULL) {
5860     len = name_len = strlen(lib_name);
5861     if (is_absolute_path) {
5862       // Need to strip path, prefix and suffix
5863       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5864         lib_name = ++start;
5865       } else {
5866         // Need to check for drive prefix
5867         if ((start = strchr(lib_name, ':')) != NULL) {
5868           lib_name = ++start;
5869         }
5870       }
5871       if (len <= (prefix_len + suffix_len)) {
5872         return NULL;
5873       }
5874       lib_name += prefix_len;
5875       name_len = strlen(lib_name) - suffix_len;
5876     }
5877   }
5878   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5879   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5880   if (agent_entry_name == NULL) {
5881     return NULL;
5882   }
5883   if (lib_name != NULL) {
5884     const char *p = strrchr(sym_name, '@');
5885     if (p != NULL && p != sym_name) {
5886       // sym_name == _Agent_OnLoad@XX
5887       strncpy(agent_entry_name, sym_name, (p - sym_name));
5888       agent_entry_name[(p-sym_name)] = '\0';
5889       // agent_entry_name == _Agent_OnLoad
5890       strcat(agent_entry_name, "_");
5891       strncat(agent_entry_name, lib_name, name_len);
5892       strcat(agent_entry_name, p);
5893       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5894     } else {
5895       strcpy(agent_entry_name, sym_name);
5896       strcat(agent_entry_name, "_");
5897       strncat(agent_entry_name, lib_name, name_len);
5898     }
5899   } else {
5900     strcpy(agent_entry_name, sym_name);
5901   }
5902   return agent_entry_name;
5903 }
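// Illustrative examples of what build_agent_function_name() produces, assuming
// the usual Windows values JNI_LIB_PREFIX == "" and JNI_LIB_SUFFIX == ".dll";
// the agent and symbol names below are made up:
//
//   build_agent_function_name("Agent_OnLoad",    "L",               false)
//     -> "Agent_OnLoad_L"
//   build_agent_function_name("Agent_OnLoad",    "C:\\a\\b\\L.dll", true)
//     -> "Agent_OnLoad_L"                 // path, drive and ".dll" stripped
//   build_agent_function_name("_Agent_OnLoad@8", "L",               false)
//     -> "_Agent_OnLoad_L@8"              // __stdcall decoration preserved
//   build_agent_function_name("Agent_OnLoad",    NULL,              false)
//     -> "Agent_OnLoad"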
5904 
5905 /*
5906   All the defined signal names for Windows.
5907 
5908   NOTE that not all of these names are accepted by FindSignal!
5909 
5910   For various reasons some of these may be rejected at runtime.
5911 
5912   Here are the names currently accepted by a user of sun.misc.Signal with
5913   1.4.1 (ignoring potential interaction with use of chaining, etc):
5914 
5915      (LIST TBD)
5916 
5917 */
5918 int os::get_signal_number(const char* name) {
5919   static const struct {
5920     const char* name;
5921     int         number;
5922   } siglabels [] =
5923     // derived from version 6.0 VC98/include/signal.h
5924   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5925   "FPE",        SIGFPE,         // floating point exception
5926   "SEGV",       SIGSEGV,        // segment violation
5927   "INT",        SIGINT,         // interrupt
5928   "TERM",       SIGTERM,        // software term signal from kill
5929   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5930   "ILL",        SIGILL};        // illegal instruction
5931   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5932     if (strcmp(name, siglabels[i].name) == 0) {
5933       return siglabels[i].number;
5934     }
5935   }
5936   return -1;
5937 }
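// Illustrative usage: look up a signal by its short name.
//
//   int sig = os::get_signal_number("TERM");   // -> SIGTERM
//   int bad = os::get_signal_number("HUP");    // not in the table above -> -1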
5938 
5939 // Fast current thread access
5940 
5941 int os::win32::_thread_ptr_offset = 0;
5942 
5943 static void call_wrapper_dummy() {}
5944 
5945 // We need to call the os_exception_wrapper once so that it sets
5946 // up the offset from FS of the thread pointer.
5947 void os::win32::initialize_thread_ptr_offset() {
5948   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5949                            NULL, methodHandle(), NULL, NULL);
5950 }
5951 
5952 bool os::supports_map_sync() {
5953   return false;
5954 }
5955 
5956 #ifdef ASSERT
5957 static void check_meminfo(MEMORY_BASIC_INFORMATION* minfo) {
5958   assert(minfo->State == MEM_FREE || minfo->State == MEM_COMMIT || minfo->State == MEM_RESERVE, "Invalid state");
5959   if (minfo->State != MEM_FREE) {
5960     assert(minfo->AllocationBase != NULL && minfo->BaseAddress >= minfo->AllocationBase, "Invalid pointers");
5961     assert(minfo->RegionSize > 0, "Invalid region size");
5962   }
5963 }
5964 #endif
5965 
5966 
5967 static bool checkedVirtualQuery(address addr, MEMORY_BASIC_INFORMATION* minfo) {
5968   ZeroMemory(minfo, sizeof(MEMORY_BASIC_INFORMATION));
5969   if (::VirtualQuery(addr, minfo, sizeof(MEMORY_BASIC_INFORMATION)) == sizeof(MEMORY_BASIC_INFORMATION)) {
5970     DEBUG_ONLY(check_meminfo(minfo);)
5971     return true;
5972   }
5973   return false;
5974 }
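// Illustrative sketch of the raw Win32 call that checkedVirtualQuery() wraps.
// 'p' is a hypothetical address; a return value of sizeof(mbi) means the struct
// describes the region containing 'p'. The wrapper above additionally zero-fills
// the struct and, in debug builds, asserts basic invariants on the result.
//
//   MEMORY_BASIC_INFORMATION mbi;
//   if (::VirtualQuery(p, &mbi, sizeof(mbi)) == sizeof(mbi)) {
//     // mbi.State is MEM_FREE, MEM_RESERVE or MEM_COMMIT;
//     // mbi.BaseAddress/mbi.RegionSize bound the region, mbi.AllocationBase
//     // is the start of the surrounding VirtualAlloc allocation.
//   }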
5975 
5976 // Given a pointer pointing into an allocation (an area allocated with VirtualAlloc),
5977 //  return information about that allocation.
5978 bool os::win32::find_mapping(address addr, mapping_info_t* mi) {
5979   // Query at addr to find allocation base; then, starting at allocation base,
5980   //  query all regions, until we either find the next allocation or a free area.
5981   ZeroMemory(mi, sizeof(mapping_info_t));
5982   MEMORY_BASIC_INFORMATION minfo;
5983   address allocation_base = NULL;
5984   address allocation_end = NULL;
5985   bool rc = false;
5986   if (checkedVirtualQuery(addr, &minfo)) {
5987     if (minfo.State != MEM_FREE) {
5988       allocation_base = (address)minfo.AllocationBase;
5989       allocation_end = allocation_base;
5990       // Iterate through all regions in this allocation to find its end. While we are here, also count things.
5991       for (;;) {
5992         bool query_ok = checkedVirtualQuery(allocation_end, &minfo); // avoid shadowing the outer rc
5993         if (!query_ok ||                                             // VirtualQuery error, end of allocation?
5994            minfo.State == MEM_FREE ||                            // end of allocation, free memory follows
5995            (address)minfo.AllocationBase != allocation_base)     // end of allocation, a new one starts
5996         {
5997           break;
5998         }
5999         const size_t region_size = minfo.RegionSize;
6000         mi->regions ++;
6001         if (minfo.State == MEM_COMMIT) {
6002           mi->committed_size += minfo.RegionSize;
6003         }
6004         allocation_end += region_size;
6005       }
6006       if (allocation_base != NULL && allocation_end > allocation_base) {
6007         mi->base = allocation_base;
6008         mi->size = allocation_end - allocation_base;
6009         rc = true;
6010       }
6011     }
6012   }
6013 #ifdef ASSERT
6014   if (rc) {
6015     assert(mi->size > 0 && mi->size >= mi->committed_size, "Sanity");
6016     assert(addr >= mi->base && addr < mi->base + mi->size, "Sanity");
6017     assert(mi->regions > 0, "Sanity");
6018   }
6019 #endif
6020   return rc;
6021 }
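// Illustrative usage of find_mapping(): report the dimensions of the allocation
// containing a (hypothetical) address 'probe'. 'tty' is the usual output stream;
// the field names follow their use above, and the %d assumes 'regions' is an int.
//
//   os::win32::mapping_info_t mi;
//   if (os::win32::find_mapping(probe, &mi)) {
//     tty->print_cr("allocation [" PTR_FORMAT "-" PTR_FORMAT "): " SIZE_FORMAT
//                   " bytes committed in %d regions",
//                   p2i(mi.base), p2i(mi.base + mi.size),
//                   mi.committed_size, mi.regions);
//   }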
6022 
6023 // Helper for print_one_mapping: print n words, both as hex and ascii.
6024 // Use SafeFetch for all values.
6025 static void print_snippet(const void* p, outputStream* st) {
6026   static const int num_words = LP64_ONLY(3) NOT_LP64(6);
6027   static const int num_bytes = num_words * sizeof(intptr_t); // ascii dump covers all fetched bytes
6028   intptr_t v[num_words];
6029   const int errval = 0xDE210244;
6030   for (int i = 0; i < num_words; i++) {
6031     v[i] = SafeFetchN((intptr_t*)p + i, errval);
6032     if (v[i] == errval &&
6033         SafeFetchN((intptr_t*)p + i, ~errval) == ~errval) {
6034       return;
6035     }
6036   }
6037   st->put('[');
6038   for (int i = 0; i < num_words; i++) {
6039     st->print(INTPTR_FORMAT " ", v[i]);
6040   }
6041   const char* b = (char*)v;
6042   st->put('\"');
6043   for (int i = 0; i < num_bytes; i++) {
6044     st->put(::isgraph(b[i]) ? b[i] : '.');
6045   }
6046   st->put('\"');
6047   st->put(']');
6048 }
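// Illustrative sketch of the double-probe idiom used above: SafeFetchN() returns
// the supplied error value instead of faulting, so a result equal to 'errval' is
// ambiguous; probing again with the complemented error value distinguishes
// "memory really contains errval" from "memory is unreadable".
//
//   static bool safe_read_word(intptr_t* p, intptr_t* out) {
//     const intptr_t errval = 0xDE210244;
//     intptr_t v = SafeFetchN(p, errval);
//     if (v == errval && SafeFetchN(p, ~errval) == ~errval) {
//       return false;              // both probes failed -> unreadable
//     }
//     *out = v;
//     return true;
//   }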
6049 
6050 // Helper function for print_memory_mappings:
6051 //  Given a MEMORY_BASIC_INFORMATION, containing information about a non-free region:
6052 //  print out all regions in that allocation. If any of those regions
6053 //  fall outside the given range [start, end), indicate that in the output.
6054 // Return the pointer to the end of the allocation.
6055 static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start, address end, outputStream* st) {
6056   // Print it like this:
6057   //
6058   // Base: <xxxxx>: [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx       (region 1)
6059   //                [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx       (region 2)
6060   assert(minfo->State != MEM_FREE, "Not inside an allocation.");
6061   address allocation_base = (address)minfo->AllocationBase;
6062   #define IS_IN(p) (((p) >= start) && ((p) < end))
6063   bool first_line = true;
6064   bool is_dll = false;
6065   for(;;) {
6066     if (first_line) {
6067       st->print("Base " PTR_FORMAT ": ", p2i(allocation_base));
6068     } else {
6069       st->print_raw(NOT_LP64 ("                 ")
6070                     LP64_ONLY("                         "));
6071     }
6072     address region_start = (address)minfo->BaseAddress;
6073     address region_end = region_start + minfo->RegionSize;
6074     assert(region_end > region_start, "Sanity");
6075     if (region_end <= start) {
6076       st->print("<outside range> ");
6077     } else if (region_start >= end) {
6078       st->print("<outside range> ");
6079     } else if (!IS_IN(region_start) || !IS_IN(region_end - 1)) {
6080       st->print("<partly outside range> ");
6081     }
6082     st->print("[" PTR_FORMAT "-" PTR_FORMAT "), state=", p2i(region_start), p2i(region_end));
6083     switch (minfo->State) {
6084       case MEM_COMMIT:  st->print_raw("MEM_COMMIT "); break;
6085       case MEM_FREE:    st->print_raw("MEM_FREE   "); break;
6086       case MEM_RESERVE: st->print_raw("MEM_RESERVE"); break;
6087       default: st->print("%x?", (unsigned)minfo->State);
6088     }
6089     st->print(", prot=%3x, type=", (unsigned)minfo->Protect);
6090     switch (minfo->Type) {
6091       case MEM_IMAGE:   st->print_raw("MEM_IMAGE  "); break;
6092       case MEM_MAPPED:  st->print_raw("MEM_MAPPED "); break;
6093       case MEM_PRIVATE: st->print_raw("MEM_PRIVATE"); break;
6094       default: st->print("%x?", (unsigned)minfo->Type);
6095     }
6096     // At the start of every allocation, print some more information about this mapping.
6097     // Notes:
6098     //  - this could be beefed up a lot, similar to os::print_location
6099     //  - for now we just query the allocation start point. This may be confusing for cases where
6100     //    the kernel merges multiple mappings.
6101     if (first_line) {
6102       char buf[MAX_PATH];
6103       if (os::dll_address_to_library_name(allocation_base, buf, sizeof(buf), nullptr)) {
6104         st->print(", %s", buf);
6105         is_dll = true;
6106       }
6107     }
6108     // If memory is accessible, and we do not know anything else about it, print a snippet
6109     if (!is_dll &&
6110         minfo->State == MEM_COMMIT &&
6111         !(minfo->Protect & PAGE_NOACCESS || minfo->Protect & PAGE_GUARD)) {
6112       st->print_raw(", ");
6113       print_snippet(region_start, st);
6114     }
6115     st->cr();
6116     // Next region...
6117     bool rc = checkedVirtualQuery(region_end, minfo);
6118     if (rc == false ||                                         // VirtualQuery error, end of allocation?
6119        (minfo->State == MEM_FREE) ||                           // end of allocation, free memory follows
6120        ((address)minfo->AllocationBase != allocation_base) ||  // end of allocation, a new one starts
6121        (region_end > end))                                     // end of range to print.
6122     {
6123       return region_end;
6124     }
6125     first_line = false;
6126   }
6127   #undef IS_IN
6128   ShouldNotReachHere();
6129   return NULL;
6130 }
6131 
6132 void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
6133   MEMORY_BASIC_INFORMATION minfo;
6134   address start = (address)addr;
6135   address end = start + bytes;
6136   address p = start;
6137   if (p == nullptr) { // Let's skip the zero pages.
6138     p += os::vm_allocation_granularity();
6139   }
6140   address p2 = p; // guard against wraparounds
6141   int fuse = 0;
6142 
6143   while (p < end && p >= p2) {
6144     p2 = p;
6145     // Probe for the next mapping.
6146     if (checkedVirtualQuery(p, &minfo)) {
6147       if (minfo.State != MEM_FREE) {
6148         // Found one. Print it out.
6149         address next = print_one_mapping(&minfo, start, end, st); // do not shadow the wraparound guard p2
6150         assert(next > p, "Sanity");
6151         p = next;
6152       } else {
6153         // Note: for free regions, most of MEMORY_BASIC_INFORMATION is undefined.
6154         //  Only region dimensions are not: use those to jump to the end of
6155         //  the free range.
6156         address region_start = (address)minfo.BaseAddress;
6157         address region_end = region_start + minfo.RegionSize;
6158         assert(p >= region_start && p < region_end, "Sanity");
6159         p = region_end;
6160       }
6161     } else {
6162       // MSDN doc on VirtualQuery is unclear about what it means if it returns an error.
6163       //  In particular, whether querying an address outside any mappings would report
6164       //  a MEM_FREE region or just return an error. From experiments, it seems to return
6165       //  a MEM_FREE region for unmapped areas in valid address space and an error if we
6166       //  are outside valid address space.
6167       // Here, we advance the probe pointer by alloc granularity. But if the range to print
6168       //  is large, this may take a long time. Therefore lets stop right away if the address
6169       //  is outside of what we know are valid addresses on Windows. Also, add a loop fuse.
6170       static const address end_virt = (address)(LP64_ONLY(0x7ffffffffffULL) NOT_LP64(3*G));
6171       if (p >= end_virt) {
6172         break;
6173       } else {
6174         // Advance probe pointer, but with a fuse to break long loops.
6175         if (fuse++ == 100000) {
6176           break;
6177         }
6178         p += os::vm_allocation_granularity();
6179       }
6180     }
6181   }
6182 }
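// Illustrative usage of print_memory_mappings(): dump the mappings in the first
// 256 MB of the address space to the default tty stream. A nullptr start makes
// the function skip the zero pages, as coded above.
//
//   os::print_memory_mappings((char*)nullptr, 256 * M, tty);
//
// Each allocation is printed as described in print_one_mapping(): one line per
// region with its state, protection and type, plus a short hex/ascii snippet for
// readable committed memory.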
--- EOF ---