1 /*
   2  * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvmtifiles/jvmti.h"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/globals_extension.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safefetch.inline.hpp"
  60 #include "runtime/safepointMechanism.hpp"
  61 #include "runtime/semaphore.inline.hpp"
  62 #include "runtime/sharedRuntime.hpp"
  63 #include "runtime/statSampler.hpp"
  64 #include "runtime/thread.inline.hpp"
  65 #include "runtime/threadCritical.hpp"
  66 #include "runtime/timer.hpp"
  67 #include "runtime/vm_version.hpp"
  68 #include "services/attachListener.hpp"
  69 #include "services/memTracker.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/align.hpp"
  72 #include "utilities/decoder.hpp"
  73 #include "utilities/defaultStream.hpp"
  74 #include "utilities/events.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/vmError.hpp"
  77 #include "symbolengine.hpp"
  78 #include "windbghelp.hpp"
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 #include <windows.h>
  85 #include <sys/types.h>
  86 #include <sys/stat.h>
  87 #include <sys/timeb.h>
  88 #include <objidl.h>
  89 #include <shlobj.h>
  90 
  91 #include <malloc.h>
  92 #include <signal.h>
  93 #include <direct.h>
  94 #include <errno.h>
  95 #include <fcntl.h>
  96 #include <io.h>
  97 #include <process.h>              // For _beginthreadex(), _endthreadex()
  98 #include <imagehlp.h>             // For os::dll_address_to_function_name
  99 // for enumerating dll libraries
 100 #include <vdmdbg.h>
 101 #include <psapi.h>
 102 #include <mmsystem.h>
 103 #include <winsock2.h>
 104 
 105 // for timer info max values which include all bits
 106 #define ALL_64_BITS CONST64(-1)
 107 
 108 // For DLL loading/load error detection
 109 // Values of PE COFF
 110 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 111 #define IMAGE_FILE_SIGNATURE_LENGTH 4
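// (0x3c is the offset of the e_lfanew field in the DOS header; it holds the file
//  offset of the 4-byte "PE\0\0" signature that precedes the COFF file header.)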
 112 
 113 static HANDLE main_process;
 114 static HANDLE main_thread;
 115 static int    main_thread_id;
 116 
 117 static FILETIME process_creation_time;
 118 static FILETIME process_exit_time;
 119 static FILETIME process_user_time;
 120 static FILETIME process_kernel_time;
 121 
 122 #if defined(_M_ARM64)
 123   #define __CPU__ aarch64
 124 #elif defined(_M_AMD64)
 125   #define __CPU__ amd64
 126 #else
 127   #define __CPU__ i486
 128 #endif
 129 
 130 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 131 PVOID  topLevelVectoredExceptionHandler = NULL;
 132 LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
 133 #endif
 134 
 135 // save DLL module handle, used by GetModuleFileName
 136 
 137 HINSTANCE vm_lib_handle;
 138 
 139 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 140   switch (reason) {
 141   case DLL_PROCESS_ATTACH:
 142     vm_lib_handle = hinst;
 143     if (ForceTimeHighResolution) {
 144       timeBeginPeriod(1L);
 145     }
 146     WindowsDbgHelp::pre_initialize();
 147     SymbolEngine::pre_initialize();
 148     break;
 149   case DLL_PROCESS_DETACH:
 150     if (ForceTimeHighResolution) {
 151       timeEndPeriod(1L);
 152     }
 153 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 154     if (topLevelVectoredExceptionHandler != NULL) {
 155       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 156       topLevelVectoredExceptionHandler = NULL;
 157     }
 158 #endif
 159     break;
 160   default:
 161     break;
 162   }
 163   return true;
 164 }
 165 
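// Convert a FILETIME (a 64-bit count of 100 ns units) to approximately seconds, as a double.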
 166 static inline double fileTimeAsDouble(FILETIME* time) {
 167   const double high  = (double) ((unsigned int) ~0);
 168   const double split = 10000000.0;
 169   double result = (time->dwLowDateTime / split) +
 170                    time->dwHighDateTime * (high/split);
 171   return result;
 172 }
 173 
 174 // Implementation of os
 175 
 176 #define RANGE_FORMAT                "[" PTR_FORMAT "-" PTR_FORMAT ")"
 177 #define RANGE_FORMAT_ARGS(p, len)   p2i(p), p2i((address)p + len)
 178 
 179 // A number of wrappers for more frequently used system calls, to add standard logging.
 180 
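// Saves the calling thread's last-error value on construction and restores it on
// destruction, so the logging calls in the wrappers below cannot clobber the
// GetLastError() value the caller may still want to inspect.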
 181 struct PreserveLastError {
 182   const DWORD v;
 183   PreserveLastError() : v(::GetLastError()) {}
 184   ~PreserveLastError() { ::SetLastError(v); }
 185 };
 186 
 187 // Logging wrapper for VirtualAlloc
 188 static LPVOID virtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
 189   LPVOID result = ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
 190   if (result != NULL) {
 191     log_trace(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) returned " PTR_FORMAT "%s.",
 192                   p2i(lpAddress), dwSize, flAllocationType, flProtect, p2i(result),
 193                   ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
 194   } else {
 195     PreserveLastError ple;
 196     log_info(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) failed (%u).",
 197                   p2i(lpAddress), dwSize, flAllocationType, flProtect, ple.v);
 198   }
 199   return result;
 200 }
 201 
 202 // Logging wrapper for VirtualFree
 203 static BOOL virtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD  dwFreeType) {
 204   BOOL result = ::VirtualFree(lpAddress, dwSize, dwFreeType);
 205   if (result != FALSE) {
 206     log_trace(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) succeeded",
 207                   p2i(lpAddress), dwSize, dwFreeType);
 208   } else {
 209     PreserveLastError ple;
 210     log_info(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) failed (%u).",
 211                  p2i(lpAddress), dwSize, dwFreeType, ple.v);
 212   }
 213   return result;
 214 }
 215 
 216 // Logging wrapper for VirtualAllocExNuma
 217 static LPVOID virtualAllocExNuma(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD  flAllocationType,
 218                                  DWORD  flProtect, DWORD  nndPreferred) {
 219   LPVOID result = ::VirtualAllocExNuma(hProcess, lpAddress, dwSize, flAllocationType, flProtect, nndPreferred);
 220   if (result != NULL) {
 221     log_trace(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) returned " PTR_FORMAT "%s.",
 222                   p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, p2i(result),
 223                   ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
 224   } else {
 225     PreserveLastError ple;
 226     log_info(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) failed (%u).",
 227                  p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, ple.v);
 228   }
 229   return result;
 230 }
 231 
 232 // Logging wrapper for MapViewOfFileEx
 233 static LPVOID mapViewOfFileEx(HANDLE hFileMappingObject, DWORD  dwDesiredAccess, DWORD  dwFileOffsetHigh,
 234                               DWORD  dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap, LPVOID lpBaseAddress) {
 235   LPVOID result = ::MapViewOfFileEx(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh,
 236                                     dwFileOffsetLow, dwNumberOfBytesToMap, lpBaseAddress);
 237   if (result != NULL) {
 238     log_trace(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") returned " PTR_FORMAT "%s.",
 239                   p2i(lpBaseAddress), dwNumberOfBytesToMap, p2i(result),
 240                   ((lpBaseAddress != NULL && result != lpBaseAddress) ? " <different base!>" : ""));
 241   } else {
 242     PreserveLastError ple;
 243     log_info(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") failed (%u).",
 244                  p2i(lpBaseAddress), dwNumberOfBytesToMap, ple.v);
 245   }
 246   return result;
 247 }
 248 
 249 // Logging wrapper for UnmapViewOfFile
 250 static BOOL unmapViewOfFile(LPCVOID lpBaseAddress) {
 251   BOOL result = ::UnmapViewOfFile(lpBaseAddress);
 252   if (result != FALSE) {
 253     log_trace(os)("UnmapViewOfFile(" PTR_FORMAT ") succeeded", p2i(lpBaseAddress));
 254   } else {
 255     PreserveLastError ple;
 256     log_info(os)("UnmapViewOfFile(" PTR_FORMAT ") failed (%u).",  p2i(lpBaseAddress), ple.v);
 257   }
 258   return result;
 259 }
 260 
 261 char** os::get_environ() { return _environ; }
 262 
 263 // No setuid programs under Windows.
 264 bool os::have_special_privileges() {
 265   return false;
 266 }
 267 
 268 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 272 void os::run_periodic_checks() {
 273   return;
 274 }
 275 
 276 // previous UnhandledExceptionFilter, if there is one
 277 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 278 
 279 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 280 
 281 void os::init_system_properties_values() {
 282   // sysclasspath, java_home, dll_dir
 283   {
 284     char *home_path;
 285     char *dll_path;
 286     char *pslash;
 287     const char *bin = "\\bin";
 288     char home_dir[MAX_PATH + 1];
 289     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 290 
 291     if (alt_home_dir != NULL)  {
 292       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 293       home_dir[MAX_PATH] = '\0';
 294     } else {
 295       os::jvm_path(home_dir, sizeof(home_dir));
 296       // Found the full path to jvm.dll.
 297       // Now cut the path to <java_home>/jre if we can.
 298       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 299       pslash = strrchr(home_dir, '\\');
 300       if (pslash != NULL) {
 301         *pslash = '\0';                   // get rid of \{client|server}
 302         pslash = strrchr(home_dir, '\\');
 303         if (pslash != NULL) {
 304           *pslash = '\0';                 // get rid of \bin
 305         }
 306       }
 307     }
 308 
 309     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 310     strcpy(home_path, home_dir);
 311     Arguments::set_java_home(home_path);
 312     FREE_C_HEAP_ARRAY(char, home_path);
 313 
 314     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 315                                 mtInternal);
 316     strcpy(dll_path, home_dir);
 317     strcat(dll_path, bin);
 318     Arguments::set_dll_dir(dll_path);
 319     FREE_C_HEAP_ARRAY(char, dll_path);
 320 
 321     if (!set_boot_path('\\', ';')) {
 322       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 323     }
 324   }
 325 
 326 // library_path
 327 #define EXT_DIR "\\lib\\ext"
 328 #define BIN_DIR "\\bin"
 329 #define PACKAGE_DIR "\\Sun\\Java"
 330   {
 331     // Win32 library search order (See the documentation for LoadLibrary):
 332     //
    // 1. The directory from which the application is loaded.
    // 2. The system-wide Java Extensions directory (Java only)
 335     // 3. System directory (GetSystemDirectory)
 336     // 4. Windows directory (GetWindowsDirectory)
 337     // 5. The PATH environment variable
 338     // 6. The current directory
 339 
 340     char *library_path;
 341     char tmp[MAX_PATH];
 342     char *path_str = ::getenv("PATH");
 343 
 344     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 345                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 346 
 347     library_path[0] = '\0';
 348 
 349     GetModuleFileName(NULL, tmp, sizeof(tmp));
 350     *(strrchr(tmp, '\\')) = '\0';
 351     strcat(library_path, tmp);
 352 
 353     GetWindowsDirectory(tmp, sizeof(tmp));
 354     strcat(library_path, ";");
 355     strcat(library_path, tmp);
 356     strcat(library_path, PACKAGE_DIR BIN_DIR);
 357 
 358     GetSystemDirectory(tmp, sizeof(tmp));
 359     strcat(library_path, ";");
 360     strcat(library_path, tmp);
 361 
 362     GetWindowsDirectory(tmp, sizeof(tmp));
 363     strcat(library_path, ";");
 364     strcat(library_path, tmp);
 365 
 366     if (path_str) {
 367       strcat(library_path, ";");
 368       strcat(library_path, path_str);
 369     }
 370 
 371     strcat(library_path, ";.");
 372 
 373     Arguments::set_library_path(library_path);
 374     FREE_C_HEAP_ARRAY(char, library_path);
 375   }
 376 
 377   // Default extensions directory
 378   {
 379     char path[MAX_PATH];
 380     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 381     GetWindowsDirectory(path, MAX_PATH);
 382     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 383             path, PACKAGE_DIR, EXT_DIR);
 384     Arguments::set_ext_dirs(buf);
 385   }
 386   #undef EXT_DIR
 387   #undef BIN_DIR
 388   #undef PACKAGE_DIR
 389 
 390 #ifndef _WIN64
 391   // set our UnhandledExceptionFilter and save any previous one
 392   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 393 #endif
 394 
 395   // Done
 396   return;
 397 }
 398 
 399 void os::breakpoint() {
 400   DebugBreak();
 401 }
 402 
 403 // Invoked from the BREAKPOINT Macro
 404 extern "C" void breakpoint() {
 405   os::breakpoint();
 406 }
 407 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 409 // So far, this method is only used by Native Memory Tracking, which is
 410 // only supported on Windows XP or later.
 411 //
 412 int os::get_native_stack(address* stack, int frames, int toSkip) {
 413   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 414   for (int index = captured; index < frames; index ++) {
 415     stack[index] = NULL;
 416   }
 417   return captured;
 418 }
 419 
 420 // os::current_stack_base()
 421 //
 422 //   Returns the base of the stack, which is the stack's
 423 //   starting address.  This function must be called
 424 //   while running on the stack of the thread being queried.
 425 
 426 address os::current_stack_base() {
 427   MEMORY_BASIC_INFORMATION minfo;
 428   address stack_bottom;
 429   size_t stack_size;
 430 
 431   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 432   stack_bottom =  (address)minfo.AllocationBase;
 433   stack_size = minfo.RegionSize;
 434 
 435   // Add up the sizes of all the regions with the same
 436   // AllocationBase.
 437   while (1) {
 438     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 439     if (stack_bottom == (address)minfo.AllocationBase) {
 440       stack_size += minfo.RegionSize;
 441     } else {
 442       break;
 443     }
 444   }
 445   return stack_bottom + stack_size;
 446 }
 447 
 448 size_t os::current_stack_size() {
 449   size_t sz;
 450   MEMORY_BASIC_INFORMATION minfo;
 451   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 452   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 453   return sz;
 454 }
 455 
 456 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 457   MEMORY_BASIC_INFORMATION minfo;
 458   committed_start = NULL;
 459   committed_size = 0;
 460   address top = start + size;
 461   const address start_addr = start;
 462   while (start < top) {
 463     VirtualQuery(start, &minfo, sizeof(minfo));
 464     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 465       if (committed_start != NULL) {
 466         break;
 467       }
 468     } else {  // committed
 469       if (committed_start == NULL) {
 470         committed_start = start;
 471       }
 472       size_t offset = start - (address)minfo.BaseAddress;
 473       committed_size += minfo.RegionSize - offset;
 474     }
 475     start = (address)minfo.BaseAddress + minfo.RegionSize;
 476   }
 477 
 478   if (committed_start == NULL) {
 479     assert(committed_size == 0, "Sanity");
 480     return false;
 481   } else {
 482     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 483     // current region may go beyond the limit, trim to the limit
 484     committed_size = MIN2(committed_size, size_t(top - committed_start));
 485     return true;
 486   }
 487 }
 488 
 489 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 490   const struct tm* time_struct_ptr = localtime(clock);
 491   if (time_struct_ptr != NULL) {
 492     *res = *time_struct_ptr;
 493     return res;
 494   }
 495   return NULL;
 496 }
 497 
 498 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 499   const struct tm* time_struct_ptr = gmtime(clock);
 500   if (time_struct_ptr != NULL) {
 501     *res = *time_struct_ptr;
 502     return res;
 503   }
 504   return NULL;
 505 }
 506 
 507 JNIEXPORT
 508 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 509 
 510 // Thread start routine for all newly created threads
 511 static unsigned __stdcall thread_native_entry(Thread* thread) {
 512 
 513   thread->record_stack_base_and_size();
 514   thread->initialize_thread_current();
 515 
 516   OSThread* osthr = thread->osthread();
 517   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 518 
 519   if (UseNUMA) {
 520     int lgrp_id = os::numa_get_group_id();
 521     if (lgrp_id != -1) {
 522       thread->set_lgrp_id(lgrp_id);
 523     }
 524   }
 525 
 526   // Diagnostic code to investigate JDK-6573254
 527   int res = 30115;  // non-java thread
 528   if (thread->is_Java_thread()) {
 529     res = 20115;    // java thread
 530   }
 531 
 532   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 533 
 534 #ifdef USE_VECTORED_EXCEPTION_HANDLING
  // Any exception is caught by the Vectored Exception Handler, so the VM can
  // generate an error dump when an exception occurs in a non-Java thread
  // (e.g. the VM thread).
 538   thread->call_run();
 539 #else
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 543   __try {
 544     thread->call_run();
 545   } __except(topLevelExceptionFilter(
 546                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 547     // Nothing to do.
 548   }
 549 #endif
 550 
 551   // Note: at this point the thread object may already have deleted itself.
 552   // Do not dereference it from here on out.
 553 
 554   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 555 
 556   // One less thread is executing
 557   // When the VMThread gets here, the main thread may have already exited
 558   // which frees the CodeHeap containing the Atomic::add code
 559   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 560     Atomic::dec(&os::win32::_os_thread_count);
 561   }
 562 
 563   // Thread must not return from exit_process_or_thread(), but if it does,
 564   // let it proceed to exit normally
 565   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 566 }
 567 
 568 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 569                                   int thread_id) {
 570   // Allocate the OSThread object
 571   OSThread* osthread = new OSThread(NULL, NULL);
 572   if (osthread == NULL) return NULL;
 573 
 574   // Initialize the JDK library's interrupt event.
 575   // This should really be done when OSThread is constructed,
 576   // but there is no way for a constructor to report failure to
 577   // allocate the event.
 578   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 579   if (interrupt_event == NULL) {
 580     delete osthread;
 581     return NULL;
 582   }
 583   osthread->set_interrupt_event(interrupt_event);
 584 
 585   // Store info on the Win32 thread into the OSThread
 586   osthread->set_thread_handle(thread_handle);
 587   osthread->set_thread_id(thread_id);
 588 
 589   if (UseNUMA) {
 590     int lgrp_id = os::numa_get_group_id();
 591     if (lgrp_id != -1) {
 592       thread->set_lgrp_id(lgrp_id);
 593     }
 594   }
 595 
 596   // Initial thread state is INITIALIZED, not SUSPENDED
 597   osthread->set_state(INITIALIZED);
 598 
 599   return osthread;
 600 }
 601 
 602 
 603 bool os::create_attached_thread(JavaThread* thread) {
 604 #ifdef ASSERT
 605   thread->verify_not_published();
 606 #endif
 607   HANDLE thread_h;
 608   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 609                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 610     fatal("DuplicateHandle failed\n");
 611   }
 612   OSThread* osthread = create_os_thread(thread, thread_h,
 613                                         (int)current_thread_id());
 614   if (osthread == NULL) {
 615     return false;
 616   }
 617 
 618   // Initial thread state is RUNNABLE
 619   osthread->set_state(RUNNABLE);
 620 
 621   thread->set_osthread(osthread);
 622 
 623   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 624     os::current_thread_id());
 625 
 626   return true;
 627 }
 628 
 629 bool os::create_main_thread(JavaThread* thread) {
 630 #ifdef ASSERT
 631   thread->verify_not_published();
 632 #endif
 633   if (_starting_thread == NULL) {
 634     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 635     if (_starting_thread == NULL) {
 636       return false;
 637     }
 638   }
 639 
  // The primordial thread is runnable from the start
 641   _starting_thread->set_state(RUNNABLE);
 642 
 643   thread->set_osthread(_starting_thread);
 644   return true;
 645 }
 646 
 647 // Helper function to trace _beginthreadex attributes,
 648 //  similar to os::Posix::describe_pthread_attr()
 649 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 650                                                size_t stacksize, unsigned initflag) {
 651   stringStream ss(buf, buflen);
 652   if (stacksize == 0) {
 653     ss.print("stacksize: default, ");
 654   } else {
 655     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 656   }
 657   ss.print("flags: ");
 658   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 659   #define ALL(X) \
 660     X(CREATE_SUSPENDED) \
 661     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 662   ALL(PRINT_FLAG)
 663   #undef ALL
 664   #undef PRINT_FLAG
 665   return buf;
 666 }
 667 
 668 // Allocate and initialize a new OSThread
 669 bool os::create_thread(Thread* thread, ThreadType thr_type,
 670                        size_t stack_size) {
 671   unsigned thread_id;
 672 
 673   // Allocate the OSThread object
 674   OSThread* osthread = new OSThread(NULL, NULL);
 675   if (osthread == NULL) {
 676     return false;
 677   }
 678 
 679   // Initialize the JDK library's interrupt event.
 680   // This should really be done when OSThread is constructed,
 681   // but there is no way for a constructor to report failure to
 682   // allocate the event.
 683   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 684   if (interrupt_event == NULL) {
 685     delete osthread;
 686     return false;
 687   }
 688   osthread->set_interrupt_event(interrupt_event);
 689   // We don't call set_interrupted(false) as it will trip the assert in there
 690   // as we are not operating on the current thread. We don't need to call it
 691   // because the initial state is already correct.
 692 
 693   thread->set_osthread(osthread);
 694 
 695   if (stack_size == 0) {
 696     switch (thr_type) {
 697     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 699       if (JavaThread::stack_size_at_create() > 0) {
 700         stack_size = JavaThread::stack_size_at_create();
 701       }
 702       break;
 703     case os::compiler_thread:
 704       if (CompilerThreadStackSize > 0) {
 705         stack_size = (size_t)(CompilerThreadStackSize * K);
 706         break;
 707       } // else fall through:
 708         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 709     case os::vm_thread:
 710     case os::gc_thread:
 711     case os::asynclog_thread:
 712     case os::watcher_thread:
 713       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 714       break;
 715     }
 716   }
 717 
 718   // Create the Win32 thread
 719   //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only will the stack space be rounded up to a multiple of 1MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as the stack size. However we
  // are not supposed to call CreateThread() directly according to the MSDN
  // documentation because the JVM uses the C runtime library. The good news is that the
  // flag appears to work with _beginthreadex() as well.
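  //
  // For illustration (assuming a 1 MB reserve size in the launcher's PE header):
  // passing stack_size = 64*K without the flag would only pre-commit 64 KB of the
  // 1 MB stack, while passing stack_size = 2*M together with
  // STACK_SIZE_PARAM_IS_A_RESERVATION reserves a full 2 MB stack.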
 738 
 739   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 740   HANDLE thread_handle;
 741   int limit = 3;
 742   do {
 743     thread_handle =
 744       (HANDLE)_beginthreadex(NULL,
 745                              (unsigned)stack_size,
 746                              (unsigned (__stdcall *)(void*)) thread_native_entry,
 747                              thread,
 748                              initflag,
 749                              &thread_id);
 750   } while (thread_handle == NULL && errno == EAGAIN && limit-- > 0);
 751 
 752   ResourceMark rm;
 753   char buf[64];
 754   if (thread_handle != NULL) {
 755     log_info(os, thread)("Thread \"%s\" started (tid: %u, attributes: %s)",
 756                          thread->name(), thread_id,
 757                          describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 758   } else {
 759     log_warning(os, thread)("Failed to start thread \"%s\" - _beginthreadex failed (%s) for attributes: %s.",
 760                             thread->name(), os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 761     // Log some OS information which might explain why creating the thread failed.
 762     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 763     LogStream st(Log(os, thread)::info());
 764     os::print_memory_info(&st);
 765   }
 766 
 767   if (thread_handle == NULL) {
 768     // Need to clean up stuff we've allocated so far
 769     thread->set_osthread(NULL);
 770     delete osthread;
 771     return false;
 772   }
 773 
 774   Atomic::inc(&os::win32::_os_thread_count);
 775 
 776   // Store info on the Win32 thread into the OSThread
 777   osthread->set_thread_handle(thread_handle);
 778   osthread->set_thread_id(thread_id);
 779 
 780   // Initial thread state is INITIALIZED, not SUSPENDED
 781   osthread->set_state(INITIALIZED);
 782 
 783   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 784   return true;
 785 }
 786 
 787 
 788 // Free Win32 resources related to the OSThread
 789 void os::free_thread(OSThread* osthread) {
 790   assert(osthread != NULL, "osthread not set");
 791 
 792   // We are told to free resources of the argument thread,
 793   // but we can only really operate on the current thread.
 794   assert(Thread::current()->osthread() == osthread,
 795          "os::free_thread but not current thread");
 796 
 797   CloseHandle(osthread->thread_handle());
 798   delete osthread;
 799 }
 800 
 801 static jlong first_filetime;
 802 static jlong initial_performance_count;
 803 static jlong performance_frequency;
 804 
 805 
 806 jlong as_long(LARGE_INTEGER x) {
 807   jlong result = 0; // initialization to avoid warning
 808   set_high(&result, x.HighPart);
 809   set_low(&result, x.LowPart);
 810   return result;
 811 }
 812 
 813 
 814 jlong os::elapsed_counter() {
 815   LARGE_INTEGER count;
 816   QueryPerformanceCounter(&count);
 817   return as_long(count) - initial_performance_count;
 818 }
 819 
 820 
 821 jlong os::elapsed_frequency() {
 822   return performance_frequency;
 823 }
 824 
 825 
 826 julong os::available_memory() {
 827   return win32::available_memory();
 828 }
 829 
 830 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
  // value if total memory is larger than 4GB
 833   MEMORYSTATUSEX ms;
 834   ms.dwLength = sizeof(ms);
 835   GlobalMemoryStatusEx(&ms);
 836 
 837   return (julong)ms.ullAvailPhys;
 838 }
 839 
 840 julong os::physical_memory() {
 841   return win32::physical_memory();
 842 }
 843 
 844 bool os::has_allocatable_memory_limit(size_t* limit) {
 845   MEMORYSTATUSEX ms;
 846   ms.dwLength = sizeof(ms);
 847   GlobalMemoryStatusEx(&ms);
 848 #ifdef _LP64
 849   *limit = (size_t)ms.ullAvailVirtual;
 850   return true;
 851 #else
 852   // Limit to 1400m because of the 2gb address space wall
 853   *limit = MIN2((size_t)1400*M, (size_t)ms.ullAvailVirtual);
 854   return true;
 855 #endif
 856 }
 857 
 858 int os::active_processor_count() {
 859   // User has overridden the number of active processors
 860   if (ActiveProcessorCount > 0) {
 861     log_trace(os)("active_processor_count: "
 862                   "active processor count set by user : %d",
 863                   ActiveProcessorCount);
 864     return ActiveProcessorCount;
 865   }
 866 
 867   DWORD_PTR lpProcessAffinityMask = 0;
 868   DWORD_PTR lpSystemAffinityMask = 0;
 869   int proc_count = processor_count();
 870   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 871       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of bits set in the process affinity mask
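    // (The loop below uses the classic x &= (x - 1) trick, which clears the lowest
    // set bit on each iteration, so the iteration count equals the number of set bits.)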
 873     int bitcount = 0;
 874     while (lpProcessAffinityMask != 0) {
 875       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 876       bitcount++;
 877     }
 878     return bitcount;
 879   } else {
 880     return proc_count;
 881   }
 882 }
 883 
 884 uint os::processor_id() {
 885   return (uint)GetCurrentProcessorNumber();
 886 }
 887 
 888 // For dynamic lookup of SetThreadDescription API
 889 typedef HRESULT (WINAPI *SetThreadDescriptionFnPtr)(HANDLE, PCWSTR);
 890 typedef HRESULT (WINAPI *GetThreadDescriptionFnPtr)(HANDLE, PWSTR*);
 891 static SetThreadDescriptionFnPtr _SetThreadDescription = NULL;
 892 DEBUG_ONLY(static GetThreadDescriptionFnPtr _GetThreadDescription = NULL;)
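// A minimal sketch of how these pointers could be resolved at runtime (the exact
// module name and the point during VM startup where this happens are assumptions here):
//
//   HMODULE h = ::GetModuleHandle("KernelBase.dll");
//   if (h != NULL) {
//     _SetThreadDescription =
//         (SetThreadDescriptionFnPtr)::GetProcAddress(h, "SetThreadDescription");
//     DEBUG_ONLY(_GetThreadDescription =
//         (GetThreadDescriptionFnPtr)::GetProcAddress(h, "GetThreadDescription");)
//   }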
 893 
 894 // forward decl.
 895 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path);
 896 
 897 void os::set_native_thread_name(const char *name) {
 898 
  // From Windows 10 and Windows Server 2016 onward, there is a direct API
 900   // for setting the thread name/description:
 901   // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
 902 
 903   if (_SetThreadDescription != NULL) {
 904     // SetThreadDescription takes a PCWSTR but we have conversion routines that produce
 905     // LPWSTR. The only difference is that PCWSTR is a pointer to const WCHAR.
 906     LPWSTR unicode_name;
 907     errno_t err = convert_to_unicode(name, &unicode_name);
 908     if (err == ERROR_SUCCESS) {
 909       HANDLE current = GetCurrentThread();
 910       HRESULT hr = _SetThreadDescription(current, unicode_name);
 911       if (FAILED(hr)) {
 912         log_debug(os, thread)("set_native_thread_name: SetThreadDescription failed - falling back to debugger method");
 913         FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
 914       } else {
 915         log_trace(os, thread)("set_native_thread_name: SetThreadDescription succeeded - new name: %s", name);
 916 
 917 #ifdef ASSERT
 918         // For verification purposes in a debug build we read the thread name back and check it.
 919         PWSTR thread_name;
 920         HRESULT hr2 = _GetThreadDescription(current, &thread_name);
 921         if (FAILED(hr2)) {
 922           log_debug(os, thread)("set_native_thread_name: GetThreadDescription failed!");
 923         } else {
 924           int res = CompareStringW(LOCALE_USER_DEFAULT,
 925                                    0, // no special comparison rules
 926                                    unicode_name,
 927                                    -1, // null-terminated
 928                                    thread_name,
 929                                    -1  // null-terminated
 930                                    );
 931           assert(res == CSTR_EQUAL,
 932                  "Name strings were not the same - set: %ls, but read: %ls", unicode_name, thread_name);
 933           LocalFree(thread_name);
 934         }
 935 #endif
 936         FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
 937         return;
 938       }
 939     } else {
 940       log_debug(os, thread)("set_native_thread_name: convert_to_unicode failed - falling back to debugger method");
 941     }
 942   }
 943 
 944   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 945   //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception below to show the correct name.
 949 
 950   // If there is no debugger attached skip raising the exception
 951   if (!IsDebuggerPresent()) {
 952     log_debug(os, thread)("set_native_thread_name: no debugger present so unable to set thread name");
 953     return;
 954   }
 955 
 956   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 957   struct {
 958     DWORD dwType;     // must be 0x1000
 959     LPCSTR szName;    // pointer to name (in user addr space)
 960     DWORD dwThreadID; // thread ID (-1=caller thread)
 961     DWORD dwFlags;    // reserved for future use, must be zero
 962   } info;
 963 
 964   info.dwType = 0x1000;
 965   info.szName = name;
 966   info.dwThreadID = -1;
 967   info.dwFlags = 0;
 968 
 969   __try {
 970     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 971   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 972 }
 973 
 974 void os::win32::initialize_performance_counter() {
 975   LARGE_INTEGER count;
 976   QueryPerformanceFrequency(&count);
 977   performance_frequency = as_long(count);
 978   QueryPerformanceCounter(&count);
 979   initial_performance_count = as_long(count);
 980 }
 981 
 982 
 983 double os::elapsedTime() {
 984   return (double) elapsed_counter() / (double) elapsed_frequency();
 985 }
 986 
 987 
 988 // Windows format:
 989 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 990 // Java format:
 991 //   Java standards require the number of milliseconds since 1/1/1970
 992 
 993 // Constant offset - calculated using offset()
 994 static jlong  _offset   = 116444736000000000;
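// Derivation of the constant: 1601-01-01 to 1970-01-01 spans 369 years containing
// 89 leap days, i.e. 369 * 365 + 89 = 134774 days = 11644473600 seconds; multiplying
// by 10^7 (100 ns units per second) gives 116444736000000000.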
 995 // Fake time counter for reproducible results when debugging
 996 static jlong  fake_time = 0;
 997 
 998 #ifdef ASSERT
 999 // Just to be safe, recalculate the offset in debug mode
1000 static jlong _calculated_offset = 0;
1001 static int   _has_calculated_offset = 0;
1002 
1003 jlong offset() {
1004   if (_has_calculated_offset) return _calculated_offset;
1005   SYSTEMTIME java_origin;
1006   java_origin.wYear          = 1970;
1007   java_origin.wMonth         = 1;
1008   java_origin.wDayOfWeek     = 0; // ignored
1009   java_origin.wDay           = 1;
1010   java_origin.wHour          = 0;
1011   java_origin.wMinute        = 0;
1012   java_origin.wSecond        = 0;
1013   java_origin.wMilliseconds  = 0;
1014   FILETIME jot;
1015   if (!SystemTimeToFileTime(&java_origin, &jot)) {
1016     fatal("Error = %d\nWindows error", GetLastError());
1017   }
1018   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
1019   _has_calculated_offset = 1;
1020   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
1021   return _calculated_offset;
1022 }
1023 #else
1024 jlong offset() {
1025   return _offset;
1026 }
1027 #endif
1028 
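// Convert a FILETIME (100 ns units since 1601-01-01) to Java milliseconds since the
// 1970 epoch: subtract the epoch offset, then divide by 10000 (100 ns units per ms).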
1029 jlong windows_to_java_time(FILETIME wt) {
1030   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
1031   return (a - offset()) / 10000;
1032 }
1033 
// Returns time ticks in tenths of microseconds (100 ns units)
1035 jlong windows_to_time_ticks(FILETIME wt) {
1036   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
1037   return (a - offset());
1038 }
1039 
1040 FILETIME java_to_windows_time(jlong l) {
1041   jlong a = (l * 10000) + offset();
1042   FILETIME result;
1043   result.dwHighDateTime = high(a);
1044   result.dwLowDateTime  = low(a);
1045   return result;
1046 }
1047 
1048 bool os::supports_vtime() { return true; }
1049 
1050 double os::elapsedVTime() {
1051   FILETIME created;
1052   FILETIME exited;
1053   FILETIME kernel;
1054   FILETIME user;
1055   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
1056     // the resolution of windows_to_java_time() should be sufficient (ms)
1057     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
1058   } else {
1059     return elapsedTime();
1060   }
1061 }
1062 
1063 jlong os::javaTimeMillis() {
1064   FILETIME wt;
1065   GetSystemTimeAsFileTime(&wt);
1066   return windows_to_java_time(wt);
1067 }
1068 
1069 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1070   FILETIME wt;
1071   GetSystemTimeAsFileTime(&wt);
1072   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
1073   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
1074   seconds = secs;
1075   nanos = jlong(ticks - (secs*10000000)) * 100;
1076 }
1077 
1078 jlong os::javaTimeNanos() {
1079     LARGE_INTEGER current_count;
1080     QueryPerformanceCounter(&current_count);
1081     double current = as_long(current_count);
1082     double freq = performance_frequency;
1083     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
1084     return time;
1085 }
1086 
1087 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1088   jlong freq = performance_frequency;
1089   if (freq < NANOSECS_PER_SEC) {
1090     // the performance counter is 64 bits and we will
1091     // be multiplying it -- so no wrap in 64 bits
1092     info_ptr->max_value = ALL_64_BITS;
1093   } else if (freq > NANOSECS_PER_SEC) {
1094     // use the max value the counter can reach to
1095     // determine the max value which could be returned
1096     julong max_counter = (julong)ALL_64_BITS;
1097     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
1098   } else {
1099     // the performance counter is 64 bits and we will
1100     // be using it directly -- so no wrap in 64 bits
1101     info_ptr->max_value = ALL_64_BITS;
1102   }
1103 
1104   // using a counter, so no skipping
1105   info_ptr->may_skip_backward = false;
1106   info_ptr->may_skip_forward = false;
1107 
1108   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
1109 }
1110 
1111 char* os::local_time_string(char *buf, size_t buflen) {
1112   SYSTEMTIME st;
1113   GetLocalTime(&st);
1114   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1115                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
1116   return buf;
1117 }
1118 
1119 bool os::getTimesSecs(double* process_real_time,
1120                       double* process_user_time,
1121                       double* process_system_time) {
1122   HANDLE h_process = GetCurrentProcess();
1123   FILETIME create_time, exit_time, kernel_time, user_time;
1124   BOOL result = GetProcessTimes(h_process,
1125                                 &create_time,
1126                                 &exit_time,
1127                                 &kernel_time,
1128                                 &user_time);
1129   if (result != 0) {
1130     FILETIME wt;
1131     GetSystemTimeAsFileTime(&wt);
1132     jlong rtc_millis = windows_to_java_time(wt);
1133     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1134     *process_user_time =
1135       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1136     *process_system_time =
1137       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1138     return true;
1139   } else {
1140     return false;
1141   }
1142 }
1143 
1144 void os::shutdown() {
1145   // allow PerfMemory to attempt cleanup of any persistent resources
1146   perfMemory_exit();
1147 
1148   // flush buffered output, finish log files
1149   ostream_abort();
1150 
1151   // Check for abort hook
1152   abort_hook_t abort_hook = Arguments::abort_hook();
1153   if (abort_hook != NULL) {
1154     abort_hook();
1155   }
1156 }
1157 
1158 
1159 static HANDLE dumpFile = NULL;
1160 
1161 // Check if dump file can be created.
1162 void os::check_dump_limit(char* buffer, size_t buffsz) {
1163   bool status = true;
1164   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1165     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1166     status = false;
1167   }
1168 
1169 #ifndef ASSERT
1170   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1171     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1172     status = false;
1173   }
1174 #endif
1175 
1176   if (status) {
1177     const char* cwd = get_current_directory(NULL, 0);
1178     int pid = current_process_id();
1179     if (cwd != NULL) {
1180       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1181     } else {
1182       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1183     }
1184 
1185     if (dumpFile == NULL &&
1186        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1187                  == INVALID_HANDLE_VALUE) {
1188       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1189       status = false;
1190     }
1191   }
1192   VMError::record_coredump_status(buffer, status);
1193 }
1194 
1195 void os::abort(bool dump_core, void* siginfo, const void* context) {
1196   EXCEPTION_POINTERS ep;
1197   MINIDUMP_EXCEPTION_INFORMATION mei;
1198   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1199 
1200   HANDLE hProcess = GetCurrentProcess();
1201   DWORD processId = GetCurrentProcessId();
1202   MINIDUMP_TYPE dumpType;
1203 
1204   shutdown();
1205   if (!dump_core || dumpFile == NULL) {
1206     if (dumpFile != NULL) {
1207       CloseHandle(dumpFile);
1208     }
1209     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1210   }
1211 
1212   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1213     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1214 
1215   if (siginfo != NULL && context != NULL) {
1216     ep.ContextRecord = (PCONTEXT) context;
1217     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1218 
1219     mei.ThreadId = GetCurrentThreadId();
1220     mei.ExceptionPointers = &ep;
1221     pmei = &mei;
1222   } else {
1223     pmei = NULL;
1224   }
1225 
  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1228   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1229       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1230     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1231   }
1232   CloseHandle(dumpFile);
1233   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1234 }
1235 
1236 // Die immediately, no exit hook, no abort hook, no cleanup.
1237 void os::die() {
1238   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1239 }
1240 
1241 const char* os::dll_file_extension() { return ".dll"; }
1242 
1243 void  os::dll_unload(void *lib) {
1244   ::FreeLibrary((HMODULE)lib);
1245 }
1246 
1247 void* os::dll_lookup(void *lib, const char *name) {
1248   return (void*)::GetProcAddress((HMODULE)lib, name);
1249 }
1250 
1251 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1252 //  * dirent_md.c       1.15 00/02/02
1253 //
1254 // The declarations for DIR and struct dirent are in jvm_win32.h.
1255 
1256 // Caller must have already run dirname through JVM_NativePath, which removes
1257 // duplicate slashes and converts all instances of '/' into '\\'.
1258 
1259 DIR * os::opendir(const char *dirname) {
1260   assert(dirname != NULL, "just checking");   // hotspot change
1261   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1262   DWORD fattr;                                // hotspot change
1263   char alt_dirname[4] = { 0, 0, 0, 0 };
1264 
1265   if (dirp == 0) {
1266     errno = ENOMEM;
1267     return 0;
1268   }
1269 
1270   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1271   // as a directory in FindFirstFile().  We detect this case here and
1272   // prepend the current drive name.
1273   //
1274   if (dirname[1] == '\0' && dirname[0] == '\\') {
1275     alt_dirname[0] = _getdrive() + 'A' - 1;
1276     alt_dirname[1] = ':';
1277     alt_dirname[2] = '\\';
1278     alt_dirname[3] = '\0';
1279     dirname = alt_dirname;
1280   }
1281 
1282   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1283   if (dirp->path == 0) {
1284     free(dirp);
1285     errno = ENOMEM;
1286     return 0;
1287   }
1288   strcpy(dirp->path, dirname);
1289 
1290   fattr = GetFileAttributes(dirp->path);
1291   if (fattr == 0xffffffff) {
1292     free(dirp->path);
1293     free(dirp);
1294     errno = ENOENT;
1295     return 0;
1296   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1297     free(dirp->path);
1298     free(dirp);
1299     errno = ENOTDIR;
1300     return 0;
1301   }
1302 
1303   // Append "*.*", or possibly "\\*.*", to path
1304   if (dirp->path[1] == ':' &&
1305       (dirp->path[2] == '\0' ||
1306       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1307     // No '\\' needed for cases like "Z:" or "Z:\"
1308     strcat(dirp->path, "*.*");
1309   } else {
1310     strcat(dirp->path, "\\*.*");
1311   }
1312 
1313   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1314   if (dirp->handle == INVALID_HANDLE_VALUE) {
1315     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1316       free(dirp->path);
1317       free(dirp);
1318       errno = EACCES;
1319       return 0;
1320     }
1321   }
1322   return dirp;
1323 }
1324 
1325 struct dirent * os::readdir(DIR *dirp) {
1326   assert(dirp != NULL, "just checking");      // hotspot change
1327   if (dirp->handle == INVALID_HANDLE_VALUE) {
1328     return NULL;
1329   }
1330 
1331   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1332 
1333   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1334     if (GetLastError() == ERROR_INVALID_HANDLE) {
1335       errno = EBADF;
1336       return NULL;
1337     }
1338     FindClose(dirp->handle);
1339     dirp->handle = INVALID_HANDLE_VALUE;
1340   }
1341 
1342   return &dirp->dirent;
1343 }
1344 
1345 int os::closedir(DIR *dirp) {
1346   assert(dirp != NULL, "just checking");      // hotspot change
1347   if (dirp->handle != INVALID_HANDLE_VALUE) {
1348     if (!FindClose(dirp->handle)) {
1349       errno = EBADF;
1350       return -1;
1351     }
1352     dirp->handle = INVALID_HANDLE_VALUE;
1353   }
1354   free(dirp->path);
1355   free(dirp);
1356   return 0;
1357 }
1358 
// This must be hard-coded because it's the system's temporary
// directory, not the Java application's temp directory, a la java.io.tmpdir.
1361 const char* os::get_temp_directory() {
1362   static char path_buf[MAX_PATH];
1363   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1364     return path_buf;
1365   } else {
1366     path_buf[0] = '\0';
1367     return path_buf;
1368   }
1369 }
1370 
// Needs to be in the OS-specific directory because Windows requires another
// header file, <direct.h>
1373 const char* os::get_current_directory(char *buf, size_t buflen) {
1374   int n = static_cast<int>(buflen);
1375   if (buflen > INT_MAX)  n = INT_MAX;
1376   return _getcwd(buf, n);
1377 }
1378 
1379 //-----------------------------------------------------------
1380 // Helper functions for fatal error handler
1381 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1384 //
1385 static bool _addr_in_ntdll(address addr) {
1386   HMODULE hmod;
1387   MODULEINFO minfo;
1388 
1389   hmod = GetModuleHandle("NTDLL.DLL");
1390   if (hmod == NULL) return false;
1391   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1392                                           &minfo, sizeof(MODULEINFO))) {
1393     return false;
1394   }
1395 
1396   if ((addr >= minfo.lpBaseOfDll) &&
1397       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1398     return true;
1399   } else {
1400     return false;
1401   }
1402 }
1403 #endif
1404 
1405 struct _modinfo {
1406   address addr;
1407   char*   full_path;   // point to a char buffer
1408   int     buflen;      // size of the buffer
1409   address base_addr;
1410 };
1411 
1412 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1413                                   address top_address, void * param) {
1414   struct _modinfo *pmod = (struct _modinfo *)param;
1415   if (!pmod) return -1;
1416 
1417   if (base_addr   <= pmod->addr &&
1418       top_address > pmod->addr) {
1419     // if a buffer is provided, copy path name to the buffer
1420     if (pmod->full_path) {
1421       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1422     }
1423     pmod->base_addr = base_addr;
1424     return 1;
1425   }
1426   return 0;
1427 }
1428 
1429 bool os::dll_address_to_library_name(address addr, char* buf,
1430                                      int buflen, int* offset) {
1431   // buf is not optional, but offset is optional
1432   assert(buf != NULL, "sanity check");
1433 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1438 
1439   struct _modinfo mi;
1440   mi.addr      = addr;
1441   mi.full_path = buf;
1442   mi.buflen    = buflen;
1443   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1444     // buf already contains path name
1445     if (offset) *offset = addr - mi.base_addr;
1446     return true;
1447   }
1448 
1449   buf[0] = '\0';
1450   if (offset) *offset = -1;
1451   return false;
1452 }
1453 
1454 bool os::dll_address_to_function_name(address addr, char *buf,
1455                                       int buflen, int *offset,
1456                                       bool demangle) {
1457   // buf is not optional, but offset is optional
1458   assert(buf != NULL, "sanity check");
1459 
1460   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1461     return true;
1462   }
1463   if (offset != NULL)  *offset  = -1;
1464   buf[0] = '\0';
1465   return false;
1466 }
1467 
1468 // save the start and end address of jvm.dll into param[0] and param[1]
1469 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1470                            address top_address, void * param) {
1471   if (!param) return -1;
1472 
1473   if (base_addr   <= (address)_locate_jvm_dll &&
1474       top_address > (address)_locate_jvm_dll) {
1475     ((address*)param)[0] = base_addr;
1476     ((address*)param)[1] = top_address;
1477     return 1;
1478   }
1479   return 0;
1480 }
1481 
1482 address vm_lib_location[2];    // start and end address of jvm.dll
1483 
1484 // check if addr is inside jvm.dll
1485 bool os::address_is_in_vm(address addr) {
1486   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1487     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1488       assert(false, "Can't find jvm module.");
1489       return false;
1490     }
1491   }
1492 
1493   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1494 }
1495 
1496 // print module info; param is outputStream*
1497 static int _print_module(const char* fname, address base_address,
1498                          address top_address, void* param) {
1499   if (!param) return -1;
1500 
1501   outputStream* st = (outputStream*)param;
1502 
1503   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1504   return 0;
1505 }
1506 
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture that Hotspot is running on.
1510 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1511   log_info(os)("attempting shared library load of %s", name);
1512 
1513   void * result = LoadLibrary(name);
1514   if (result != NULL) {
1515     Events::log(NULL, "Loaded shared library %s", name);
1516     // Recalculate pdb search path if a DLL was loaded successfully.
1517     SymbolEngine::recalc_search_path();
1518     log_info(os)("shared library load of %s was successful", name);
1519     return result;
1520   }
1521   DWORD errcode = GetLastError();
1522   // Read system error message into ebuf
1523   // It may or may not be overwritten below (in the for loop and just above)
1524   lasterror(ebuf, (size_t) ebuflen);
1525   ebuf[ebuflen - 1] = '\0';
1526   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1527   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1528 
1529   if (errcode == ERROR_MOD_NOT_FOUND) {
1530     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1531     ebuf[ebuflen - 1] = '\0';
1532     return NULL;
1533   }
1534 
  // Parse the dll below.
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one HotSpot is running on,
  // then print "DLL was built for a different architecture" into the buffer;
  // otherwise report the os::lasterror(...) message obtained above.
1540   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1541   if (fd < 0) {
1542     return NULL;
1543   }
1544 
1545   uint32_t signature_offset;
1546   uint16_t lib_arch = 0;
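  // PE layout: the 4-byte value at file offset 0x3C (IMAGE_FILE_PTR_TO_SIGNATURE) is the
  // offset of the "PE\0\0" signature; the COFF File Header follows the signature, and its
  // first field (Machine, 16 bits) identifies the architecture the dll was built for.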
1547   bool failed_to_get_lib_arch =
    ( // Seek to offset 0x3C in the dll, which holds the file offset of the PE signature
1549      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1550      ||
1551      // Read location of signature
1552      (sizeof(signature_offset) !=
1553      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1554      ||
1555      // Go to COFF File Header in dll
1556      // that is located after "signature" (4 bytes long)
1557      (os::seek_to_file_offset(fd,
1558      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1559      ||
1560      // Read field that contains code of architecture
1561      // that dll was built for
1562      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1563     );
1564 
1565   ::close(fd);
1566   if (failed_to_get_lib_arch) {
1567     // file i/o error - report os::lasterror(...) msg
1568     return NULL;
1569   }
1570 
  typedef struct {
    uint16_t arch_code;
    const char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      "IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     "AMD 64"},
    {IMAGE_FILE_MACHINE_ARM64,     "ARM 64"}
  };
1581 #if (defined _M_ARM64)
1582   static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
1583 #elif (defined _M_AMD64)
1584   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1585 #elif (defined _M_IX86)
1586   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1587 #else
  #error Method os::dll_load requires that one of the following \
         is defined: _M_AMD64, _M_IX86 or _M_ARM64
1590 #endif
1591 
1592 
  // Obtain strings for the error message below:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform HotSpot was built for.
  const char *running_arch_str = NULL, *lib_arch_str = NULL;
1597   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1598     if (lib_arch == arch_array[i].arch_code) {
1599       lib_arch_str = arch_array[i].arch_name;
1600     }
1601     if (running_arch == arch_array[i].arch_code) {
1602       running_arch_str = arch_array[i].arch_name;
1603     }
1604   }
1605 
1606   assert(running_arch_str,
1607          "Didn't find running architecture code in arch_array");
1608 
1609   // If the architecture is right
1610   // but some other error took place - report os::lasterror(...) msg
1611   if (lib_arch == running_arch) {
1612     return NULL;
1613   }
1614 
1615   if (lib_arch_str != NULL) {
1616     ::_snprintf(ebuf, ebuflen - 1,
1617                 "Can't load %s-bit .dll on a %s-bit platform",
1618                 lib_arch_str, running_arch_str);
1619   } else {
    // don't know what architecture this dll was built for
1621     ::_snprintf(ebuf, ebuflen - 1,
1622                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1623                 lib_arch, running_arch_str);
1624   }
1625 
1626   return NULL;
1627 }
1628 
1629 void os::print_dll_info(outputStream *st) {
1630   st->print_cr("Dynamic libraries:");
1631   get_loaded_modules_info(_print_module, (void *)st);
1632 }
1633 
1634 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1635   HANDLE   hProcess;
1636 
1637 # define MAX_NUM_MODULES 128
1638   HMODULE     modules[MAX_NUM_MODULES];
1639   static char filename[MAX_PATH];
1640   int         result = 0;
1641 
1642   int pid = os::current_process_id();
1643   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1644                          FALSE, pid);
1645   if (hProcess == NULL) return 0;
1646 
1647   DWORD size_needed;
1648   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1649     CloseHandle(hProcess);
1650     return 0;
1651   }
1652 
1653   // number of modules that are currently loaded
1654   int num_modules = size_needed / sizeof(HMODULE);
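  // Note: size_needed reports the space required for all loaded modules; if more than
  // MAX_NUM_MODULES are loaded, only the first MAX_NUM_MODULES are visited below.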
1655 
1656   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1657     // Get Full pathname:
1658     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1659       filename[0] = '\0';
1660     }
1661 
1662     MODULEINFO modinfo;
1663     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1664       modinfo.lpBaseOfDll = NULL;
1665       modinfo.SizeOfImage = 0;
1666     }
1667 
1668     // Invoke callback function
1669     result = callback(filename, (address)modinfo.lpBaseOfDll,
1670                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1671     if (result) break;
1672   }
1673 
1674   CloseHandle(hProcess);
1675   return result;
1676 }
1677 
1678 bool os::get_host_name(char* buf, size_t buflen) {
1679   DWORD size = (DWORD)buflen;
1680   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1681 }
1682 
1683 void os::get_summary_os_info(char* buf, size_t buflen) {
1684   stringStream sst(buf, buflen);
1685   os::win32::print_windows_version(&sst);
1686   // chop off newline character
1687   char* nl = strchr(buf, '\n');
1688   if (nl != NULL) *nl = '\0';
1689 }
1690 
1691 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1692 #if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1694   int result = ::vsnprintf(buf, len, fmt, args);
1695   // If an encoding error occurred (result < 0) then it's not clear
1696   // whether the buffer is NUL terminated, so ensure it is.
1697   if ((result < 0) && (len > 0)) {
1698     buf[len - 1] = '\0';
1699   }
1700   return result;
1701 #else
1702   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1703   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1704   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1705   // go straight to _vscprintf.  The output is going to be truncated in
1706   // that case, except in the unusual case of empty output.  More
1707   // importantly, the documentation for various versions of Visual Studio
1708   // are inconsistent about the behavior of _vsnprintf when len == 0,
1709   // including it possibly being an error.
1710   int result = -1;
1711   if (len > 0) {
1712     result = _vsnprintf(buf, len, fmt, args);
1713     // If output (including NUL terminator) is truncated, the buffer
1714     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1715     if ((result < 0) || ((size_t)result >= len)) {
1716       buf[len - 1] = '\0';
1717     }
1718   }
1719   if (result < 0) {
1720     result = _vscprintf(fmt, args);
1721   }
1722   return result;
1723 #endif // _MSC_VER dispatch
1724 }
1725 
1726 static inline time_t get_mtime(const char* filename) {
1727   struct stat st;
1728   int ret = os::stat(filename, &st);
1729   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1730   return st.st_mtime;
1731 }
1732 
1733 int os::compare_file_modified_times(const char* file1, const char* file2) {
1734   time_t t1 = get_mtime(file1);
1735   time_t t2 = get_mtime(file2);
1736   return t1 - t2;
1737 }
1738 
1739 void os::print_os_info_brief(outputStream* st) {
1740   os::print_os_info(st);
1741 }
1742 
1743 void os::win32::print_uptime_info(outputStream* st) {
1744   unsigned long long ticks = GetTickCount64();
1745   os::print_dhm(st, "OS uptime:", ticks/1000);
1746 }
1747 
1748 void os::print_os_info(outputStream* st) {
1749 #ifdef ASSERT
1750   char buffer[1024];
1751   st->print("HostName: ");
1752   if (get_host_name(buffer, sizeof(buffer))) {
    st->print_cr("%s", buffer);
1754   } else {
1755     st->print_cr("N/A");
1756   }
1757 #endif
1758   st->print_cr("OS:");
1759   os::win32::print_windows_version(st);
1760 
1761   os::win32::print_uptime_info(st);
1762 
1763   VM_Version::print_platform_virtualization_info(st);
1764 }
1765 
1766 void os::win32::print_windows_version(outputStream* st) {
1767   OSVERSIONINFOEX osvi;
1768   VS_FIXEDFILEINFO *file_info;
1769   TCHAR kernel32_path[MAX_PATH];
1770   UINT len, ret;
1771 
1772   // Use the GetVersionEx information to see if we're on a server or
1773   // workstation edition of Windows. Starting with Windows 8.1 we can't
1774   // trust the OS version information returned by this API.
1775   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1776   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1777   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1778     st->print_cr("Call to GetVersionEx failed");
1779     return;
1780   }
1781   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1782 
1783   // Get the full path to \Windows\System32\kernel32.dll and use that for
1784   // determining what version of Windows we're running on.
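  // Leave room in kernel32_path for the "\kernel32.dll" suffix and the terminating NUL.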
1785   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1786   ret = GetSystemDirectory(kernel32_path, len);
1787   if (ret == 0 || ret > len) {
1788     st->print_cr("Call to GetSystemDirectory failed");
1789     return;
1790   }
1791   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1792 
1793   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1794   if (version_size == 0) {
1795     st->print_cr("Call to GetFileVersionInfoSize failed");
1796     return;
1797   }
1798 
1799   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1800   if (version_info == NULL) {
1801     st->print_cr("Failed to allocate version_info");
1802     return;
1803   }
1804 
1805   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1806     os::free(version_info);
1807     st->print_cr("Call to GetFileVersionInfo failed");
1808     return;
1809   }
1810 
1811   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1812     os::free(version_info);
1813     st->print_cr("Call to VerQueryValue failed");
1814     return;
1815   }
1816 
1817   int major_version = HIWORD(file_info->dwProductVersionMS);
1818   int minor_version = LOWORD(file_info->dwProductVersionMS);
1819   int build_number = HIWORD(file_info->dwProductVersionLS);
1820   int build_minor = LOWORD(file_info->dwProductVersionLS);
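  // Encode major.minor as major * 1000 + minor (e.g. 6.3 -> 6003, 10.0 -> 10000)
  // to drive the version switch below.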
1821   int os_vers = major_version * 1000 + minor_version;
1822   os::free(version_info);
1823 
1824   st->print(" Windows ");
1825   switch (os_vers) {
1826 
1827   case 6000:
1828     if (is_workstation) {
1829       st->print("Vista");
1830     } else {
1831       st->print("Server 2008");
1832     }
1833     break;
1834 
1835   case 6001:
1836     if (is_workstation) {
1837       st->print("7");
1838     } else {
1839       st->print("Server 2008 R2");
1840     }
1841     break;
1842 
1843   case 6002:
1844     if (is_workstation) {
1845       st->print("8");
1846     } else {
1847       st->print("Server 2012");
1848     }
1849     break;
1850 
1851   case 6003:
1852     if (is_workstation) {
1853       st->print("8.1");
1854     } else {
1855       st->print("Server 2012 R2");
1856     }
1857     break;
1858 
1859   case 10000:
1860     if (is_workstation) {
1861       if (build_number >= 22000) {
1862         st->print("11");
1863       } else {
1864         st->print("10");
1865       }
1866     } else {
1867       // distinguish Windows Server by build number
1868       // - 2016 GA 10/2016 build: 14393
1869       // - 2019 GA 11/2018 build: 17763
1870       // - 2022 GA 08/2021 build: 20348
1871       if (build_number > 20347) {
1872         st->print("Server 2022");
1873       } else if (build_number > 17762) {
1874         st->print("Server 2019");
1875       } else {
1876         st->print("Server 2016");
1877       }
1878     }
1879     break;
1880 
1881   default:
    // Unrecognized Windows version; print its major and minor version numbers
1883     st->print("%d.%d", major_version, minor_version);
1884     break;
1885   }
1886 
  // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
  // whether we are running on a 64-bit processor or not
1889   SYSTEM_INFO si;
1890   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1891   GetNativeSystemInfo(&si);
1892   if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
1893       (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
1894     st->print(" , 64 bit");
1895   }
1896 
1897   st->print(" Build %d", build_number);
1898   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1899   st->cr();
1900 }
1901 
1902 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1903   // Nothing to do for now.
1904 }
1905 
1906 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1907   HKEY key;
1908   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1909                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1910   if (status == ERROR_SUCCESS) {
1911     DWORD size = (DWORD)buflen;
1912     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1913     if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
1915     }
1916     RegCloseKey(key);
1917   } else {
    // Return generic cpu info
1919     strncpy(buf, "## __CPU__", buflen);
1920   }
1921 }
1922 
1923 void os::print_memory_info(outputStream* st) {
1924   st->print("Memory:");
1925   st->print(" %dk page", os::vm_page_size()>>10);
1926 
1927   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1928   // value if total memory is larger than 4GB
1929   MEMORYSTATUSEX ms;
1930   ms.dwLength = sizeof(ms);
1931   int r1 = GlobalMemoryStatusEx(&ms);
1932 
1933   if (r1 != 0) {
1934     st->print(", system-wide physical " INT64_FORMAT "M ",
1935              (int64_t) ms.ullTotalPhys >> 20);
1936     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1937 
1938     st->print("TotalPageFile size " INT64_FORMAT "M ",
1939              (int64_t) ms.ullTotalPageFile >> 20);
1940     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1941              (int64_t) ms.ullAvailPageFile >> 20);
1942 
1943     // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
1944 #if defined(_M_IX86)
1945     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1946              (int64_t) ms.ullTotalVirtual >> 20);
1947     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1948 #endif
1949   } else {
1950     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1951   }
1952 
1953   // extended memory statistics for a process
1954   PROCESS_MEMORY_COUNTERS_EX pmex;
1955   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1956   pmex.cb = sizeof(pmex);
1957   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1958 
1959   if (r2 != 0) {
1960     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1961              (int64_t) pmex.WorkingSetSize >> 20);
1962     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1963 
1964     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1965              (int64_t) pmex.PrivateUsage >> 20);
1966     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1967   } else {
1968     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1969   }
1970 
1971   st->cr();
1972 }
1973 
1974 bool os::signal_sent_by_kill(const void* siginfo) {
1975   // TODO: Is this possible?
1976   return false;
1977 }
1978 
1979 void os::print_siginfo(outputStream *st, const void* siginfo) {
1980   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1981   st->print("siginfo:");
1982 
1983   char tmp[64];
1984   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1985     strcpy(tmp, "EXCEPTION_??");
1986   }
1987   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1988 
1989   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1990        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1991        er->NumberParameters >= 2) {
1992     switch (er->ExceptionInformation[0]) {
1993     case 0: st->print(", reading address"); break;
1994     case 1: st->print(", writing address"); break;
1995     case 8: st->print(", data execution prevention violation at address"); break;
1996     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1997                        er->ExceptionInformation[0]);
1998     }
1999     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
2000   } else {
2001     int num = er->NumberParameters;
2002     if (num > 0) {
2003       st->print(", ExceptionInformation=");
2004       for (int i = 0; i < num; i++) {
2005         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
2006       }
2007     }
2008   }
2009   st->cr();
2010 }
2011 
2012 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
2013   // TODO: Can we kill thread?
2014   return false;
2015 }
2016 
2017 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2018   // do nothing
2019 }
2020 
2021 static char saved_jvm_path[MAX_PATH] = {0};
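// saved_jvm_path is filled in lazily by os::jvm_path() on first use.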
2022 
2023 // Find the full path to the current module, jvm.dll
2024 void os::jvm_path(char *buf, jint buflen) {
2025   // Error checking.
2026   if (buflen < MAX_PATH) {
2027     assert(false, "must use a large-enough buffer");
2028     buf[0] = '\0';
2029     return;
2030   }
2031   // Lazy resolve the path to current module.
2032   if (saved_jvm_path[0] != 0) {
2033     strcpy(buf, saved_jvm_path);
2034     return;
2035   }
2036 
2037   buf[0] = '\0';
2038   if (Arguments::sun_java_launcher_is_altjvm()) {
2039     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
2040     // for a JAVA_HOME environment variable and fix up the path so it
2041     // looks like jvm.dll is installed there (append a fake suffix
2042     // hotspot/jvm.dll).
2043     char* java_home_var = ::getenv("JAVA_HOME");
2044     if (java_home_var != NULL && java_home_var[0] != 0 &&
2045         strlen(java_home_var) < (size_t)buflen) {
2046       strncpy(buf, java_home_var, buflen);
2047 
      // determine if this is a legacy image or a modules image;
      // a modules image doesn't have a "jre" subdirectory
2050       size_t len = strlen(buf);
2051       char* jrebin_p = buf + len;
2052       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
2053       if (0 != _access(buf, 0)) {
2054         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
2055       }
2056       len = strlen(buf);
2057       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
2058     }
2059   }
2060 
2061   if (buf[0] == '\0') {
2062     GetModuleFileName(vm_lib_handle, buf, buflen);
2063   }
2064   strncpy(saved_jvm_path, buf, MAX_PATH);
2065   saved_jvm_path[MAX_PATH - 1] = '\0';
2066 }
2067 
2068 
2069 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2070 #ifndef _WIN64
2071   st->print("_");
2072 #endif
2073 }
2074 
2075 
2076 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2077 #ifndef _WIN64
2078   st->print("@%d", args_size  * sizeof(int));
2079 #endif
2080 }
2081 
2082 // This method is a copy of JDK's sysGetLastErrorString
2083 // from src/windows/hpi/src/system_md.c
2084 
2085 size_t os::lasterror(char* buf, size_t len) {
2086   DWORD errval;
2087 
2088   if ((errval = GetLastError()) != 0) {
2089     // DOS error
2090     size_t n = (size_t)FormatMessage(
2091                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
2092                                      NULL,
2093                                      errval,
2094                                      0,
2095                                      buf,
2096                                      (DWORD)len,
2097                                      NULL);
2098     if (n > 3) {
2099       // Drop final '.', CR, LF
2100       if (buf[n - 1] == '\n') n--;
2101       if (buf[n - 1] == '\r') n--;
2102       if (buf[n - 1] == '.') n--;
2103       buf[n] = '\0';
2104     }
2105     return n;
2106   }
2107 
2108   if (errno != 0) {
2109     // C runtime error that has no corresponding DOS error code
2110     const char* s = os::strerror(errno);
2111     size_t n = strlen(s);
2112     if (n >= len) n = len - 1;
2113     strncpy(buf, s, n);
2114     buf[n] = '\0';
2115     return n;
2116   }
2117 
2118   return 0;
2119 }
2120 
2121 int os::get_last_error() {
2122   DWORD error = GetLastError();
2123   if (error == 0) {
2124     error = errno;
2125   }
2126   return (int)error;
2127 }
2128 
2129 // sun.misc.Signal
2130 // NOTE that this is a workaround for an apparent kernel bug where if
2131 // a signal handler for SIGBREAK is installed then that signal handler
2132 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
2133 // See bug 4416763.
2134 static void (*sigbreakHandler)(int) = NULL;
2135 
2136 static void UserHandler(int sig, void *siginfo, void *context) {
2137   os::signal_notify(sig);
2138   // We need to reinstate the signal handler each time...
2139   os::signal(sig, (void*)UserHandler);
2140 }
2141 
2142 void* os::user_handler() {
2143   return (void*) UserHandler;
2144 }
2145 
2146 void* os::signal(int signal_number, void* handler) {
2147   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
2148     void (*oldHandler)(int) = sigbreakHandler;
2149     sigbreakHandler = (void (*)(int)) handler;
2150     return (void*) oldHandler;
2151   } else {
2152     return (void*)::signal(signal_number, (void (*)(int))handler);
2153   }
2154 }
2155 
2156 void os::signal_raise(int signal_number) {
2157   raise(signal_number);
2158 }
2159 
2160 // The Win32 C runtime library maps all console control events other than ^C
2161 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2162 // logoff, and shutdown events.  We therefore install our own console handler
2163 // that raises SIGTERM for the latter cases.
2164 //
2165 static BOOL WINAPI consoleHandler(DWORD event) {
2166   switch (event) {
2167   case CTRL_C_EVENT:
2168     if (VMError::is_error_reported()) {
2169       // Ctrl-C is pressed during error reporting, likely because the error
2170       // handler fails to abort. Let VM die immediately.
2171       os::die();
2172     }
2173 
2174     os::signal_raise(SIGINT);
2175     return TRUE;
2176     break;
2177   case CTRL_BREAK_EVENT:
2178     if (sigbreakHandler != NULL) {
2179       (*sigbreakHandler)(SIGBREAK);
2180     }
2181     return TRUE;
2182     break;
2183   case CTRL_LOGOFF_EVENT: {
2184     // Don't terminate JVM if it is running in a non-interactive session,
2185     // such as a service process.
2186     USEROBJECTFLAGS flags;
2187     HANDLE handle = GetProcessWindowStation();
2188     if (handle != NULL &&
2189         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2190         sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let the next handler deal
      // with it.
2193       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2194         return FALSE;
2195       }
2196     }
2197   }
2198   case CTRL_CLOSE_EVENT:
2199   case CTRL_SHUTDOWN_EVENT:
2200     os::signal_raise(SIGTERM);
2201     return TRUE;
2202     break;
2203   default:
2204     break;
2205   }
2206   return FALSE;
2207 }
2208 
// The following code was moved from os.cpp to make this
// code platform specific, which it is by its very nature.
2211 
2212 // Return maximum OS signal used + 1 for internal use only
2213 // Used as exit signal for signal_thread
2214 int os::sigexitnum_pd() {
2215   return NSIG;
2216 }
2217 
2218 // a counter for each possible signal value, including signal_thread exit signal
2219 static volatile jint pending_signals[NSIG+1] = { 0 };
2220 static Semaphore* sig_sem = NULL;
2221 
2222 static void jdk_misc_signal_init() {
2223   // Initialize signal structures
2224   memset((void*)pending_signals, 0, sizeof(pending_signals));
2225 
2226   // Initialize signal semaphore
2227   sig_sem = new Semaphore();
2228 
2229   // Programs embedding the VM do not want it to attempt to receive
2230   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2231   // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (e.g., a servlet
2233   // engine in a web server), the correct behavior is for any console
2234   // control handler to return FALSE, not TRUE, because the OS's
2235   // "final" handler for such events allows the process to continue if
2236   // it is a service (while terminating it if it is not a service).
2237   // To make this behavior uniform and the mechanism simpler, we
2238   // completely disable the VM's usage of these console events if -Xrs
2239   // (=ReduceSignalUsage) is specified.  This means, for example, that
2240   // the CTRL-BREAK thread dump mechanism is also disabled in this
2241   // case.  See bugs 4323062, 4345157, and related bugs.
2242 
2243   // Add a CTRL-C handler
2244   SetConsoleCtrlHandler(consoleHandler, TRUE);
2245 }
2246 
2247 void os::signal_notify(int sig) {
2248   if (sig_sem != NULL) {
2249     Atomic::inc(&pending_signals[sig]);
2250     sig_sem->signal();
2251   } else {
2252     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
2253     // initialization isn't called.
2254     assert(ReduceSignalUsage, "signal semaphore should be created");
2255   }
2256 }
2257 
2258 static int check_pending_signals() {
2259   while (true) {
2260     for (int i = 0; i < NSIG + 1; i++) {
2261       jint n = pending_signals[i];
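      // Atomically decrement the pending count for signal i; if the cmpxchg
      // succeeds, this thread owns one occurrence of that signal.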
2262       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2263         return i;
2264       }
2265     }
2266     sig_sem->wait_with_safepoint_check(JavaThread::current());
2267   }
2268   ShouldNotReachHere();
2269   return 0; // Satisfy compiler
2270 }
2271 
2272 int os::signal_wait() {
2273   return check_pending_signals();
2274 }
2275 
2276 // Implicit OS exception handling
2277 
2278 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2279                       address handler) {
2280   Thread* thread = Thread::current_or_null();
2281 
2282 #if defined(_M_ARM64)
2283   #define PC_NAME Pc
2284 #elif defined(_M_AMD64)
2285   #define PC_NAME Rip
2286 #elif defined(_M_IX86)
2287   #define PC_NAME Eip
2288 #else
2289   #error unknown architecture
2290 #endif
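  // PC_NAME selects the architecture-specific program-counter field of the Windows CONTEXT record.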
2291 
2292   // Save pc in thread
2293   if (thread != nullptr && thread->is_Java_thread()) {
2294     JavaThread::cast(thread)->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->PC_NAME);
2295   }
2296 
2297   // Set pc to handler
2298   exceptionInfo->ContextRecord->PC_NAME = (DWORD64)handler;
2299 
2300   // Continue the execution
2301   return EXCEPTION_CONTINUE_EXECUTION;
2302 }
2303 
2304 
2305 // Used for PostMortemDump
2306 extern "C" void safepoints();
2307 extern "C" void find(int x);
2308 extern "C" void events();
2309 
2310 // According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real-world experience shows that occasionally
2312 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2313 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2314 
2315 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2316 
2317 // From "Execution Protection in the Windows Operating System" draft 0.35
2318 // Once a system header becomes available, the "real" define should be
2319 // included or copied here.
2320 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2321 
2322 // Windows Vista/2008 heap corruption check
2323 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2324 
2325 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2326 // C++ compiler contain this error code. Because this is a compiler-generated
2327 // error, the code is not listed in the Win32 API header files.
2328 // The code is actually a cryptic mnemonic device, with the initial "E"
2329 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2330 // ASCII values of "msc".
2331 
2332 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2333 
2334 #define def_excpt(val) { #val, (val) }
2335 
2336 static const struct { const char* name; uint number; } exceptlabels[] = {
2337     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2338     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2339     def_excpt(EXCEPTION_BREAKPOINT),
2340     def_excpt(EXCEPTION_SINGLE_STEP),
2341     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2342     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2343     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2344     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2345     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2346     def_excpt(EXCEPTION_FLT_OVERFLOW),
2347     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2348     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2349     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2350     def_excpt(EXCEPTION_INT_OVERFLOW),
2351     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2352     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2353     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2354     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2355     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2356     def_excpt(EXCEPTION_STACK_OVERFLOW),
2357     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2358     def_excpt(EXCEPTION_GUARD_PAGE),
2359     def_excpt(EXCEPTION_INVALID_HANDLE),
2360     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2361     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2362 };
2363 
2364 #undef def_excpt
2365 
2366 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2367   uint code = static_cast<uint>(exception_code);
2368   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2369     if (exceptlabels[i].number == code) {
2370       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2371       return buf;
2372     }
2373   }
2374 
2375   return NULL;
2376 }
2377 
2378 //-----------------------------------------------------------------------------
2379 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2380   // handle exception caused by idiv; should only happen for -MinInt/-1
2381   // (division by zero is handled explicitly)
2382 #if defined(_M_ARM64)
2383   PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Pc;
  assert(pc[0] == 0x83, "not an sdiv opcode"); // FIXME: did I get the right opcode?
2386   assert(ctx->X4 == min_jint, "unexpected idiv exception");
2387   // set correct result values and continue after idiv instruction
2388   ctx->Pc = (uint64_t)pc + 4;        // idiv reg, reg, reg  is 4 bytes
2389   ctx->X4 = (uint64_t)min_jint;      // result
2390   ctx->X5 = (uint64_t)0;             // remainder
2391   // Continue the execution
2392 #elif defined(_M_AMD64)
2393   PCONTEXT ctx = exceptionInfo->ContextRecord;
2394   address pc = (address)ctx->Rip;
2395   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2396   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2397   if (pc[0] == 0xF7) {
2398     // set correct result values and continue after idiv instruction
2399     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2400   } else {
2401     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2402   }
2403   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2404   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2405   // idiv opcode (0xF7).
2406   ctx->Rdx = (DWORD)0;             // remainder
2407   // Continue the execution
2408 #else
2409   PCONTEXT ctx = exceptionInfo->ContextRecord;
2410   address pc = (address)ctx->Eip;
2411   assert(pc[0] == 0xF7, "not an idiv opcode");
2412   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2413   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2414   // set correct result values and continue after idiv instruction
2415   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2416   ctx->Eax = (DWORD)min_jint;      // result
2417   ctx->Edx = (DWORD)0;             // remainder
2418   // Continue the execution
2419 #endif
2420   return EXCEPTION_CONTINUE_EXECUTION;
2421 }
2422 
2423 #if defined(_M_AMD64) || defined(_M_IX86)
2424 //-----------------------------------------------------------------------------
2425 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2426   PCONTEXT ctx = exceptionInfo->ContextRecord;
2427 #ifndef  _WIN64
2428   // handle exception caused by native method modifying control word
2429   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2430 
2431   switch (exception_code) {
2432   case EXCEPTION_FLT_DENORMAL_OPERAND:
2433   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2434   case EXCEPTION_FLT_INEXACT_RESULT:
2435   case EXCEPTION_FLT_INVALID_OPERATION:
2436   case EXCEPTION_FLT_OVERFLOW:
2437   case EXCEPTION_FLT_STACK_CHECK:
2438   case EXCEPTION_FLT_UNDERFLOW:
2439     jint fp_control_word = (* (jint*) StubRoutines::x86::addr_fpu_cntrl_wrd_std());
2440     if (fp_control_word != ctx->FloatSave.ControlWord) {
2441       // Restore FPCW and mask out FLT exceptions
2442       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2443       // Mask out pending FLT exceptions
2444       ctx->FloatSave.StatusWord &=  0xffffff00;
2445       return EXCEPTION_CONTINUE_EXECUTION;
2446     }
2447   }
2448 
2449   if (prev_uef_handler != NULL) {
2450     // We didn't handle this exception so pass it to the previous
2451     // UnhandledExceptionFilter.
2452     return (prev_uef_handler)(exceptionInfo);
2453   }
2454 #else // !_WIN64
2455   // On Windows, the mxcsr control bits are non-volatile across calls
2456   // See also CR 6192333
2457   //
2458   jint MxCsr = INITIAL_MXCSR;
2459   // we can't use StubRoutines::x86::addr_mxcsr_std()
2460   // because in Win64 mxcsr is not saved there
2461   if (MxCsr != ctx->MxCsr) {
2462     ctx->MxCsr = MxCsr;
2463     return EXCEPTION_CONTINUE_EXECUTION;
2464   }
2465 #endif // !_WIN64
2466 
2467   return EXCEPTION_CONTINUE_SEARCH;
2468 }
2469 #endif
2470 
2471 static inline void report_error(Thread* t, DWORD exception_code,
2472                                 address addr, void* siginfo, void* context) {
2473   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2474 
2475   // If UseOSErrorReporting, this will return here and save the error file
2476   // somewhere where we can find it in the minidump.
2477 }
2478 
2479 //-----------------------------------------------------------------------------
2480 JNIEXPORT
2481 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2482   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2483   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2484   DWORD exception_code = exception_record->ExceptionCode;
2485 #if defined(_M_ARM64)
2486   address pc = (address) exceptionInfo->ContextRecord->Pc;
2487 #elif defined(_M_AMD64)
2488   address pc = (address) exceptionInfo->ContextRecord->Rip;
2489 #else
2490   address pc = (address) exceptionInfo->ContextRecord->Eip;
2491 #endif
2492   Thread* t = Thread::current_or_null_safe();
2493 
2494   // Handle SafeFetch32 and SafeFetchN exceptions.
2495   if (StubRoutines::is_safefetch_fault(pc)) {
2496     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2497   }
2498 
2499 #ifndef _WIN64
2500   // Execution protection violation - win32 running on AMD64 only
2501   // Handled first to avoid misdiagnosis as a "normal" access violation;
2502   // This is safe to do because we have a new/unique ExceptionInformation
2503   // code for this condition.
2504   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2505     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2506     address addr = (address) exception_record->ExceptionInformation[1];
2507 
2508     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2509       int page_size = os::vm_page_size();
2510 
2511       // Make sure the pc and the faulting address are sane.
2512       //
2513       // If an instruction spans a page boundary, and the page containing
2514       // the beginning of the instruction is executable but the following
2515       // page is not, the pc and the faulting address might be slightly
2516       // different - we still want to unguard the 2nd page in this case.
2517       //
2518       // 15 bytes seems to be a (very) safe value for max instruction size.
2519       bool pc_is_near_addr =
2520         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2521       bool instr_spans_page_boundary =
2522         (align_down((intptr_t) pc ^ (intptr_t) addr,
2523                          (intptr_t) page_size) > 0);
2524 
2525       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2526         static volatile address last_addr =
2527           (address) os::non_memory_address_word();
2528 
2529         // In conservative mode, don't unguard unless the address is in the VM
2530         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2531             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2532 
2533           // Set memory to RWX and retry
2534           address page_start = align_down(addr, page_size);
2535           bool res = os::protect_memory((char*) page_start, page_size,
2536                                         os::MEM_PROT_RWX);
2537 
2538           log_debug(os)("Execution protection violation "
2539                         "at " INTPTR_FORMAT
2540                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2541                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2542 
2543           // Set last_addr so if we fault again at the same address, we don't
2544           // end up in an endless loop.
2545           //
2546           // There are two potential complications here.  Two threads trapping
2547           // at the same address at the same time could cause one of the
2548           // threads to think it already unguarded, and abort the VM.  Likely
2549           // very rare.
2550           //
2551           // The other race involves two threads alternately trapping at
2552           // different addresses and failing to unguard the page, resulting in
2553           // an endless loop.  This condition is probably even more unlikely
2554           // than the first.
2555           //
2556           // Although both cases could be avoided by using locks or thread
2557           // local last_addr, these solutions are unnecessary complication:
2558           // this handler is a best-effort safety net, not a complete solution.
2559           // It is disabled by default and should only be used as a workaround
2560           // in case we missed any no-execute-unsafe VM code.
2561 
2562           last_addr = addr;
2563 
2564           return EXCEPTION_CONTINUE_EXECUTION;
2565         }
2566       }
2567 
2568       // Last unguard failed or not unguarding
2569       tty->print_raw_cr("Execution protection violation");
2570 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2571       report_error(t, exception_code, addr, exception_record,
2572                    exceptionInfo->ContextRecord);
2573 #endif
2574       return EXCEPTION_CONTINUE_SEARCH;
2575     }
2576   }
2577 #endif // _WIN64
2578 
2579 #if defined(_M_AMD64) || defined(_M_IX86)
2580   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2581       VM_Version::is_cpuinfo_segv_addr(pc)) {
2582     // Verify that OS save/restore AVX registers.
2583     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2584   }
2585 #endif
2586 
2587   if (t != NULL && t->is_Java_thread()) {
2588     JavaThread* thread = JavaThread::cast(t);
2589     bool in_java = thread->thread_state() == _thread_in_Java;
2590     bool in_native = thread->thread_state() == _thread_in_native;
2591     bool in_vm = thread->thread_state() == _thread_in_vm;
2592 
2593     // Handle potential stack overflows up front.
2594     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2595       StackOverflow* overflow_state = thread->stack_overflow_state();
2596       if (overflow_state->stack_guards_enabled()) {
2597         if (in_java) {
2598           frame fr;
2599           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2600             assert(fr.is_java_frame(), "Must be a Java frame");
2601             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2602           }
2603         }
2604         // Yellow zone violation.  The o/s has unprotected the first yellow
2605         // zone page for us.  Note:  must call disable_stack_yellow_zone to
2606         // update the enabled status, even if the zone contains only one page.
2607         assert(!in_vm, "Undersized StackShadowPages");
2608         overflow_state->disable_stack_yellow_reserved_zone();
2609         // If not in java code, return and hope for the best.
2610         return in_java
2611             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
2613       } else {
2614         // Fatal red zone violation.
2615         overflow_state->disable_stack_red_zone();
2616         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2617 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2618         report_error(t, exception_code, pc, exception_record,
2619                       exceptionInfo->ContextRecord);
2620 #endif
2621         return EXCEPTION_CONTINUE_SEARCH;
2622       }
2623     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2624       if (in_java) {
2625         // Either stack overflow or null pointer exception.
2626         address addr = (address) exception_record->ExceptionInformation[1];
2627         address stack_end = thread->stack_end();
2628         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2629           // Stack overflow.
2630           assert(!os::uses_stack_guard_pages(),
2631                  "should be caught by red zone code above.");
2632           return Handle_Exception(exceptionInfo,
2633                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2634         }
2635         // Check for safepoint polling and implicit null
2636         // We only expect null pointers in the stubs (vtable)
2637         // the rest are checked explicitly now.
2638         CodeBlob* cb = CodeCache::find_blob(pc);
2639         if (cb != NULL) {
2640           if (SafepointMechanism::is_poll_address(addr)) {
2641             address stub = SharedRuntime::get_poll_stub(pc);
2642             return Handle_Exception(exceptionInfo, stub);
2643           }
2644         }
2645 #ifdef _WIN64
2646         // If it's a legal stack address map the entire region in
2647         if (thread->is_in_usable_stack(addr)) {
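          // Align the faulting address down to a page boundary and commit
          // everything from there up to the stack base.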
2648           addr = (address)((uintptr_t)addr &
2649                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2650           os::commit_memory((char *)addr, thread->stack_base() - addr,
2651                             !ExecMem);
2652           return EXCEPTION_CONTINUE_EXECUTION;
2653         }
2654 #endif
2655         // Null pointer exception.
2656         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2657           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2658           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2659         }
2660         report_error(t, exception_code, pc, exception_record,
2661                       exceptionInfo->ContextRecord);
2662         return EXCEPTION_CONTINUE_SEARCH;
2663       }
2664 
2665 #ifdef _WIN64
2666       // Special care for fast JNI field accessors.
2667       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2668       // in and the heap gets shrunk before the field access.
2669       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2670       if (slowcase_pc != (address)-1) {
2671         return Handle_Exception(exceptionInfo, slowcase_pc);
2672       }
2673 #endif
2674 
2675       // Stack overflow or null pointer exception in native code.
2676 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2677       report_error(t, exception_code, pc, exception_record,
2678                    exceptionInfo->ContextRecord);
2679 #endif
2680       return EXCEPTION_CONTINUE_SEARCH;
2681     } // /EXCEPTION_ACCESS_VIOLATION
2682     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2683 
2684     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2685       CompiledMethod* nm = NULL;
2686       if (in_java) {
2687         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2688         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2689       }
2690 
2691       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2692       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2693           (nm != NULL && nm->has_unsafe_access())) {
        address next_pc = Assembler::locate_next_instruction(pc);
2695         if (is_unsafe_arraycopy) {
2696           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2697         }
2698         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2699       }
2700     }
2701 
2702 #ifdef _M_ARM64
2703     if (in_java &&
2704         (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2705           exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2706       if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
2707         if (TraceTraps) {
2708           tty->print_cr("trap: zombie_not_entrant");
2709         }
2710         return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
2711       }
2712     }
2713 #endif
2714 
2715     if (in_java) {
2716       switch (exception_code) {
2717       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2718         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2719 
2720       case EXCEPTION_INT_OVERFLOW:
2721         return Handle_IDiv_Exception(exceptionInfo);
2722 
2723       } // switch
2724     }
2725 
2726 #if defined(_M_AMD64) || defined(_M_IX86)
2727     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result = Handle_FLT_Exception(exceptionInfo);
      if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2730     }
2731 #endif
2732 
2733     if (in_java && (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2734       // Check for UD trap caused by NOP patching.
2735       // If it is, patch return address to be deopt handler.
2736       if (NativeDeoptInstruction::is_deopt_at(pc)) {
2737         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2738         if (cb != NULL && cb->is_compiled()) {
2739           CompiledMethod* cm = cb->as_compiled_method();
2740           frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2741           address deopt = cm->is_method_handle_return(pc) ?
2742             cm->deopt_mh_handler_begin() :
2743             cm->deopt_handler_begin();
2744           assert(cm->insts_contains_inclusive(pc), "");
2745           cm->set_original_pc(&fr, pc);
2746           // Set pc to handler
2747           exceptionInfo->ContextRecord->PC_NAME = (DWORD64)deopt;
2748           return EXCEPTION_CONTINUE_EXECUTION;
2749         }
2750       }
2751     }
2752   }
2753 
2754 
2755 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
2756   if (exception_code != EXCEPTION_BREAKPOINT) {
2757     report_error(t, exception_code, pc, exception_record,
2758                  exceptionInfo->ContextRecord);
2759   }
2760 #endif
2761   return EXCEPTION_CONTINUE_SEARCH;
2762 }
2763 
2764 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
2765 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2766   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2767 #if defined(_M_ARM64)
2768   address pc = (address) exceptionInfo->ContextRecord->Pc;
2769 #elif defined(_M_AMD64)
2770   address pc = (address) exceptionInfo->ContextRecord->Rip;
2771 #else
2772   address pc = (address) exceptionInfo->ContextRecord->Eip;
2773 #endif
2774 
2775   // Fast path for code part of the code cache
2776   if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
2777     return topLevelExceptionFilter(exceptionInfo);
2778   }
2779 
2780   // If the exception occurred in the codeCache, pass control
2781   // to our normal exception handler.
2782   CodeBlob* cb = CodeCache::find_blob(pc);
2783   if (cb != NULL) {
2784     return topLevelExceptionFilter(exceptionInfo);
2785   }
2786 
2787   return EXCEPTION_CONTINUE_SEARCH;
2788 }
2789 #endif
2790 
2791 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (!InterceptOSException) {
    DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#if defined(_M_ARM64)
    address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
    address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
    address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
    Thread* t = Thread::current_or_null_safe();

    if (exception_code != EXCEPTION_BREAKPOINT) {
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
    }
  }
  return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
}
2811 #endif
2812 
2813 #ifndef _WIN64
2814 // Special care for fast JNI accessors.
2815 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2816 // the heap gets shrunk before the field access.
2817 // Need to install our own structured exception handler since native code may
2818 // install its own.
2819 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2820   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2821   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2822     address pc = (address) exceptionInfo->ContextRecord->Eip;
2823     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2824     if (addr != (address)-1) {
2825       return Handle_Exception(exceptionInfo, addr);
2826     }
2827   }
2828   return EXCEPTION_CONTINUE_SEARCH;
2829 }
2830 
2831 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2832   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2833                                                      jobject obj,           \
2834                                                      jfieldID fieldID) {    \
2835     __try {                                                                 \
2836       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2837                                                                  obj,       \
2838                                                                  fieldID);  \
2839     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2840                                               _exception_info())) {         \
2841     }                                                                       \
2842     return 0;                                                               \
2843   }
2844 
2845 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2846 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2847 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2848 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2849 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2850 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2851 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2852 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2853 
2854 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2855   switch (type) {
2856   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2857   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2858   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2859   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2860   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2861   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2862   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2863   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2864   default:        ShouldNotReachHere();
2865   }
2866   return (address)-1;
2867 }
2868 #endif
2869 
2870 // Virtual Memory
2871 
2872 int os::vm_page_size() { return os::win32::vm_page_size(); }
2873 int os::vm_allocation_granularity() {
2874   return os::win32::vm_allocation_granularity();
2875 }
2876 
2877 // Windows large page support is available on Windows 2003. In order to use
2878 // large page memory, the administrator must first assign additional privilege
2879 // to the user:
2880 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2881 //   + select Local Policies -> User Rights Assignment
2882 //   + double click "Lock pages in memory", add users and/or groups
2883 //   + reboot
2884 // Note the above steps are needed for administrator as well, as administrators
2885 // by default do not have the privilege to lock pages in memory.
2886 //
2887 // Note about Windows 2003: although the API supports committing large page
2888 // memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment that it only uses large pages if the
// entire memory region is reserved and committed in a single VirtualAlloc()
// call. This makes Windows large page support more or less like Solaris ISM,
// in that the entire heap must be committed upfront. This will probably
// change in the future; if so, the code below needs to be revisited.
2894 
2895 #ifndef MEM_LARGE_PAGES
2896   #define MEM_LARGE_PAGES 0x20000000
2897 #endif
2898 
2899 // Container for NUMA node list info
2900 class NUMANodeListHolder {
2901  private:
2902   int *_numa_used_node_list;  // allocated below
2903   int _numa_used_node_count;
2904 
2905   void free_node_list() {
2906     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2907   }
2908 
2909  public:
2910   NUMANodeListHolder() {
2911     _numa_used_node_count = 0;
2912     _numa_used_node_list = NULL;
2913     // do rest of initialization in build routine (after function pointers are set up)
2914   }
2915 
2916   ~NUMANodeListHolder() {
2917     free_node_list();
2918   }
2919 
2920   bool build() {
2921     DWORD_PTR proc_aff_mask;
2922     DWORD_PTR sys_aff_mask;
2923     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2924     ULONG highest_node_number;
2925     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2926     free_node_list();
2927     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2928     for (unsigned int i = 0; i <= highest_node_number; i++) {
2929       ULONGLONG proc_mask_numa_node;
2930       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2931       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2932         _numa_used_node_list[_numa_used_node_count++] = i;
2933       }
2934     }
2935     return (_numa_used_node_count > 1);
2936   }
2937 
2938   int get_count() { return _numa_used_node_count; }
2939   int get_node_list_entry(int n) {
2940     // for indexes out of range, returns -1
2941     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2942   }
2943 
2944 } numa_node_list_holder;
2945 
2946 static size_t _large_page_size = 0;
2947 
2948 static bool request_lock_memory_privilege() {
2949   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2950                                 os::current_process_id());
2951 
2952   bool success = false;
2953   HANDLE hToken = NULL;
2954   LUID luid;
2955   if (hProcess != NULL &&
2956       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2957       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2958 
2959     TOKEN_PRIVILEGES tp;
2960     tp.PrivilegeCount = 1;
2961     tp.Privileges[0].Luid = luid;
2962     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2963 
2964     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2965     // privilege. Check GetLastError() too. See MSDN document.
2966     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2967         (GetLastError() == ERROR_SUCCESS)) {
2968       success = true;
2969     }
2970   }
2971 
2972   // Cleanup
2973   if (hProcess != NULL) {
2974     CloseHandle(hProcess);
2975   }
2976   if (hToken != NULL) {
2977     CloseHandle(hToken);
2978   }
2979 
2980   return success;
2981 }
2982 
2983 static bool numa_interleaving_init() {
2984   bool success = false;
2985 
2986   // print a warning if UseNUMAInterleaving flag is specified on command line
2987   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2988 
2989 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2990 
2991   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2992   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2993   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
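       // Illustrative example (assuming a 64K allocation granularity): a requested
       // NUMAInterleaveGranularity of 100K is rounded up to 128K; with 2M large
       // pages in use, the same request would be rounded up to 2M.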
2994 
2995   if (!numa_node_list_holder.build()) {
2996     WARN("Process does not cover multiple NUMA nodes.");
2997     WARN("...Ignoring UseNUMAInterleaving flag.");
2998     return false;
2999   }
3000 
3001   if (log_is_enabled(Debug, os, cpu)) {
3002     Log(os, cpu) log;
3003     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
3004     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
3005       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
3006     }
3007   }
3008 
3009 #undef WARN
3010 
3011   return true;
3012 }
3013 
3014 // This routine is used whenever we need to reserve a contiguous VA range but
3015 // have to make a separate VirtualAlloc() call for each piece of the range.
3016 // Reasons for doing this:
3017 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
3018 //  * UseNUMAInterleaving requires a separate node for each piece
3019 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
3020                                          DWORD prot,
3021                                          bool should_inject_error = false) {
3022   char * p_buf;
3023   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
3024   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
3025   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
3026 
3027   // First reserve enough address space in advance, since we want to be able
3028   // to break a single contiguous virtual address range into multiple large
3029   // page commits, but WS2003 does not allow reserving large page space.
3030   // So we just use 4K pages for the reserve; this gives us a legal contiguous
3031   // address space. Then we deallocate that reservation and re-allocate
3032   // using large pages.
3033   const size_t size_of_reserve = bytes + chunk_size;
3034   if (bytes > size_of_reserve) {
3035     // Overflowed.
3036     return NULL;
3037   }
3038   p_buf = (char *) virtualAlloc(addr,
3039                                 size_of_reserve,  // size of Reserve
3040                                 MEM_RESERVE,
3041                                 PAGE_READWRITE);
3042   // If reservation failed, return NULL
3043   if (p_buf == NULL) return NULL;
3044   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
3045   os::release_memory(p_buf, bytes + chunk_size);
3046 
3047   // We still need to round up to a page boundary (in case we are using large pages),
3048   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
3049   // instead we handle this in the bytes_to_rq computation below.
3050   p_buf = align_up(p_buf, page_size);
3051 
3052   // now go through and allocate one chunk at a time until all bytes are
3053   // allocated
3054   size_t  bytes_remaining = bytes;
3055   // An overflow of align_up() would have been caught above
3056   // in the calculation of size_of_reserve.
3057   char * next_alloc_addr = p_buf;
3058   HANDLE hProc = GetCurrentProcess();
3059 
3060 #ifdef ASSERT
3061   // Variable for the failure injection
3062   int ran_num = os::random();
3063   size_t fail_after = ran_num % bytes;
3064 #endif
3065 
3066   int count=0;
3067   while (bytes_remaining) {
3068     // select bytes_to_rq to get to the next chunk_size boundary
3069 
3070     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
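         // Illustrative example: with a 2M chunk_size and next_alloc_addr sitting 1M past
         // a chunk boundary, this first request is capped at 1M, so that subsequent
         // requests start on chunk boundaries and each cover a full chunk (or the tail).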
3071     // Note allocate and commit
3072     char * p_new;
3073 
3074 #ifdef ASSERT
3075     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3076 #else
3077     const bool inject_error_now = false;
3078 #endif
3079 
3080     if (inject_error_now) {
3081       p_new = NULL;
3082     } else {
3083       if (!UseNUMAInterleaving) {
3084         p_new = (char *) virtualAlloc(next_alloc_addr,
3085                                       bytes_to_rq,
3086                                       flags,
3087                                       prot);
3088       } else {
3089         // get the next node to use from the used_node_list
3090         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3091         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3092         p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3093       }
3094     }
3095 
3096     if (p_new == NULL) {
3097       // Free any allocated pages
3098       if (next_alloc_addr > p_buf) {
3099         // Some memory was committed so release it.
3100         size_t bytes_to_release = bytes - bytes_remaining;
3101         // NMT has yet to record any individual blocks, so it
3102         // needs to create a dummy 'reserve' record to match
3103         // the release.
3104         MemTracker::record_virtual_memory_reserve((address)p_buf,
3105                                                   bytes_to_release, CALLER_PC);
3106         os::release_memory(p_buf, bytes_to_release);
3107       }
3108 #ifdef ASSERT
3109       if (should_inject_error) {
3110         log_develop_debug(pagesize)("Reserving pages individually failed.");
3111       }
3112 #endif
3113       return NULL;
3114     }
3115 
3116     bytes_remaining -= bytes_to_rq;
3117     next_alloc_addr += bytes_to_rq;
3118     count++;
3119   }
3120   // Although the memory is allocated individually, it is returned as one.
3121   // NMT records it as one block.
3122   if ((flags & MEM_COMMIT) != 0) {
3123     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3124   } else {
3125     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3126   }
3127 
3128   // made it this far, success
3129   return p_buf;
3130 }
3131 
3132 static size_t large_page_init_decide_size() {
3133   // print a warning if any large page related flag is specified on command line
3134   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3135                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3136 
3137 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3138 
3139   if (!request_lock_memory_privilege()) {
3140     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3141     return 0;
3142   }
3143 
3144   size_t size = GetLargePageMinimum();
3145   if (size == 0) {
3146     WARN("Large page is not supported by the processor.");
3147     return 0;
3148   }
3149 
3150 #if defined(IA32) || defined(AMD64)
3151   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3152     WARN("JVM cannot use large pages bigger than 4mb.");
3153     return 0;
3154   }
3155 #endif
3156 
3157   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3158     size = LargePageSizeInBytes;
3159   }
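     // Illustrative example: with GetLargePageMinimum() == 2M, -XX:LargePageSizeInBytes=4M
     // is accepted since 4M is a multiple of 2M, whereas a request of 3M is not a
     // multiple and the 2M minimum is kept.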
3160 
3161 #undef WARN
3162 
3163   return size;
3164 }
3165 
3166 void os::large_page_init() {
3167   if (!UseLargePages) {
3168     return;
3169   }
3170 
3171   _large_page_size = large_page_init_decide_size();
3172   const size_t default_page_size = (size_t) vm_page_size();
3173   if (_large_page_size > default_page_size) {
3174     _page_sizes.add(_large_page_size);
3175   }
3176 
3177   UseLargePages = _large_page_size != 0;
3178 }
3179 
3180 int os::create_file_for_heap(const char* dir) {
3181 
3182   const char name_template[] = "/jvmheap.XXXXXX";
3183 
3184   size_t fullname_len = strlen(dir) + strlen(name_template);
3185   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3186   if (fullname == NULL) {
3187     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3188     return -1;
3189   }
3190   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3191   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3192 
3193   os::native_path(fullname);
3194 
3195   char *path = _mktemp(fullname);
3196   if (path == NULL) {
3197     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3198     os::free(fullname);
3199     return -1;
3200   }
3201 
3202   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3203 
3204   os::free(fullname);
3205   if (fd < 0) {
3206     warning("Problem opening file for heap (%s)", os::strerror(errno));
3207     return -1;
3208   }
3209   return fd;
3210 }
3211 
3212 // If 'base' is not NULL, this function returns NULL if it cannot map the file at 'base'.
3213 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3214   assert(fd != -1, "File descriptor is not valid");
3215 
3216   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3217 #ifdef _LP64
3218   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3219     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3220 #else
3221   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3222     0, (DWORD)size, NULL);
3223 #endif
3224   if (fileMapping == NULL) {
3225     if (GetLastError() == ERROR_DISK_FULL) {
3226       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3227     }
3228     else {
3229       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3230     }
3231 
3232     return NULL;
3233   }
3234 
3235   LPVOID addr = mapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3236 
3237   CloseHandle(fileMapping);
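       // Note: closing the file-mapping handle here is safe; if the view was mapped
       // successfully, it keeps the underlying section object alive until it is unmapped.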
3238 
3239   return (char*)addr;
3240 }
3241 
3242 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3243   assert(fd != -1, "File descriptor is not valid");
3244   assert(base != NULL, "Base address cannot be NULL");
3245 
3246   release_memory(base, size);
3247   return map_memory_to_file(base, size, fd);
3248 }
3249 
3250 // Multiple threads can race in this code, but it is not possible to unmap small sections of
3251 // virtual space to get the requested alignment, as it is on POSIX-like OSes.
3252 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
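     // Illustrative walk-through: to obtain, say, a 1G reservation aligned to 1G, we
     // reserve size + alignment bytes, compute the first aligned address inside that
     // reservation, release the over-sized reservation, and then try to reserve (or map)
     // exactly 'size' bytes at the aligned address. Another thread may grab that spot
     // in the meantime, so the sequence is retried up to max_attempts times.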
3253 static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3254   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3255          "Alignment must be a multiple of allocation granularity (page size)");
3256   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3257 
3258   size_t extra_size = size + alignment;
3259   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3260 
3261   char* aligned_base = NULL;
3262   static const int max_attempts = 20;
3263 
3264   for (int attempt = 0; attempt < max_attempts && aligned_base == NULL; attempt ++) {
3265     char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc) :
3266                                          os::reserve_memory(extra_size);
3267     if (extra_base == NULL) {
3268       return NULL;
3269     }
3270     // Do manual alignment
3271     aligned_base = align_up(extra_base, alignment);
3272 
3273     bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
3274                                   os::release_memory(extra_base, extra_size);
3275     assert(rc, "release failed");
3276     if (!rc) {
3277       return NULL;
3278     }
3279 
3280     // Attempt to map, into the just vacated space, the slightly smaller aligned area.
3281     // Which may fail, hence the loop.
3282     aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc) :
3283                                      os::attempt_reserve_memory_at(aligned_base, size);
3284   }
3285 
3286   assert(aligned_base != NULL, "Did not manage to re-map after %d attempts?", max_attempts);
3287 
3288   return aligned_base;
3289 }
3290 
3291 char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
3292   // exec can be ignored
3293   return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
3294 }
3295 
3296 char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd) {
3297   return map_or_reserve_memory_aligned(size, alignment, fd);
3298 }
3299 
3300 char* os::pd_reserve_memory(size_t bytes, bool exec) {
3301   return pd_attempt_reserve_memory_at(NULL /* addr */, bytes, exec);
3302 }
3303 
3304 // Reserve memory at an arbitrary address, only if that area is
3305 // available (and not reserved for something else).
3306 char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
3307   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3308          "reserve alignment");
3309   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3310   char* res;
3311   // note that if UseLargePages is on, all the areas that require interleaving
3312   // will go thru reserve_memory_special rather than thru here.
3313   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3314   if (!use_individual) {
3315     res = (char*)virtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3316   } else {
3317     elapsedTimer reserveTimer;
3318     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3319     // in numa interleaving, we have to allocate pages individually
3320     // (well really chunks of NUMAInterleaveGranularity size)
3321     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3322     if (res == NULL) {
3323       warning("NUMA page allocation failed");
3324     }
3325     if (Verbose && PrintMiscellaneous) {
3326       reserveTimer.stop();
3327       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3328                     reserveTimer.milliseconds(), reserveTimer.ticks());
3329     }
3330   }
3331   assert(res == NULL || addr == NULL || addr == res,
3332          "Unexpected address from reserve.");
3333 
3334   return res;
3335 }
3336 
3337 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
3338   assert(file_desc >= 0, "file_desc is not valid");
3339   return map_memory_to_file(requested_addr, bytes, file_desc);
3340 }
3341 
3342 size_t os::large_page_size() {
3343   return _large_page_size;
3344 }
3345 
3346 bool os::can_commit_large_page_memory() {
3347   // Windows only uses large page memory when the entire region is reserved
3348   // and committed in a single VirtualAlloc() call. This may change in the
3349   // future, but with Windows 2003 it's not possible to commit on demand.
3350   return false;
3351 }
3352 
3353 bool os::can_execute_large_page_memory() {
3354   return true;
3355 }
3356 
3357 static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec) {
3358   log_debug(pagesize)("Reserving large pages individually.");
3359 
3360   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3361   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3362 
3363   char * p_buf = allocate_pages_individually(size, req_addr, flags, prot, LargePagesIndividualAllocationInjectError);
3364   if (p_buf == NULL) {
3365     // give an appropriate warning message
3366     if (UseNUMAInterleaving) {
3367       warning("NUMA large page allocation failed, UseLargePages flag ignored");
3368     }
3369     if (UseLargePagesIndividualAllocation) {
3370       warning("Individually allocated large pages failed, "
3371               "use -XX:-UseLargePagesIndividualAllocation to turn off");
3372     }
3373     return NULL;
3374   }
3375   return p_buf;
3376 }
3377 
3378 static char* reserve_large_pages_single_range(size_t size, char* req_addr, bool exec) {
3379   log_debug(pagesize)("Reserving large pages in a single large chunk.");
3380 
3381   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3382   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3383 
3384   return (char *) virtualAlloc(req_addr, size, flags, prot);
3385 }
3386 
3387 static char* reserve_large_pages(size_t size, char* req_addr, bool exec) {
3388   // with large pages, there are two cases where we need to use Individual Allocation
3389   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3390   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3391   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3392     return reserve_large_pages_individually(size, req_addr, exec);
3393   }
3394   return reserve_large_pages_single_range(size, req_addr, exec);
3395 }
3396 
3397 static char* find_aligned_address(size_t size, size_t alignment) {
3398   // Temporary reserve memory large enough to ensure we can get the requested
3399   // alignment and still fit the reservation.
3400   char* addr = (char*) virtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
3401   // Align the address to the requested alignment.
3402   char* aligned_addr = align_up(addr, alignment);
3403   // Free the temporary reservation.
3404   virtualFree(addr, 0, MEM_RELEASE);
3405 
3406   return aligned_addr;
3407 }
3408 
3409 static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec) {
3410   log_debug(pagesize)("Reserving large pages at an aligned address, alignment=" SIZE_FORMAT "%s",
3411                       byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));
3412 
3413   // Will try to find a suitable address at most 20 times. The reason we need to try
3414   // multiple times is that between finding the aligned address and trying to commit
3415   // the large pages another thread might have reserved an overlapping region.
3416   const int attempts_limit = 20;
3417   for (int attempts = 0; attempts < attempts_limit; attempts++)  {
3418     // Find aligned address.
3419     char* aligned_address = find_aligned_address(size, alignment);
3420 
3421     // Try to do the large page reservation using the aligned address.
3422     aligned_address = reserve_large_pages(size, aligned_address, exec);
3423     if (aligned_address != NULL) {
3424       // Reservation at the aligned address succeeded.
3425       guarantee(is_aligned(aligned_address, alignment), "Must be aligned");
3426       return aligned_address;
3427     }
3428   }
3429 
3430   log_debug(pagesize)("Failed reserving large pages at aligned address");
3431   return NULL;
3432 }
3433 
3434 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* addr,
3435                                     bool exec) {
3436   assert(UseLargePages, "only for large pages");
3437   assert(page_size == os::large_page_size(), "Currently only support one large page size on Windows");
3438   assert(is_aligned(addr, alignment), "Must be");
3439   assert(is_aligned(addr, page_size), "Must be");
3440 
3441   if (!is_aligned(bytes, page_size)) {
3442     // Fallback to small pages, Windows does not support mixed mappings.
3443     return NULL;
3444   }
3445 
3446   // The requested alignment can be larger than the page size, for example with G1
3447   // the alignment is bound to the heap region size. So this reservation needs to
3448   // ensure that the requested alignment is met. When there is a requested address,
3449   // this is handled automatically, since that address must already be properly aligned.
3450   if (addr == NULL && alignment > page_size) {
3451     return reserve_large_pages_aligned(bytes, alignment, exec);
3452   }
3453 
3454   // No additional requirements, just reserve the large pages.
3455   return reserve_large_pages(bytes, addr, exec);
3456 }
3457 
3458 bool os::pd_release_memory_special(char* base, size_t bytes) {
3459   assert(base != NULL, "Sanity check");
3460   return pd_release_memory(base, bytes);
3461 }
3462 
3463 void os::print_statistics() {
3464 }
3465 
3466 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3467   int err = os::get_last_error();
3468   char buf[256];
3469   size_t buf_len = os::lasterror(buf, sizeof(buf));
3470   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3471           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3472           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3473 }
3474 
3475 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3476   if (bytes == 0) {
3477     // Don't bother the OS with noops.
3478     return true;
3479   }
3480   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3481   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3482   // Don't attempt to print anything if the OS call fails. We're
3483   // probably low on resources, so the print itself may cause crashes.
3484 
3485   // Unless NUMAInterleaving is enabled, the range of a commit is always
3486   // within a reserve covered by a single VirtualAlloc() call; in that
3487   // case we can just do a single commit for the requested size.
3488   if (!UseNUMAInterleaving) {
3489     if (virtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3490       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3491       return false;
3492     }
3493     if (exec) {
3494       DWORD oldprot;
3495       // Windows doc says to use VirtualProtect to get execute permissions
3496       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3497         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3498         return false;
3499       }
3500     }
3501     return true;
3502   } else {
3503 
3504     // when NUMAInterleaving is enabled, the commit might cover a range that
3505     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3506     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3507     // returns represents the number of bytes that can be committed in one step.
3508     size_t bytes_remaining = bytes;
3509     char * next_alloc_addr = addr;
3510     while (bytes_remaining > 0) {
3511       MEMORY_BASIC_INFORMATION alloc_info;
3512       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3513       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3514       if (virtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3515                        PAGE_READWRITE) == NULL) {
3516         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3517                                             exec);)
3518         return false;
3519       }
3520       if (exec) {
3521         DWORD oldprot;
3522         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3523                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3524           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3525                                               exec);)
3526           return false;
3527         }
3528       }
3529       bytes_remaining -= bytes_to_rq;
3530       next_alloc_addr += bytes_to_rq;
3531     }
3532   }
3533   // if we made it this far, return true
3534   return true;
3535 }
3536 
3537 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3538                           bool exec) {
3539   // alignment_hint is ignored on this OS
3540   return pd_commit_memory(addr, size, exec);
3541 }
3542 
3543 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3544                                   const char* mesg) {
3545   assert(mesg != NULL, "mesg must be specified");
3546   if (!pd_commit_memory(addr, size, exec)) {
3547     warn_fail_commit_memory(addr, size, exec);
3548     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3549   }
3550 }
3551 
3552 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3553                                   size_t alignment_hint, bool exec,
3554                                   const char* mesg) {
3555   // alignment_hint is ignored on this OS
3556   pd_commit_memory_or_exit(addr, size, exec, mesg);
3557 }
3558 
3559 bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
3560   if (bytes == 0) {
3561     // Don't bother the OS with noops.
3562     return true;
3563   }
3564   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3565   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3566   return (virtualFree(addr, bytes, MEM_DECOMMIT) == TRUE);
3567 }
3568 
3569 bool os::pd_release_memory(char* addr, size_t bytes) {
3570   // Given a range we are to release, we require a mapping to start at the beginning of that range;
3571   //  if NUMA interleaving or individual large-page allocation is used, we allow the range to contain
3572   //  multiple mappings, which have to cover the range completely; otherwise the range must match an OS mapping exactly.
3573   address start = (address)addr;
3574   address end = start + bytes;
3575   os::win32::mapping_info_t mi;
3576   const bool multiple_mappings_allowed = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
3577   address p = start;
3578   bool first_mapping = true;
3579 
3580   do {
3581     // Find mapping and check it
3582     const char* err = NULL;
3583     if (!os::win32::find_mapping(p, &mi)) {
3584       err = "no mapping found";
3585     } else {
3586       if (first_mapping) {
3587         if (mi.base != start) {
3588           err = "base address mismatch";
3589         }
3590         if (multiple_mappings_allowed ? (mi.size > bytes) : (mi.size != bytes)) {
3591           err = "size mismatch";
3592         }
3593       } else {
3594         assert(p == mi.base && mi.size > 0, "Sanity");
3595         if (mi.base + mi.size > end) {
3596           err = "mapping overlaps end";
3597         }
3598         if (mi.size == 0) {
3599           err = "zero length mapping?"; // Should never happen; just to prevent endlessly looping in release.
3600         }
3601       }
3602     }
3603     // Handle mapping error. We assert in debug, unconditionally print a warning in release.
3604     if (err != NULL) {
3605       log_warning(os)("bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
3606 #ifdef ASSERT
3607       os::print_memory_mappings((char*)start, bytes, tty);
3608       assert(false, "bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
3609 #endif
3610       return false;
3611     }
3612     // Free this range
3613     if (virtualFree(p, 0, MEM_RELEASE) == FALSE) {
3614       return false;
3615     }
3616     first_mapping = false;
3617     p = mi.base + mi.size;
3618   } while (p < end);
3619 
3620   return true;
3621 }
3622 
3623 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3624   return os::commit_memory(addr, size, !ExecMem);
3625 }
3626 
3627 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3628   return os::uncommit_memory(addr, size);
3629 }
3630 
3631 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3632   uint count = 0;
3633   bool ret = false;
3634   size_t bytes_remaining = bytes;
3635   char * next_protect_addr = addr;
3636 
3637   // Use VirtualQuery() to get the chunk size.
3638   while (bytes_remaining) {
3639     MEMORY_BASIC_INFORMATION alloc_info;
3640     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3641       return false;
3642     }
3643 
3644     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3645     // allocate_pages_individually() used different APIs depending on UseNUMAInterleaving,
3646     // but we don't need to distinguish the cases here, as both are protected by the same API.
3647     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3648     if (!ret) {
3649       warning("Failed protecting pages individually for chunk #%u", count);
3650       return false;
3651     }
3652 
3653     bytes_remaining -= bytes_to_protect;
3654     next_protect_addr += bytes_to_protect;
3655     count++;
3656   }
3657   return ret;
3658 }
3659 
3660 // Set protections specified
3661 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3662                         bool is_committed) {
3663   unsigned int p = 0;
3664   switch (prot) {
3665   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3666   case MEM_PROT_READ: p = PAGE_READONLY; break;
3667   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3668   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3669   default:
3670     ShouldNotReachHere();
3671   }
3672 
3673   DWORD old_status;
3674 
3675   // Oddly enough, on Win32 one can change protection only for committed
3676   // memory; not a big deal anyway, as 'bytes' here is at most 64K.
3677   if (!is_committed) {
3678     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3679                           "cannot commit protection page");
3680   }
3681   // One cannot use os::guard_memory() here, as on Win32 guard page
3682   // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
3683   //
3684   // Pages in the region become guard pages. Any attempt to access a guard page
3685   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3686   // the guard page status. Guard pages thus act as a one-time access alarm.
3687   bool ret;
3688   if (UseNUMAInterleaving) {
3689     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3690     // so we must protect the chunks individually.
3691     ret = protect_pages_individually(addr, bytes, p, &old_status);
3692   } else {
3693     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3694   }
3695 #ifdef ASSERT
3696   if (!ret) {
3697     int err = os::get_last_error();
3698     char buf[256];
3699     size_t buf_len = os::lasterror(buf, sizeof(buf));
3700     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3701           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3702           buf_len != 0 ? buf : "<no_error_string>", err);
3703   }
3704 #endif
3705   return ret;
3706 }
3707 
3708 bool os::guard_memory(char* addr, size_t bytes) {
3709   DWORD old_status;
3710   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3711 }
3712 
3713 bool os::unguard_memory(char* addr, size_t bytes) {
3714   DWORD old_status;
3715   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3716 }
3717 
3718 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3719 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3720 void os::numa_make_global(char *addr, size_t bytes)    { }
3721 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3722 bool os::numa_topology_changed()                       { return false; }
3723 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3724 int os::numa_get_group_id()                            { return 0; }
3725 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3726   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3727     // Provide an answer for UMA systems
3728     ids[0] = 0;
3729     return 1;
3730   } else {
3731     // check for size bigger than actual groups_num
3732     size = MIN2(size, numa_get_groups_num());
3733     for (int i = 0; i < (int)size; i++) {
3734       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3735     }
3736     return size;
3737   }
3738 }
3739 
3740 int os::numa_get_group_id_for_address(const void* address) {
3741   return 0;
3742 }
3743 
3744 bool os::get_page_info(char *start, page_info* info) {
3745   return false;
3746 }
3747 
3748 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3749                      page_info* page_found) {
3750   return end;
3751 }
3752 
3753 char* os::non_memory_address_word() {
3754   // Must never look like an address returned by reserve_memory,
3755   // even in its subfields (as defined by the CPU immediate fields,
3756   // if the CPU splits constants across multiple instructions).
3757 #ifdef _M_ARM64
3758   // AArch64 has a maximum addressable space of 48-bits
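       // ((1ull << 48) - 1) == 0x0000FFFFFFFFFFFF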
3759   return (char*)((1ull << 48) - 1);
3760 #else
3761   return (char*)-1;
3762 #endif
3763 }
3764 
3765 #define MAX_ERROR_COUNT 100
3766 #define SYS_THREAD_ERROR 0xffffffffUL
3767 
3768 void os::pd_start_thread(Thread* thread) {
3769   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3770   // Returns previous suspend state:
3771   // 0:  Thread was not suspended
3772   // 1:  Thread is running now
3773   // >1: Thread is still suspended.
3774   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3775 }
3776 
3777 
3778 // Short sleep, direct OS call.
3779 //
3780 // ms = 0, means allow others (if any) to run.
3781 //
3782 void os::naked_short_sleep(jlong ms) {
3783   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3784   Sleep(ms);
3785 }
3786 
3787 // Windows does not provide sleep functionality with nanosecond resolution, so we
3788 // try to approximate this with spinning combined with yielding if another thread
3789 // is ready to run on the current processor.
3790 void os::naked_short_nanosleep(jlong ns) {
3791   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3792 
3793   int64_t start = os::javaTimeNanos();
3794   do {
3795     if (SwitchToThread() == 0) {
3796       // Nothing else is ready to run on this cpu, spin a little
3797       SpinPause();
3798     }
3799   } while (os::javaTimeNanos() - start < ns);
3800 }
3801 
3802 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3803 void os::infinite_sleep() {
3804   while (true) {    // sleep forever ...
3805     Sleep(100000);  // ... 100 seconds at a time
3806   }
3807 }
3808 
3809 typedef BOOL (WINAPI * STTSignature)(void);
3810 
3811 void os::naked_yield() {
3812   // Consider passing back the return value from SwitchToThread().
3813   SwitchToThread();
3814 }
3815 
3816 // Win32 only gives you access to seven real priorities at a time,
3817 // so we compress Java's ten down to seven.  It would be better
3818 // if we dynamically adjusted relative priorities.
3819 
3820 int os::java_to_os_priority[CriticalPriority + 1] = {
3821   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3822   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3823   THREAD_PRIORITY_LOWEST,                       // 2
3824   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3825   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3826   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3827   THREAD_PRIORITY_NORMAL,                       // 6
3828   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3829   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3830   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3831   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3832   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3833 };
3834 
3835 int prio_policy1[CriticalPriority + 1] = {
3836   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3837   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3838   THREAD_PRIORITY_LOWEST,                       // 2
3839   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3840   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3841   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3842   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3843   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3844   THREAD_PRIORITY_HIGHEST,                      // 8
3845   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3846   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3847   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3848 };
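     // For example, Java NormPriority (5) maps to THREAD_PRIORITY_NORMAL in both tables,
     // while MaxPriority (10) maps to THREAD_PRIORITY_HIGHEST by default and to
     // THREAD_PRIORITY_TIME_CRITICAL when -XX:ThreadPriorityPolicy=1 selects prio_policy1.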
3849 
3850 static int prio_init() {
3851   // If ThreadPriorityPolicy is 1, switch tables
3852   if (ThreadPriorityPolicy == 1) {
3853     int i;
3854     for (i = 0; i < CriticalPriority + 1; i++) {
3855       os::java_to_os_priority[i] = prio_policy1[i];
3856     }
3857   }
3858   if (UseCriticalJavaThreadPriority) {
3859     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3860   }
3861   return 0;
3862 }
3863 
3864 OSReturn os::set_native_priority(Thread* thread, int priority) {
3865   if (!UseThreadPriorities) return OS_OK;
3866   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3867   return ret ? OS_OK : OS_ERR;
3868 }
3869 
3870 OSReturn os::get_native_priority(const Thread* const thread,
3871                                  int* priority_ptr) {
3872   if (!UseThreadPriorities) {
3873     *priority_ptr = java_to_os_priority[NormPriority];
3874     return OS_OK;
3875   }
3876   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3877   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3878     assert(false, "GetThreadPriority failed");
3879     return OS_ERR;
3880   }
3881   *priority_ptr = os_prio;
3882   return OS_OK;
3883 }
3884 
3885 // GetCurrentThreadId() returns DWORD
3886 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3887 
3888 static int _initial_pid = 0;
3889 
3890 int os::current_process_id() {
3891   return (_initial_pid ? _initial_pid : _getpid());
3892 }
3893 
3894 int    os::win32::_vm_page_size              = 0;
3895 int    os::win32::_vm_allocation_granularity = 0;
3896 int    os::win32::_processor_type            = 0;
3897 // Processor level is not available on non-NT systems, use vm_version instead
3898 int    os::win32::_processor_level           = 0;
3899 julong os::win32::_physical_memory           = 0;
3900 size_t os::win32::_default_stack_size        = 0;
3901 
3902 intx          os::win32::_os_thread_limit    = 0;
3903 volatile intx os::win32::_os_thread_count    = 0;
3904 
3905 bool   os::win32::_is_windows_server         = false;
3906 
3907 // JDK-6573254: race between process exit and thread exit ("exit bug").
3908 // Currently, the bug is observed across all the supported Windows releases,
3909 // including the latest one (as of this writing - Windows Server 2012 R2).
3910 bool   os::win32::_has_exit_bug              = true;
3911 
3912 void os::win32::initialize_system_info() {
3913   SYSTEM_INFO si;
3914   GetSystemInfo(&si);
3915   _vm_page_size    = si.dwPageSize;
3916   _vm_allocation_granularity = si.dwAllocationGranularity;
3917   _processor_type  = si.dwProcessorType;
3918   _processor_level = si.wProcessorLevel;
3919   set_processor_count(si.dwNumberOfProcessors);
3920 
3921   MEMORYSTATUSEX ms;
3922   ms.dwLength = sizeof(ms);
3923 
3924   // GlobalMemoryStatusEx() also returns ullAvailPhys (free physical memory bytes),
3925   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3926   GlobalMemoryStatusEx(&ms);
3927   _physical_memory = ms.ullTotalPhys;
3928 
3929   if (FLAG_IS_DEFAULT(MaxRAM)) {
3930     // Adjust MaxRAM according to the maximum virtual address space available.
3931     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3932   }
3933 
3934   OSVERSIONINFOEX oi;
3935   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3936   GetVersionEx((OSVERSIONINFO*)&oi);
3937   switch (oi.dwPlatformId) {
3938   case VER_PLATFORM_WIN32_NT:
3939     {
3940       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3941       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3942           oi.wProductType == VER_NT_SERVER) {
3943         _is_windows_server = true;
3944       }
3945     }
3946     break;
3947   default: fatal("Unknown platform");
3948   }
3949 
3950   _default_stack_size = os::current_stack_size();
3951   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3952   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3953          "stack size not a multiple of page size");
3954 
3955   initialize_performance_counter();
3956 }
3957 
3958 
3959 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3960                                       int ebuflen) {
3961   char path[MAX_PATH];
3962   DWORD size;
3963   DWORD pathLen = (DWORD)sizeof(path);
3964   HINSTANCE result = NULL;
3965 
3966   // only allow library name without path component
3967   assert(strchr(name, '\\') == NULL, "path not allowed");
3968   assert(strchr(name, ':') == NULL, "path not allowed");
3969   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3970     jio_snprintf(ebuf, ebuflen,
3971                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3972     return NULL;
3973   }
3974 
3975   // search system directory
3976   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3977     if (size >= pathLen) {
3978       return NULL; // truncated
3979     }
3980     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3981       return NULL; // truncated
3982     }
3983     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3984       return result;
3985     }
3986   }
3987 
3988   // try Windows directory
3989   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3990     if (size >= pathLen) {
3991       return NULL; // truncated
3992     }
3993     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3994       return NULL; // truncated
3995     }
3996     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3997       return result;
3998     }
3999   }
4000 
4001   jio_snprintf(ebuf, ebuflen,
4002                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
4003   return NULL;
4004 }
4005 
4006 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
4007 #define EXIT_TIMEOUT 300000 /* 5 minutes */
4008 
4009 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
4010   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
4011   return TRUE;
4012 }
4013 
4014 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
4015   // Basic approach:
4016   //  - Each exiting thread registers its intent to exit and then does so.
4017   //  - A thread trying to terminate the process must wait for all
4018   //    threads currently exiting to complete their exit.
4019 
4020   if (os::win32::has_exit_bug()) {
4021     // The array holds handles of the threads that have started exiting by calling
4022     // _endthreadex().
4023     // Should be large enough to avoid blocking the exiting thread due to lack of
4024     // a free slot.
4025     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
4026     static int handle_count = 0;
4027 
4028     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
4029     static CRITICAL_SECTION crit_sect;
4030     static volatile DWORD process_exiting = 0;
4031     int i, j;
4032     DWORD res;
4033     HANDLE hproc, hthr;
4034 
4035     // We only attempt to register threads until a process exiting
4036     // thread manages to set the process_exiting flag. Any threads
4037     // that come through here after the process_exiting flag is set
4038     // are unregistered and will be caught in the SuspendThread()
4039     // infinite loop below.
4040     bool registered = false;
4041 
4042     // The first thread that reaches this point initializes the critical section.
4043     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
4044       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
4045     } else if (Atomic::load_acquire(&process_exiting) == 0) {
4046       if (what != EPT_THREAD) {
4047         // Atomically set process_exiting before the critical section
4048         // to increase the visibility between racing threads.
4049         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
4050       }
4051       EnterCriticalSection(&crit_sect);
4052 
4053       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
4054         // Remove from the array those handles of the threads that have completed exiting.
4055         for (i = 0, j = 0; i < handle_count; ++i) {
4056           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
4057           if (res == WAIT_TIMEOUT) {
4058             handles[j++] = handles[i];
4059           } else {
4060             if (res == WAIT_FAILED) {
4061               warning("WaitForSingleObject failed (%u) in %s: %d\n",
4062                       GetLastError(), __FILE__, __LINE__);
4063             }
4064             // Don't keep the handle, if we failed waiting for it.
4065             CloseHandle(handles[i]);
4066           }
4067         }
4068 
4069         // If there's no free slot in the array of the kept handles, we'll have to
4070         // wait until at least one thread completes exiting.
4071         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
4072           // Raise the priority of the oldest exiting thread to increase its chances
4073           // to complete sooner.
4074           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
4075           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
4076           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
4077             i = (res - WAIT_OBJECT_0);
4078             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
4079             for (; i < handle_count; ++i) {
4080               handles[i] = handles[i + 1];
4081             }
4082           } else {
4083             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4084                     (res == WAIT_FAILED ? "failed" : "timed out"),
4085                     GetLastError(), __FILE__, __LINE__);
4086             // Don't keep handles, if we failed waiting for them.
4087             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
4088               CloseHandle(handles[i]);
4089             }
4090             handle_count = 0;
4091           }
4092         }
4093 
4094         // Store a duplicate of the current thread handle in the array of handles.
4095         hproc = GetCurrentProcess();
4096         hthr = GetCurrentThread();
4097         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
4098                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
4099           warning("DuplicateHandle failed (%u) in %s: %d\n",
4100                   GetLastError(), __FILE__, __LINE__);
4101 
4102           // We can't register this thread (no more handles) so this thread
4103           // may be racing with a thread that is calling exit(). If the thread
4104           // that is calling exit() has managed to set the process_exiting
4105           // flag, then this thread will be caught in the SuspendThread()
4106           // infinite loop below which closes that race. A small timing
4107           // window remains before the process_exiting flag is set, but it
4108           // is only exposed when we are out of handles.
4109         } else {
4110           ++handle_count;
4111           registered = true;
4112 
4113           // The current exiting thread has stored its handle in the array, and now
4114           // should leave the critical section before calling _endthreadex().
4115         }
4116 
4117       } else if (what != EPT_THREAD && handle_count > 0) {
4118         jlong start_time, finish_time, timeout_left;
4119         // Before ending the process, make sure all the threads that had called
4120         // _endthreadex() completed.
4121 
4122         // Set the priority level of the current thread to the same value as
4123         // the priority level of exiting threads.
4124         // This is to ensure it will be given a fair chance to execute if
4125         // the timeout expires.
4126         hthr = GetCurrentThread();
4127         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4128         start_time = os::javaTimeNanos();
4129         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4130         for (i = 0; ; ) {
4131           int portion_count = handle_count - i;
4132           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4133             portion_count = MAXIMUM_WAIT_OBJECTS;
4134           }
4135           for (j = 0; j < portion_count; ++j) {
4136             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4137           }
4138           timeout_left = (finish_time - start_time) / 1000000L;
4139           if (timeout_left < 0) {
4140             timeout_left = 0;
4141           }
4142           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4143           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4144             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4145                     (res == WAIT_FAILED ? "failed" : "timed out"),
4146                     GetLastError(), __FILE__, __LINE__);
4147             // Reset portion_count so we close the remaining
4148             // handles due to this error.
4149             portion_count = handle_count - i;
4150           }
4151           for (j = 0; j < portion_count; ++j) {
4152             CloseHandle(handles[i + j]);
4153           }
4154           if ((i += portion_count) >= handle_count) {
4155             break;
4156           }
4157           start_time = os::javaTimeNanos();
4158         }
4159         handle_count = 0;
4160       }
4161 
4162       LeaveCriticalSection(&crit_sect);
4163     }
4164 
4165     if (!registered &&
4166         Atomic::load_acquire(&process_exiting) != 0 &&
4167         process_exiting != GetCurrentThreadId()) {
4168       // Some other thread is about to call exit(), so we don't let
4169       // the current unregistered thread proceed to exit() or _endthreadex()
4170       while (true) {
4171         SuspendThread(GetCurrentThread());
4172         // Avoid busy-wait loop, if SuspendThread() failed.
4173         Sleep(EXIT_TIMEOUT);
4174       }
4175     }
4176   }
4177 
4178   // We are here if either
4179   // - there's no 'race at exit' bug on this OS release;
4180   // - initialization of the critical section failed (unlikely);
4181   // - the current thread has registered itself and left the critical section;
4182   // - the process-exiting thread has raised the flag and left the critical section.
4183   if (what == EPT_THREAD) {
4184     _endthreadex((unsigned)exit_code);
4185   } else if (what == EPT_PROCESS) {
4186     ::exit(exit_code);
4187   } else { // EPT_PROCESS_DIE
4188     ::_exit(exit_code);
4189   }
4190 
4191   // Should not reach here
4192   return exit_code;
4193 }
4194 
4195 #undef EXIT_TIMEOUT
4196 
4197 void os::win32::setmode_streams() {
4198   _setmode(_fileno(stdin), _O_BINARY);
4199   _setmode(_fileno(stdout), _O_BINARY);
4200   _setmode(_fileno(stderr), _O_BINARY);
4201 }
4202 
4203 void os::wait_for_keypress_at_exit(void) {
4204   if (PauseAtExit) {
4205     fprintf(stderr, "Press any key to continue...\n");
4206     fgetc(stdin);
4207   }
4208 }
4209 
4210 
4211 bool os::message_box(const char* title, const char* message) {
4212   int result = MessageBox(NULL, message, title,
4213                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4214   return result == IDYES;
4215 }
4216 
4217 #ifndef PRODUCT
4218 #ifndef _WIN64
4219 // Helpers to check whether NX protection is enabled
4220 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4221   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4222       pex->ExceptionRecord->NumberParameters > 0 &&
4223       pex->ExceptionRecord->ExceptionInformation[0] ==
4224       EXCEPTION_INFO_EXEC_VIOLATION) {
4225     return EXCEPTION_EXECUTE_HANDLER;
4226   }
4227   return EXCEPTION_CONTINUE_SEARCH;
4228 }
4229 
4230 void nx_check_protection() {
4231   // If NX is enabled we'll get an exception calling into code on the stack
4232   char code[] = { (char)0xC3 }; // ret
4233   void *code_ptr = (void *)code;
4234   __try {
4235     __asm call code_ptr
4236   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4237     tty->print_raw_cr("NX protection detected.");
4238   }
4239 }
4240 #endif // _WIN64
4241 #endif // PRODUCT
4242 
4243 // This is called _before_ the global arguments have been parsed
4244 void os::init(void) {
4245   _initial_pid = _getpid();
4246 
4247   win32::initialize_system_info();
4248   win32::setmode_streams();
4249   _page_sizes.add(win32::vm_page_size());
4250 
4251   // This may be overridden later when argument processing is done.
4252   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4253 
4254   // Initialize main_process and main_thread
4255   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4256   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4257                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4258     fatal("DuplicateHandle failed\n");
4259   }
4260   main_thread_id = (int) GetCurrentThreadId();
4261 
4262   // initialize fast thread access - only used for 32-bit
4263   win32::initialize_thread_ptr_offset();
4264 }
4265 
4266 // To install functions for atexit processing
4267 extern "C" {
4268   static void perfMemory_exit_helper() {
4269     perfMemory_exit();
4270   }
4271 }
4272 
4273 static jint initSock();
4274 
4275 
4276 // This is called _after_ the global arguments have been parsed
4277 jint os::init_2(void) {
4278 
4279   // This could be set at any time, but all platforms
4280   // have to set it the same, so we mirror Solaris.
4281   DEBUG_ONLY(os::set_mutex_init_done();)
4282 
4283   // Setup Windows Exceptions
4284 
4285 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
4286   topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
4287   previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
4288 #endif
4289 
4290   // for debugging float code generation bugs
4291 #if defined(ASSERT) && !defined(_WIN64)
4292   static long fp_control_word = 0;
4293   __asm { fstcw fp_control_word }
4294   // see Intel PPro Manual, Vol. 2, p 7-16
4295   const long invalid   = 0x01;
4296   fp_control_word |= invalid;
4297   __asm { fldcw fp_control_word }
4298 #endif
4299 
4300   // If stack_commit_size is 0, Windows will reserve the default size,
4301   // but only commit a small portion of it.
4302   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4303   size_t default_reserve_size = os::win32::default_stack_size();
4304   size_t actual_reserve_size = stack_commit_size;
4305   if (stack_commit_size < default_reserve_size) {
4306     // If stack_commit_size == 0, we want this too
4307     actual_reserve_size = default_reserve_size;
4308   }
4309 
4310   // Check minimum allowable stack size for thread creation and to initialize
4311   // the java system classes, including StackOverflowError - depends on page
4312   // size.  Add two 4K pages for compiler2 recursion in main thread.
4313   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4314   // class initialization depending on 32 or 64 bit VM.
4315   size_t min_stack_allowed =
4316             (size_t)(StackOverflow::stack_guard_zone_size() +
4317                      StackOverflow::stack_shadow_zone_size() +
4318                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
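       // For illustration only: on a 64-bit VM with C2 the last term above works
       // out to (4 * 8 + 2) * 4 * K = 136K on top of the guard and shadow zones,
       // since BytesPerWord is 8 there.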
4319 
4320   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4321 
4322   if (actual_reserve_size < min_stack_allowed) {
4323     tty->print_cr("\nThe Java thread stack size specified is too small. "
4324                   "Specify at least %dk",
4325                   min_stack_allowed / K);
4326     return JNI_ERR;
4327   }
4328 
4329   JavaThread::set_stack_size_at_create(stack_commit_size);
4330 
4331   // Calculate the theoretical max. number of threads to guard against artificial
4332   // out-of-memory situations, where all available address-space has been
4333   // reserved by thread stacks.
4334   assert(actual_reserve_size != 0, "Must have a stack");
4335 
4336   // Calculate the thread limit when we should start doing Virtual Memory
4337   // banging. Currently this is when the threads will have used all but 200MB of space.
4338   //
4339   // TODO: consider performing a similar calculation for commit size instead
4340   // as reserve size, since on a 64-bit platform we'll run into that more
4341   // often than running out of virtual memory space.  We can use the
4342   // lower value of the two calculations as the os_thread_limit.
4343   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4344   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
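       // Illustrative arithmetic (not part of the code): on a 32-bit VM this is
       // (2^31 - 200MB) / actual_reserve_size, so with, say, a 1MB reserve size
       // the limit works out to roughly 1848 threads.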
4345 
4346   // atexit functions are called in the reverse order of their registration.
4347   // There is no limit to the number of functions registered, and atexit
4348   // does not set errno.
4349 
4350   if (PerfAllowAtExitRegistration) {
4351     // only register atexit functions if PerfAllowAtExitRegistration is set.
4352     // atexit functions can be delayed until process exit time, which
4353     // can be problematic for embedded VM situations. Embedded VMs should
4354     // call DestroyJavaVM() to assure that VM resources are released.
4355 
4356     // note: perfMemory_exit_helper atexit function may be removed in
4357     // the future if the appropriate cleanup code can be added to the
4358     // VM_Exit VMOperation's doit method.
4359     if (atexit(perfMemory_exit_helper) != 0) {
4360       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4361     }
4362   }
4363 
4364 #ifndef _WIN64
4365   // Print something if NX is enabled (win32 on AMD64)
4366   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4367 #endif
4368 
4369   // initialize thread priority policy
4370   prio_init();
4371 
4372   UseNUMA = false; // We don't fully support this yet
4373 
4374   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4375     if (!numa_interleaving_init()) {
4376       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4377     } else if (!UseNUMAInterleaving) {
4378       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4379       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4380     }
4381   }
4382 
4383   if (initSock() != JNI_OK) {
4384     return JNI_ERR;
4385   }
4386 
4387   SymbolEngine::recalc_search_path();
4388 
4389   // Initialize data for jdk.internal.misc.Signal
4390   if (!ReduceSignalUsage) {
4391     jdk_misc_signal_init();
4392   }
4393 
4394   // Look up SetThreadDescription - the docs state we must use runtime-linking of
4395   // kernelbase.dll, so that is what we do.
4396   HINSTANCE _kernelbase = LoadLibrary(TEXT("kernelbase.dll"));
4397   if (_kernelbase != NULL) {
4398     _SetThreadDescription =
4399       reinterpret_cast<SetThreadDescriptionFnPtr>(
4400                                                   GetProcAddress(_kernelbase,
4401                                                                  "SetThreadDescription"));
4402 #ifdef ASSERT
4403     _GetThreadDescription =
4404       reinterpret_cast<GetThreadDescriptionFnPtr>(
4405                                                   GetProcAddress(_kernelbase,
4406                                                                  "GetThreadDescription"));
4407 #endif
4408   }
4409   log_info(os, thread)("The SetThreadDescription API is%s available.", _SetThreadDescription == NULL ? " not" : "");
4410 
4411 
4412   return JNI_OK;
4413 }
4414 
4415 // combine the high and low DWORD into a ULONGLONG
4416 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4417   ULONGLONG value = high_word;
4418   value <<= sizeof(high_word) * 8;
4419   value |= low_word;
4420   return value;
4421 }
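     // For example, make_double_word(0x00000001, 0x00000002) shifts the high word
     // left by 32 bits and ORs in the low word, yielding 0x0000000100000002.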
4422 
4423 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4424 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4425   ::memset((void*)sbuf, 0, sizeof(struct stat));
4426   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4427   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4428                                   file_data.ftLastWriteTime.dwLowDateTime);
4429   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4430                                   file_data.ftCreationTime.dwLowDateTime);
4431   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4432                                   file_data.ftLastAccessTime.dwLowDateTime);
4433   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4434     sbuf->st_mode |= S_IFDIR;
4435   } else {
4436     sbuf->st_mode |= S_IFREG;
4437   }
4438 }
4439 
4440 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4441   // Get required buffer size to convert to Unicode
4442   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4443                                              MB_ERR_INVALID_CHARS,
4444                                              char_path, -1,
4445                                              NULL, 0);
4446   if (unicode_path_len == 0) {
4447     return EINVAL;
4448   }
4449 
4450   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4451 
4452   int result = MultiByteToWideChar(CP_ACP,
4453                                    MB_ERR_INVALID_CHARS,
4454                                    char_path, -1,
4455                                    *unicode_path, unicode_path_len);
4456   assert(result == unicode_path_len, "length already checked above");
4457 
4458   return ERROR_SUCCESS;
4459 }
4460 
4461 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4462   // Get required buffer size to convert to full path. The return
4463   // value INCLUDES the terminating null character.
4464   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4465   if (full_path_len == 0) {
4466     return EINVAL;
4467   }
4468 
4469   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4470 
4471   // When the buffer has sufficient size, the return value EXCLUDES the
4472   // terminating null character
4473   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4474   assert(result <= full_path_len, "length already checked above");
4475 
4476   return ERROR_SUCCESS;
4477 }
4478 
4479 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4480   *prefix_off = 0;
4481   *needs_fullpath = true;
4482 
4483   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4484     *prefix = L"\\\\?\\";
4485   } else if (buf[0] == '\\' && buf[1] == '\\') {
4486     if (buf[2] == '?' && buf[3] == '\\') {
4487       *prefix = L"";
4488       *needs_fullpath = false;
4489     } else {
4490       *prefix = L"\\\\?\\UNC";
4491       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4492     }
4493   } else {
4494     *prefix = L"\\\\?\\";
4495   }
4496 }
4497 
4498 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4499 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4500 // additional_space is the number of extra wchar_t units the function will add to
4501 // the allocation of the return buffer (such that the size of the returned buffer
4502 // is at least wcslen(buf) + 1 + additional_space).
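     // For illustration (following set_path_prefix() above; relative inputs are
     // additionally made absolute via GetFullPathNameW()):
     //   "C:\dir\file"          becomes  "\\?\C:\dir\file"
     //   "\\server\share\file"  becomes  "\\?\UNC\server\share\file"
     //   "\\?\C:\dir\file"      is already in long-path form and is kept as-is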
4503 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4504   if ((path == NULL) || (path[0] == '\0')) {
4505     err = ENOENT;
4506     return NULL;
4507   }
4508 
4509   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4510   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4511   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4512   strncpy(buf, path, buf_len);
4513   os::native_path(buf);
4514 
4515   LPWSTR prefix = NULL;
4516   int prefix_off = 0;
4517   bool needs_fullpath = true;
4518   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4519 
4520   LPWSTR unicode_path = NULL;
4521   err = convert_to_unicode(buf, &unicode_path);
4522   FREE_C_HEAP_ARRAY(char, buf);
4523   if (err != ERROR_SUCCESS) {
4524     return NULL;
4525   }
4526 
4527   LPWSTR converted_path = NULL;
4528   if (needs_fullpath) {
4529     err = get_full_path(unicode_path, &converted_path);
4530   } else {
4531     converted_path = unicode_path;
4532   }
4533 
4534   LPWSTR result = NULL;
4535   if (converted_path != NULL) {
4536     size_t prefix_len = wcslen(prefix);
4537     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4538     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4539     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4540 
4541     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4542     result_len = wcslen(result);
4543     if ((result[result_len - 1] == L'\\') &&
4544         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4545       result[result_len - 1] = L'\0';
4546     }
4547   }
4548 
4549   if (converted_path != unicode_path) {
4550     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4551   }
4552   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4553 
4554   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4555 }
4556 
4557 int os::stat(const char *path, struct stat *sbuf) {
4558   errno_t err;
4559   wchar_t* wide_path = wide_abs_unc_path(path, err);
4560 
4561   if (wide_path == NULL) {
4562     errno = err;
4563     return -1;
4564   }
4565 
4566   WIN32_FILE_ATTRIBUTE_DATA file_data;
4567   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4568   os::free(wide_path);
4569 
4570   if (!bret) {
4571     errno = ::GetLastError();
4572     return -1;
4573   }
4574 
4575   file_attribute_data_to_stat(sbuf, file_data);
4576   return 0;
4577 }
4578 
4579 static HANDLE create_read_only_file_handle(const char* file) {
4580   errno_t err;
4581   wchar_t* wide_path = wide_abs_unc_path(file, err);
4582 
4583   if (wide_path == NULL) {
4584     errno = err;
4585     return INVALID_HANDLE_VALUE;
4586   }
4587 
4588   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4589                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4590   os::free(wide_path);
4591 
4592   return handle;
4593 }
4594 
4595 bool os::same_files(const char* file1, const char* file2) {
4596 
4597   if (file1 == NULL && file2 == NULL) {
4598     return true;
4599   }
4600 
4601   if (file1 == NULL || file2 == NULL) {
4602     return false;
4603   }
4604 
4605   if (strcmp(file1, file2) == 0) {
4606     return true;
4607   }
4608 
4609   char* native_file1 = os::strdup_check_oom(file1);
4610   native_file1 = os::native_path(native_file1);
4611   char* native_file2 = os::strdup_check_oom(file2);
4612   native_file2 = os::native_path(native_file2);
4613   if (strcmp(native_file1, native_file2) == 0) {
4614     os::free(native_file1);
4615     os::free(native_file2);
4616     return true;
4617   }
4618 
4619   HANDLE handle1 = create_read_only_file_handle(native_file1);
4620   HANDLE handle2 = create_read_only_file_handle(native_file2);
4621   bool result = false;
4622 
4623   // if we could open both paths...
4624   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4625     BY_HANDLE_FILE_INFORMATION fileInfo1;
4626     BY_HANDLE_FILE_INFORMATION fileInfo2;
4627     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4628       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4629       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4630       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4631         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4632         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4633         result = true;
4634       }
4635     }
4636   }
4637 
4638   //free the handles
4639   if (handle1 != INVALID_HANDLE_VALUE) {
4640     ::CloseHandle(handle1);
4641   }
4642 
4643   if (handle2 != INVALID_HANDLE_VALUE) {
4644     ::CloseHandle(handle2);
4645   }
4646 
4647   os::free(native_file1);
4648   os::free(native_file2);
4649 
4650   return result;
4651 }
4652 
4653 #define FT2INT64(ft) \
4654   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
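     // A FILETIME counts 100-nanosecond intervals, so the thread_cpu_time()
     // variants below multiply the FT2INT64 result by 100 to report nanoseconds.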
4655 
4656 
4657 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4658 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4659 // of a thread.
4660 //
4661 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4662 // the fast estimate available on the platform.
4663 
4664 // current_thread_cpu_time() is not optimized for Windows yet
4665 jlong os::current_thread_cpu_time() {
4666   // return user + sys since the cost is the same
4667   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4668 }
4669 
4670 jlong os::thread_cpu_time(Thread* thread) {
4671   // consistent with what current_thread_cpu_time() returns.
4672   return os::thread_cpu_time(thread, true /* user+sys */);
4673 }
4674 
4675 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4676   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4677 }
4678 
4679 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4680   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4681   // If this function changes, os::is_thread_cpu_time_supported() should too
4682   FILETIME CreationTime;
4683   FILETIME ExitTime;
4684   FILETIME KernelTime;
4685   FILETIME UserTime;
4686 
4687   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4688                       &ExitTime, &KernelTime, &UserTime) == 0) {
4689     return -1;
4690   } else if (user_sys_cpu_time) {
4691     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4692   } else {
4693     return FT2INT64(UserTime) * 100;
4694   }
4695 }
4696 
4697 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4698   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4699   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4700   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4701   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4702 }
4703 
4704 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4705   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4706   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4707   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4708   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4709 }
4710 
4711 bool os::is_thread_cpu_time_supported() {
4712   // see os::thread_cpu_time
4713   FILETIME CreationTime;
4714   FILETIME ExitTime;
4715   FILETIME KernelTime;
4716   FILETIME UserTime;
4717 
4718   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4719                       &KernelTime, &UserTime) == 0) {
4720     return false;
4721   } else {
4722     return true;
4723   }
4724 }
4725 
4726 // Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4727 // It does have primitives (PDH API) to get CPU usage and run queue length.
4728 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4729 // If we wanted to implement loadavg on Windows, we have a few options:
4730 //
4731 // a) Query CPU usage and run queue length and "fake" an answer by
4732 //    returning the CPU usage if it's under 100%, and the run queue
4733 //    length otherwise.  It turns out that querying is pretty slow
4734 //    on Windows, on the order of 200 microseconds on a fast machine.
4735 //    Note that on Windows the CPU usage value is the % usage
4736 //    since the last time the API was called (and the first call
4737 //    returns 100%), so we'd have to deal with that as well.
4738 //
4739 // b) Sample the "fake" answer using a sampling thread and store
4740 //    the answer in a global variable.  The call to loadavg would
4741 //    just return the value of the global, avoiding the slow query.
4742 //
4743 // c) Sample a better answer using exponential decay to smooth the
4744 //    value.  This is basically the algorithm used by UNIX kernels.
4745 //
4746 // Note that sampling thread starvation could affect both (b) and (c).
4747 int os::loadavg(double loadavg[], int nelem) {
4748   return -1;
4749 }
4750 
4751 
4752 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4753 bool os::dont_yield() {
4754   return DontYieldALot;
4755 }
4756 
4757 int os::open(const char *path, int oflag, int mode) {
4758   errno_t err;
4759   wchar_t* wide_path = wide_abs_unc_path(path, err);
4760 
4761   if (wide_path == NULL) {
4762     errno = err;
4763     return -1;
4764   }
4765   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4766   os::free(wide_path);
4767 
4768   if (fd == -1) {
4769     errno = ::GetLastError();
4770   }
4771 
4772   return fd;
4773 }
4774 
4775 FILE* os::open(int fd, const char* mode) {
4776   return ::_fdopen(fd, mode);
4777 }
4778 
4779 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
4780   return ::write(fd, buf, nBytes);
4781 }
4782 
4783 int os::close(int fd) {
4784   return ::close(fd);
4785 }
4786 
4787 void os::exit(int num) {
4788   win32::exit_process_or_thread(win32::EPT_PROCESS, num);
4789 }
4790 
4791 void os::_exit(int num) {
4792   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, num);
4793 }
4794 
4795 // Is a (classpath) directory empty?
4796 bool os::dir_is_empty(const char* path) {
4797   errno_t err;
4798   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4799 
4800   if (wide_path == NULL) {
4801     errno = err;
4802     return false;
4803   }
4804 
4805   // Make sure we end with "\\*"
4806   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4807     wcscat(wide_path, L"*");
4808   } else {
4809     wcscat(wide_path, L"\\*");
4810   }
4811 
4812   WIN32_FIND_DATAW fd;
4813   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4814   os::free(wide_path);
4815   bool is_empty = true;
4816 
4817   if (f != INVALID_HANDLE_VALUE) {
4818     while (is_empty && ::FindNextFileW(f, &fd)) {
4819       // An empty directory contains only the current directory file
4820       // and the previous directory file.
4821       if ((wcscmp(fd.cFileName, L".") != 0) &&
4822           (wcscmp(fd.cFileName, L"..") != 0)) {
4823         is_empty = false;
4824       }
4825     }
4826     FindClose(f);
4827   } else {
4828     errno = ::GetLastError();
4829   }
4830 
4831   return is_empty;
4832 }
4833 
4834 // create binary file, rewriting existing file if required
4835 int os::create_binary_file(const char* path, bool rewrite_existing) {
4836   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4837   oflags |= rewrite_existing ? _O_TRUNC : _O_EXCL;
4838   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4839 }
4840 
4841 // return current position of file pointer
4842 jlong os::current_file_offset(int fd) {
4843   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4844 }
4845 
4846 // move file pointer to the specified offset
4847 jlong os::seek_to_file_offset(int fd, jlong offset) {
4848   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4849 }
4850 
4851 
4852 jlong os::lseek(int fd, jlong offset, int whence) {
4853   return (jlong) ::_lseeki64(fd, offset, whence);
4854 }
4855 
4856 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4857   OVERLAPPED ov;
4858   DWORD nread;
4859   BOOL result;
4860 
4861   ZeroMemory(&ov, sizeof(ov));
4862   ov.Offset = (DWORD)offset;
4863   ov.OffsetHigh = (DWORD)(offset >> 32);
4864 
4865   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4866 
4867   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4868 
4869   return result ? nread : 0;
4870 }
4871 
4872 
4873 // This method is a slightly reworked copy of JDK's sysNativePath
4874 // from src/windows/hpi/src/path_md.c
4875 
4876 // Convert a pathname to native format.  On win32, this involves forcing all
4877 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4878 // sometimes rejects '/') and removing redundant separators.  The input path is
4879 // assumed to have been converted into the character encoding used by the local
4880 // system.  Because this might be a double-byte encoding, care is taken to
4881 // treat double-byte lead characters correctly.
4882 //
4883 // This procedure modifies the given path in place, as the result is never
4884 // longer than the original.  There is no error return; this operation always
4885 // succeeds.
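     // For illustration (derived from the rules above):
     //   "/c:/path//to/file"  becomes  "c:\path\to\file"
     //   "//host//share/"     becomes  "\\host\share"  (the leading "\\" is kept)
     //   "z:"                 becomes  "z:."           (see the CRT workaround below)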
4886 char * os::native_path(char *path) {
4887   char *src = path, *dst = path, *end = path;
4888   char *colon = NULL;  // If a drive specifier is found, this will
4889                        // point to the colon following the drive letter
4890 
4891   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4892   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4893           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4894 
4895   // Check for leading separators
4896 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4897   while (isfilesep(*src)) {
4898     src++;
4899   }
4900 
4901   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4902     // Remove leading separators if followed by drive specifier.  This
4903     // hack is necessary to support file URLs containing drive
4904     // specifiers (e.g., "file://c:/path").  As a side effect,
4905     // "/c:/path" can be used as an alternative to "c:/path".
4906     *dst++ = *src++;
4907     colon = dst;
4908     *dst++ = ':';
4909     src++;
4910   } else {
4911     src = path;
4912     if (isfilesep(src[0]) && isfilesep(src[1])) {
4913       // UNC pathname: Retain first separator; leave src pointed at
4914       // second separator so that further separators will be collapsed
4915       // into the second separator.  The result will be a pathname
4916       // beginning with "\\\\" followed (most likely) by a host name.
4917       src = dst = path + 1;
4918       path[0] = '\\';     // Force first separator to '\\'
4919     }
4920   }
4921 
4922   end = dst;
4923 
4924   // Remove redundant separators from remainder of path, forcing all
4925   // separators to be '\\' rather than '/'. Also, single byte space
4926   // characters are removed from the end of the path because those
4927   // are not legal ending characters on this operating system.
4928   //
4929   while (*src != '\0') {
4930     if (isfilesep(*src)) {
4931       *dst++ = '\\'; src++;
4932       while (isfilesep(*src)) src++;
4933       if (*src == '\0') {
4934         // Check for trailing separator
4935         end = dst;
4936         if (colon == dst - 2) break;  // "z:\\"
4937         if (dst == path + 1) break;   // "\\"
4938         if (dst == path + 2 && isfilesep(path[0])) {
4939           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4940           // beginning of a UNC pathname.  Even though it is not, by
4941           // itself, a valid UNC pathname, we leave it as is in order
4942           // to be consistent with the path canonicalizer as well
4943           // as the win32 APIs, which treat this case as an invalid
4944           // UNC pathname rather than as an alias for the root
4945           // directory of the current drive.
4946           break;
4947         }
4948         end = --dst;  // Path does not denote a root directory, so
4949                       // remove trailing separator
4950         break;
4951       }
4952       end = dst;
4953     } else {
4954       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4955         *dst++ = *src++;
4956         if (*src) *dst++ = *src++;
4957         end = dst;
4958       } else {  // Copy a single-byte character
4959         char c = *src++;
4960         *dst++ = c;
4961         // Space is not a legal ending character
4962         if (c != ' ') end = dst;
4963       }
4964     }
4965   }
4966 
4967   *end = '\0';
4968 
4969   // For "z:", add "." to work around a bug in the C runtime library
4970   if (colon == dst - 1) {
4971     path[2] = '.';
4972     path[3] = '\0';
4973   }
4974 
4975   return path;
4976 }
4977 
4978 // This code is a copy of JDK's sysSetLength
4979 // from src/windows/hpi/src/sys_api_md.c
4980 
4981 int os::ftruncate(int fd, jlong length) {
4982   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4983   long high = (long)(length >> 32);
4984   DWORD ret;
4985 
4986   if (h == (HANDLE)(-1)) {
4987     return -1;
4988   }
4989 
4990   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4991   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4992     return -1;
4993   }
4994 
4995   if (::SetEndOfFile(h) == FALSE) {
4996     return -1;
4997   }
4998 
4999   return 0;
5000 }
5001 
5002 int os::get_fileno(FILE* fp) {
5003   return _fileno(fp);
5004 }
5005 
5006 // This code is a copy of JDK's sysSync
5007 // from src/windows/hpi/src/sys_api_md.c
5008 // except for the legacy workaround for a bug in Win 98
5009 
5010 int os::fsync(int fd) {
5011   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
5012 
5013   if ((!::FlushFileBuffers(handle)) &&
5014       (GetLastError() != ERROR_ACCESS_DENIED)) {
5015     // from winerror.h
5016     return -1;
5017   }
5018   return 0;
5019 }
5020 
5021 static int nonSeekAvailable(int, long *);
5022 static int stdinAvailable(int, long *);
5023 
5024 // This code is a copy of JDK's sysAvailable
5025 // from src/windows/hpi/src/sys_api_md.c
5026 
5027 int os::available(int fd, jlong *bytes) {
5028   jlong cur, end;
5029   struct _stati64 stbuf64;
5030 
5031   if (::_fstati64(fd, &stbuf64) >= 0) {
5032     int mode = stbuf64.st_mode;
5033     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
5034       int ret;
5035       long lpbytes;
5036       if (fd == 0) {
5037         ret = stdinAvailable(fd, &lpbytes);
5038       } else {
5039         ret = nonSeekAvailable(fd, &lpbytes);
5040       }
5041       (*bytes) = (jlong)(lpbytes);
5042       return ret;
5043     }
5044     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
5045       return FALSE;
5046     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
5047       return FALSE;
5048     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
5049       return FALSE;
5050     }
5051     *bytes = end - cur;
5052     return TRUE;
5053   } else {
5054     return FALSE;
5055   }
5056 }
5057 
5058 void os::flockfile(FILE* fp) {
5059   _lock_file(fp);
5060 }
5061 
5062 void os::funlockfile(FILE* fp) {
5063   _unlock_file(fp);
5064 }
5065 
5066 // This code is a copy of JDK's nonSeekAvailable
5067 // from src/windows/hpi/src/sys_api_md.c
5068 
5069 static int nonSeekAvailable(int fd, long *pbytes) {
5070   // This is used for available on non-seekable devices
5071   // (like both named and anonymous pipes, such as pipes
5072   //  connected to an exec'd process).
5073   // Standard Input is a special case.
5074   HANDLE han;
5075 
5076   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
5077     return FALSE;
5078   }
5079 
5080   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
5081     // PeekNamedPipe fails when at EOF.  In that case we
5082     // simply make *pbytes = 0 which is consistent with the
5083     // behavior we get on Solaris when an fd is at EOF.
5084     // The only alternative is to raise an Exception,
5085     // which isn't really warranted.
5086     //
5087     if (::GetLastError() != ERROR_BROKEN_PIPE) {
5088       return FALSE;
5089     }
5090     *pbytes = 0;
5091   }
5092   return TRUE;
5093 }
5094 
5095 #define MAX_INPUT_EVENTS 2000
5096 
5097 // This code is a copy of JDK's stdinAvailable
5098 // from src/windows/hpi/src/sys_api_md.c
5099 
5100 static int stdinAvailable(int fd, long *pbytes) {
5101   HANDLE han;
5102   DWORD numEventsRead = 0;  // Number of events read from buffer
5103   DWORD numEvents = 0;      // Number of events in buffer
5104   DWORD i = 0;              // Loop index
5105   DWORD curLength = 0;      // Position marker
5106   DWORD actualLength = 0;   // Number of bytes readable
5107   BOOL error = FALSE;       // Error holder
5108   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
5109 
5110   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
5111     return FALSE;
5112   }
5113 
5114   // Construct an array of input records in the console buffer
5115   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
5116   if (error == 0) {
5117     return nonSeekAvailable(fd, pbytes);
5118   }
5119 
5120   // lpBuffer must fit into 64K or else PeekConsoleInput fails
5121   if (numEvents > MAX_INPUT_EVENTS) {
5122     numEvents = MAX_INPUT_EVENTS;
5123   }
5124 
5125   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
5126   if (lpBuffer == NULL) {
5127     return FALSE;
5128   }
5129 
5130   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
5131   if (error == 0) {
5132     os::free(lpBuffer);
5133     return FALSE;
5134   }
5135 
5136   // Examine input records for the number of bytes available
5137   for (i=0; i<numEvents; i++) {
5138     if (lpBuffer[i].EventType == KEY_EVENT) {
5139 
5140       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
5141                                       &(lpBuffer[i].Event);
5142       if (keyRecord->bKeyDown == TRUE) {
5143         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
5144         curLength++;
5145         if (*keyPressed == '\r') {
5146           actualLength = curLength;
5147         }
5148       }
5149     }
5150   }
5151 
5152   if (lpBuffer != NULL) {
5153     os::free(lpBuffer);
5154   }
5155 
5156   *pbytes = (long) actualLength;
5157   return TRUE;
5158 }
5159 
5160 // Map a block of memory.
5161 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5162                         char *addr, size_t bytes, bool read_only,
5163                         bool allow_exec) {
5164 
5165   errno_t err;
5166   wchar_t* wide_path = wide_abs_unc_path(file_name, err);
5167 
5168   if (wide_path == NULL) {
5169     return NULL;
5170   }
5171 
5172   HANDLE hFile;
5173   char* base;
5174 
5175   hFile = CreateFileW(wide_path, GENERIC_READ, FILE_SHARE_READ, NULL,
5176                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
5177   if (hFile == INVALID_HANDLE_VALUE) {
5178     log_info(os)("CreateFileW() failed: GetLastError->%ld.", GetLastError());
5179     os::free(wide_path);
5180     return NULL;
5181   }
5182   os::free(wide_path);
5183 
5184   if (allow_exec) {
5185     // CreateFileMapping/MapViewOfFileEx can't map executable memory
5186     // unless it comes from a PE image (which the shared archive is not.)
5187     // Even VirtualProtect refuses to give execute access to mapped memory
5188     // that was not previously executable.
5189     //
5190     // Instead, stick the executable region in anonymous memory.  Yuck.
5191     // Penalty is that ~4 pages will not be shareable - in the future
5192     // we might consider DLLizing the shared archive with a proper PE
5193     // header so that mapping executable + sharing is possible.
5194 
5195     base = (char*) virtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5196                                 PAGE_READWRITE);
5197     if (base == NULL) {
5198       CloseHandle(hFile);
5199       return NULL;
5200     }
5201 
5202     // Record virtual memory allocation
5203     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5204 
5205     DWORD bytes_read;
5206     OVERLAPPED overlapped;
5207     overlapped.Offset = (DWORD)file_offset;
5208     overlapped.OffsetHigh = 0;
5209     overlapped.hEvent = NULL;
5210     // ReadFile guarantees that if the return value is true, the requested
5211     // number of bytes were read before returning.
5212     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5213     if (!res) {
5214       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5215       release_memory(base, bytes);
5216       CloseHandle(hFile);
5217       return NULL;
5218     }
5219   } else {
5220     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5221                                     NULL /* file_name */);
5222     if (hMap == NULL) {
5223       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5224       CloseHandle(hFile);
5225       return NULL;
5226     }
5227 
5228     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5229     base = (char*)mapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5230                                   (DWORD)bytes, addr);
5231     if (base == NULL) {
5232       CloseHandle(hMap);
5233       CloseHandle(hFile);
5234       return NULL;
5235     }
5236 
5237     if (CloseHandle(hMap) == 0) {
5238       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5239       CloseHandle(hFile);
5240       return base;
5241     }
5242   }
5243 
5244   if (allow_exec) {
5245     DWORD old_protect;
5246     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5247     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5248 
5249     if (!res) {
5250       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5251       // Don't consider this a hard error, on IA32 even if the
5252       // VirtualProtect fails, we should still be able to execute
5253       CloseHandle(hFile);
5254       return base;
5255     }
5256   }
5257 
5258   if (CloseHandle(hFile) == 0) {
5259     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5260     return base;
5261   }
5262 
5263   return base;
5264 }
5265 
5266 
5267 // Remap a block of memory.
5268 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5269                           char *addr, size_t bytes, bool read_only,
5270                           bool allow_exec) {
5271   // This OS does not allow existing memory maps to be remapped so we
5272   // would have to unmap the memory before we remap it.
5273 
5274   // Because there is a small window between unmapping memory and mapping
5275   // it in again with different protections, CDS archives are mapped RW
5276   // on Windows, so this function isn't called.
5277   ShouldNotReachHere();
5278   return NULL;
5279 }
5280 
5281 
5282 // Unmap a block of memory.
5283 // Returns true=success, otherwise false.
5284 
5285 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5286   MEMORY_BASIC_INFORMATION mem_info;
5287   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5288     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5289     return false;
5290   }
5291 
5292   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5293   // Instead, executable region was allocated using VirtualAlloc(). See
5294   // pd_map_memory() above.
5295   //
5296   // The following flags should match the 'exec_access' flags used for
5297   // VirtualProtect() in pd_map_memory().
5298   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5299       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5300     return pd_release_memory(addr, bytes);
5301   }
5302 
5303   BOOL result = unmapViewOfFile(addr);
5304   if (result == 0) {
5305     return false;
5306   }
5307   return true;
5308 }
5309 
5310 void os::pause() {
5311   char filename[MAX_PATH];
5312   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5313     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5314   } else {
5315     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5316   }
5317 
5318   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5319   if (fd != -1) {
5320     struct stat buf;
5321     ::close(fd);
5322     while (::stat(filename, &buf) == 0) {
5323       Sleep(100);
5324     }
5325   } else {
5326     jio_fprintf(stderr,
5327                 "Could not open pause file '%s', continuing immediately.\n", filename);
5328   }
5329 }
5330 
5331 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5332 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5333 
5334 os::ThreadCrashProtection::ThreadCrashProtection() {
5335   _protected_thread = Thread::current();
5336   assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
5337 }
5338 
5339 // See the caveats for this class in os_windows.hpp
5340 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5341 // into this method, which then returns false. If no OS EXCEPTION was raised,
5342 // returns true.
5343 // The callback is supposed to provide the method that should be protected.
5344 //
5345 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5346   bool success = true;
5347   __try {
5348     _crash_protection = this;
5349     cb.call();
5350   } __except(EXCEPTION_EXECUTE_HANDLER) {
5351     // only for protection, nothing to do
5352     success = false;
5353   }
5354   _crash_protection = NULL;
5355   _protected_thread = NULL;
5356   return success;
5357 }
5358 
5359 
5360 class HighResolutionInterval : public CHeapObj<mtThread> {
5361   // The default timer resolution seems to be 10 milliseconds.
5362   // (Where is this written down?)
5363   // If someone wants to sleep for only a fraction of the default,
5364   // then we set the timer resolution down to 1 millisecond for
5365   // the duration of their interval.
5366   // We carefully set the resolution back, since otherwise we
5367   // seem to incur an overhead (3%?) that we don't need.
5368   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5369   // But if ms is large, say 500 or 503, we should avoid the call to timeBeginPeriod().
5370   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5371   // timeBeginPeriod() if the relative error exceeded some threshold.
5372   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5373   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5374   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5375   // resolution timers running.
5376  private:
5377   jlong resolution;
5378  public:
5379   HighResolutionInterval(jlong ms) {
5380     resolution = ms % 10L;
5381     if (resolution != 0) {
5382       MMRESULT result = timeBeginPeriod(1L);
5383     }
5384   }
5385   ~HighResolutionInterval() {
5386     if (resolution != 0) {
5387       MMRESULT result = timeEndPeriod(1L);
5388     }
5389     resolution = 0L;
5390   }
5391 };
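     // For example, HighResolutionInterval(503) calls timeBeginPeriod(1) because
     // 503 % 10 != 0, whereas HighResolutionInterval(500) leaves the default timer
     // resolution untouched.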
5392 
5393 // An Event wraps a win32 "CreateEvent" kernel handle.
5394 //
5395 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5396 //
5397 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5398 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5399 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5400 //     In addition, an unpark() operation might fetch the handle field, but the
5401 //     event could recycle between the fetch and the SetEvent() operation.
5402 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5403 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5404 //     on a stale but recycled handle would be harmless, but in practice this might
5405 //     confuse other non-Sun code, so it's not a viable approach.
5406 //
5407 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5408 //     with the Event.  The event handle is never closed.  This could be construed
5409 //     as handle leakage, but only up to the maximum # of threads that have been extant
5410 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5411 //     permit a process to have hundreds of thousands of open handles.
5412 //
5413 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5414 //     and release unused handles.
5415 //
5416 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5417 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5418 //
5419 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5420 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5421 //
5422 // We use (2).
5423 //
5424 // TODO-FIXME:
5425 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5426 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5427 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5428 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5429 //     into a single win32 CreateEvent() handle.
5430 //
5431 // Assumption:
5432 //    Only one parker can exist on an event, which is why we allocate
5433 //    them per-thread. Multiple unparkers can coexist.
5434 //
5435 // _Event transitions in park()
5436 //   -1 => -1 : illegal
5437 //    1 =>  0 : pass - return immediately
5438 //    0 => -1 : block; then set _Event to 0 before returning
5439 //
5440 // _Event transitions in unpark()
5441 //    0 => 1 : just return
5442 //    1 => 1 : just return
5443 //   -1 => either 0 or 1; must signal target thread
5444 //         That is, we can safely transition _Event from -1 to either
5445 //         0 or 1.
5446 //
5447 // _Event serves as a restricted-range semaphore.
5448 //   -1 : thread is blocked, i.e. there is a waiter
5449 //    0 : neutral: thread is running or ready,
5450 //        could have been signaled after a wait started
5451 //    1 : signaled - thread is running or ready
5452 //
5453 // Another possible encoding of _Event would be with
5454 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5455 //
5456 
5457 int os::PlatformEvent::park(jlong Millis) {
5458   // Transitions for _Event:
5459   //   -1 => -1 : illegal
5460   //    1 =>  0 : pass - return immediately
5461   //    0 => -1 : block; then set _Event to 0 before returning
5462 
5463   guarantee(_ParkHandle != NULL , "Invariant");
5464   guarantee(Millis > 0          , "Invariant");
5465 
5466   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5467   // the initial park() operation.
5468   // Consider: use atomic decrement instead of CAS-loop
5469 
5470   int v;
5471   for (;;) {
5472     v = _Event;
5473     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5474   }
5475   guarantee((v == 0) || (v == 1), "invariant");
5476   if (v != 0) return OS_OK;
5477 
5478   // Do this the hard way by blocking ...
5479   // TODO: consider a brief spin here, gated on the success of recent
5480   // spin attempts by this thread.
5481   //
5482   // We decompose long timeouts into series of shorter timed waits.
5483   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5484   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5485   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5486   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5487   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5488   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5489   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5490   // for the already waited time.  This policy does not admit any new outcomes.
5491   // In the future, however, we might want to track the accumulated wait time and
5492   // adjust Millis accordingly if we encounter a spurious wakeup.
5493 
5494   const int MAXTIMEOUT = 0x10000000;
5495   DWORD rv = WAIT_TIMEOUT;
5496   while (_Event < 0 && Millis > 0) {
5497     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5498     if (Millis > MAXTIMEOUT) {
5499       prd = MAXTIMEOUT;
5500     }
5501     HighResolutionInterval *phri = NULL;
5502     if (!ForceTimeHighResolution) {
5503       phri = new HighResolutionInterval(prd);
5504     }
5505     rv = ::WaitForSingleObject(_ParkHandle, prd);
5506     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5507     if (rv == WAIT_TIMEOUT) {
5508       Millis -= prd;
5509     }
5510     delete phri; // if it is NULL, harmless
5511   }
5512   v = _Event;
5513   _Event = 0;
5514   // see comment at end of os::PlatformEvent::park() below:
5515   OrderAccess::fence();
5516   // If we encounter a nearly simultaneous timeout expiry and unpark()
5517   // we return OS_OK indicating we awoke via unpark().
5518   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5519   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5520 }
5521 
5522 void os::PlatformEvent::park() {
5523   // Transitions for _Event:
5524   //   -1 => -1 : illegal
5525   //    1 =>  0 : pass - return immediately
5526   //    0 => -1 : block; then set _Event to 0 before returning
5527 
5528   guarantee(_ParkHandle != NULL, "Invariant");
5529   // Invariant: Only the thread associated with the Event/PlatformEvent
5530   // may call park().
5531   // Consider: use atomic decrement instead of CAS-loop
5532   int v;
5533   for (;;) {
5534     v = _Event;
5535     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5536   }
5537   guarantee((v == 0) || (v == 1), "invariant");
5538   if (v != 0) return;
5539 
5540   // Do this the hard way by blocking ...
5541   // TODO: consider a brief spin here, gated on the success of recent
5542   // spin attempts by this thread.
5543   while (_Event < 0) {
5544     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5545     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5546   }
5547 
5548   // Usually we'll find _Event == 0 at this point, but as
5549   // an optional optimization we clear it, just in case
5550   // multiple unpark() operations drove _Event up to 1.
5551   _Event = 0;
5552   OrderAccess::fence();
5553   guarantee(_Event >= 0, "invariant");
5554 }
5555 
5556 void os::PlatformEvent::unpark() {
5557   guarantee(_ParkHandle != NULL, "Invariant");
5558 
5559   // Transitions for _Event:
5560   //    0 => 1 : just return
5561   //    1 => 1 : just return
5562   //   -1 => either 0 or 1; must signal target thread
5563   //         That is, we can safely transition _Event from -1 to either
5564   //         0 or 1.
5565   // See also: "Semaphores in Plan 9" by Mullender & Cox
5566   //
5567   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5568   // that it will take two back-to-back park() calls for the owning
5569   // thread to block. This has the benefit of forcing a spurious return
5570   // from the first park() call after an unpark() call which will help
5571   // shake out uses of park() and unpark() without condition variables.
5572 
5573   if (Atomic::xchg(&_Event, 1) >= 0) return;
5574 
5575   ::SetEvent(_ParkHandle);
5576 }
5577 
5578 
5579 // JSR166
5580 // -------------------------------------------------------
5581 
5582 // The Windows implementation of Park is very straightforward: Basic
5583 // operations on Win32 Events turn out to have the right semantics to
5584 // use them directly.
5585 
5586 void Parker::park(bool isAbsolute, jlong time) {
5587   guarantee(_ParkHandle != NULL, "invariant");
5588   // First, demultiplex/decode time arguments
5589   if (time < 0) { // don't wait
5590     return;
5591   } else if (time == 0 && !isAbsolute) {
5592     time = INFINITE;
5593   } else if (isAbsolute) {
5594     time -= os::javaTimeMillis(); // convert to relative time
5595     if (time <= 0) {  // already elapsed
5596       return;
5597     }
5598   } else { // relative
5599     time /= 1000000;  // Must coarsen from nanos to millis
5600     if (time == 0) {  // Wait for the minimal time unit if zero
5601       time = 1;
5602     }
5603   }
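       // For example, a relative timeout of 500000 ns coarsens to 0 ms above and
       // is then bumped to the minimal 1 ms wait, while an absolute deadline that
       // has already passed returns without waiting.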
5604 
5605   JavaThread* thread = JavaThread::current();
5606 
5607   // Don't wait if interrupted or already triggered
5608   if (thread->is_interrupted(false) ||
5609       WaitForSingleObject(_ParkHandle, 0) == WAIT_OBJECT_0) {
5610     ResetEvent(_ParkHandle);
5611     return;
5612   } else {
5613     ThreadBlockInVM tbivm(thread);
5614     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5615 
5616     WaitForSingleObject(_ParkHandle, time);
5617     ResetEvent(_ParkHandle);
5618   }
5619 }
5620 
5621 void Parker::unpark() {
5622   guarantee(_ParkHandle != NULL, "invariant");
5623   SetEvent(_ParkHandle);
5624 }
5625 
5626 // Platform Monitor implementation
5627 
5628 // Must already be locked
5629 int os::PlatformMonitor::wait(jlong millis) {
5630   assert(millis >= 0, "negative timeout");
5631   int ret = OS_TIMEOUT;
5632   int status = SleepConditionVariableCS(&_cond, &_mutex,
5633                                         millis == 0 ? INFINITE : millis);
5634   if (status != 0) {
5635     ret = OS_OK;
5636   }
5637   #ifndef PRODUCT
5638   else {
5639     DWORD err = GetLastError();
5640     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5641   }
5642   #endif
5643   return ret;
5644 }
5645 
5646 // Run the specified command in a separate process. Return its exit value,
5647 // or -1 on failure (e.g. can't create a new process).
5648 int os::fork_and_exec(const char* cmd) {
5649   STARTUPINFO si;
5650   PROCESS_INFORMATION pi;
5651   DWORD exit_code;
5652 
5653   char * cmd_string;
5654   const char * cmd_prefix = "cmd /C ";
5655   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5656   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5657   if (cmd_string == NULL) {
5658     return -1;
5659   }
5660   cmd_string[0] = '\0';
5661   strcat(cmd_string, cmd_prefix);
5662   strcat(cmd_string, cmd);
5663 
5664   // now replace all '\n' with '&'
5665   char * substring = cmd_string;
5666   while ((substring = strchr(substring, '\n')) != NULL) {
5667     substring[0] = '&';
5668     substring++;
5669   }
5670   memset(&si, 0, sizeof(si));
5671   si.cb = sizeof(si);
5672   memset(&pi, 0, sizeof(pi));
5673   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5674                             cmd_string,    // command line
5675                             NULL,   // process security attribute
5676                             NULL,   // thread security attribute
5677                             TRUE,   // inherits system handles
5678                             0,      // no creation flags
5679                             NULL,   // use parent's environment block
5680                             NULL,   // use parent's starting directory
5681                             &si,    // (in) startup information
5682                             &pi);   // (out) process information
5683 
5684   if (rslt) {
5685     // Wait until child process exits.
5686     WaitForSingleObject(pi.hProcess, INFINITE);
5687 
5688     GetExitCodeProcess(pi.hProcess, &exit_code);
5689 
5690     // Close process and thread handles.
5691     CloseHandle(pi.hProcess);
5692     CloseHandle(pi.hThread);
5693   } else {
5694     exit_code = -1;
5695   }
5696 
5697   FREE_C_HEAP_ARRAY(char, cmd_string);
5698   return (int)exit_code;
5699 }
5700 
5701 bool os::find(address addr, outputStream* st) {
5702   int offset = -1;
5703   bool result = false;
5704   char buf[256];
5705   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5706     st->print(PTR_FORMAT " ", addr);
5707     if (strlen(buf) < sizeof(buf) - 1) {
5708       char* p = strrchr(buf, '\\');
5709       if (p) {
5710         st->print("%s", p + 1);
5711       } else {
5712         st->print("%s", buf);
5713       }
5714     } else {
5715         // The library name is probably truncated. Let's omit the library name.
5716         // See also JDK-8147512.
5717     }
5718     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5719       st->print("::%s + 0x%x", buf, offset);
5720     }
5721     st->cr();
5722     result = true;
5723   }
5724   return result;
5725 }
5726 
5727 static jint initSock() {
5728   WSADATA wsadata;
5729 
5730   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5731     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5732                 ::GetLastError());
5733     return JNI_ERR;
5734   }
5735   return JNI_OK;
5736 }
5737 
5738 struct hostent* os::get_host_by_name(char* name) {
5739   return (struct hostent*)gethostbyname(name);
5740 }
5741 
5742 int os::socket_close(int fd) {
5743   return ::closesocket(fd);
5744 }
5745 
5746 int os::socket(int domain, int type, int protocol) {
5747   return ::socket(domain, type, protocol);
5748 }
5749 
5750 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5751   return ::connect(fd, him, len);
5752 }
5753 
5754 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5755   return ::recv(fd, buf, (int)nBytes, flags);
5756 }
5757 
5758 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5759   return ::send(fd, buf, (int)nBytes, flags);
5760 }
5761 
5762 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5763   return ::send(fd, buf, (int)nBytes, flags);
5764 }
5765 
5766 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5767 #if defined(IA32)
5768   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5769 #elif defined(AMD64) || defined(_M_ARM64)
5770   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5771 #endif
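// These flags are assigned to CONTEXT::ContextFlags before GetThreadContext() is called in
// os::SuspendedThreadTask::internal_do_task() below; they select which parts of the sampled
// thread's register state are captured.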
5772 
5773 // returns true if thread could be suspended,
5774 // false otherwise
5775 static bool do_suspend(HANDLE* h) {
5776   if (h != NULL) {
    // SuspendThread() returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != (DWORD)-1) {
5778       return true;
5779     }
5780   }
5781   return false;
5782 }
5783 
5784 // resume the thread
5785 // calling resume on an active thread is a no-op
5786 static void do_resume(HANDLE* h) {
5787   if (h != NULL) {
5788     ResumeThread(*h);
5789   }
5790 }
5791 
5792 // retrieve a suspend/resume context capable handle
5793 // from the tid. Caller validates handle return value.
5794 void get_thread_handle_for_extended_context(HANDLE* h,
5795                                             OSThread::thread_id_t tid) {
5796   if (h != NULL) {
5797     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5798   }
5799 }
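// Note: OpenThread() returns NULL on failure; internal_do_task() below checks the returned
// handle against NULL and INVALID_HANDLE_VALUE before using it.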
5800 
5801 // Thread sampling implementation
5802 //
5803 void os::SuspendedThreadTask::internal_do_task() {
5804   CONTEXT    ctxt;
5805   HANDLE     h = NULL;
5806 
5807   // get context capable handle for thread
5808   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5809 
5810   // sanity
5811   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5812     return;
5813   }
5814 
5815   // suspend the thread
5816   if (do_suspend(&h)) {
5817     ctxt.ContextFlags = sampling_context_flags;
5818     // get thread context
5819     GetThreadContext(h, &ctxt);
5820     SuspendedThreadTaskContext context(_thread, &ctxt);
5821     // pass context to Thread Sampling impl
5822     do_task(context);
5823     // resume thread
5824     do_resume(&h);
5825   }
5826 
5827   // close handle
5828   CloseHandle(h);
5829 }
5830 
5831 bool os::start_debugging(char *buf, int buflen) {
5832   int len = (int)strlen(buf);
5833   char *p = &buf[len];
5834 
5835   jio_snprintf(p, buflen-len,
5836              "\n\n"
5837              "Do you want to debug the problem?\n\n"
5838              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5839              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5840              "Otherwise, select 'No' to abort...",
5841              os::current_process_id(), os::current_thread_id());
5842 
5843   bool yes = os::message_box("Unexpected Error", buf);
5844 
5845   if (yes) {
5846     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5847     // exception. If VM is running inside a debugger, the debugger will
5848     // catch the exception. Otherwise, the breakpoint exception will reach
5849     // the default windows exception handler, which can spawn a debugger and
5850     // automatically attach to the dying VM.
5851     os::breakpoint();
5852     yes = false;
5853   }
5854   return yes;
5855 }
5856 
5857 void* os::get_default_process_handle() {
5858   return (void*)GetModuleHandle(NULL);
5859 }
5860 
// Builds a platform-dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked-in agents.
// Additionally, for Windows, __stdcall-decorated names are taken into account.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is an absolute path to the agent,
//                                     such as "C:/a/b/L.dll"
//            == false if only the base name of the library is passed in,
//               such as "L"
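//
// For example (illustrative values): with sym_name == "_Agent_OnLoad@16",
// lib_name == "C:\a\b\L.dll" and is_absolute_path == true, the path and the
// ".dll" suffix are stripped and the result is "_Agent_OnLoad_L@16".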
5871 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5872                                     bool is_absolute_path) {
5873   char *agent_entry_name;
5874   size_t len;
5875   size_t name_len;
5876   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5877   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5878   const char *start;
5879 
5880   if (lib_name != NULL) {
5881     len = name_len = strlen(lib_name);
5882     if (is_absolute_path) {
5883       // Need to strip path, prefix and suffix
5884       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5885         lib_name = ++start;
5886       } else {
5887         // Need to check for drive prefix
5888         if ((start = strchr(lib_name, ':')) != NULL) {
5889           lib_name = ++start;
5890         }
5891       }
5892       if (len <= (prefix_len + suffix_len)) {
5893         return NULL;
5894       }
5895       lib_name += prefix_len;
5896       name_len = strlen(lib_name) - suffix_len;
5897     }
5898   }
5899   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5900   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5901   if (agent_entry_name == NULL) {
5902     return NULL;
5903   }
5904   if (lib_name != NULL) {
5905     const char *p = strrchr(sym_name, '@');
5906     if (p != NULL && p != sym_name) {
5907       // sym_name == _Agent_OnLoad@XX
5908       strncpy(agent_entry_name, sym_name, (p - sym_name));
5909       agent_entry_name[(p-sym_name)] = '\0';
5910       // agent_entry_name == _Agent_OnLoad
5911       strcat(agent_entry_name, "_");
5912       strncat(agent_entry_name, lib_name, name_len);
5913       strcat(agent_entry_name, p);
5914       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5915     } else {
5916       strcpy(agent_entry_name, sym_name);
5917       strcat(agent_entry_name, "_");
5918       strncat(agent_entry_name, lib_name, name_len);
5919     }
5920   } else {
5921     strcpy(agent_entry_name, sym_name);
5922   }
5923   return agent_entry_name;
5924 }
5925 
5926 /*
5927   All the defined signal names for Windows.
5928 
5929   NOTE that not all of these names are accepted by FindSignal!
5930 
5931   For various reasons some of these may be rejected at runtime.
5932 
5933   Here are the names currently accepted by a user of sun.misc.Signal with
5934   1.4.1 (ignoring potential interaction with use of chaining, etc):
5935 
5936      (LIST TBD)
5937 
5938 */
5939 int os::get_signal_number(const char* name) {
5940   static const struct {
5941     const char* name;
5942     int         number;
5943   } siglabels [] =
5944     // derived from version 6.0 VC98/include/signal.h
5945   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5946   "FPE",        SIGFPE,         // floating point exception
5947   "SEGV",       SIGSEGV,        // segment violation
5948   "INT",        SIGINT,         // interrupt
5949   "TERM",       SIGTERM,        // software term signal from kill
5950   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5951   "ILL",        SIGILL};        // illegal instruction
5952   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5953     if (strcmp(name, siglabels[i].name) == 0) {
5954       return siglabels[i].number;
5955     }
5956   }
5957   return -1;
5958 }
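// Illustrative use: os::get_signal_number("TERM") returns SIGTERM, while a name not in
// the table above (for example "HUP", which the Windows CRT does not define) yields -1.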
5959 
5960 // Fast current thread access
5961 
5962 int os::win32::_thread_ptr_offset = 0;
5963 
5964 static void call_wrapper_dummy() {}
5965 
5966 // We need to call the os_exception_wrapper once so that it sets
5967 // up the offset from FS of the thread pointer.
5968 void os::win32::initialize_thread_ptr_offset() {
5969   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5970                            NULL, methodHandle(), NULL, NULL);
5971 }
5972 
5973 bool os::supports_map_sync() {
5974   return false;
5975 }
5976 
5977 #ifdef ASSERT
5978 static void check_meminfo(MEMORY_BASIC_INFORMATION* minfo) {
5979   assert(minfo->State == MEM_FREE || minfo->State == MEM_COMMIT || minfo->State == MEM_RESERVE, "Invalid state");
5980   if (minfo->State != MEM_FREE) {
5981     assert(minfo->AllocationBase != NULL && minfo->BaseAddress >= minfo->AllocationBase, "Invalid pointers");
5982     assert(minfo->RegionSize > 0, "Invalid region size");
5983   }
5984 }
5985 #endif
5986 
5987 
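// Thin wrapper around VirtualQuery: zero-initializes the output structure and treats anything
// other than a completely filled MEMORY_BASIC_INFORMATION as failure (VirtualQuery returns the
// number of bytes written to the buffer, or zero on error).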
5988 static bool checkedVirtualQuery(address addr, MEMORY_BASIC_INFORMATION* minfo) {
5989   ZeroMemory(minfo, sizeof(MEMORY_BASIC_INFORMATION));
5990   if (::VirtualQuery(addr, minfo, sizeof(MEMORY_BASIC_INFORMATION)) == sizeof(MEMORY_BASIC_INFORMATION)) {
5991     DEBUG_ONLY(check_meminfo(minfo);)
5992     return true;
5993   }
5994   return false;
5995 }
5996 
5997 // Given a pointer pointing into an allocation (an area allocated with VirtualAlloc),
5998 //  return information about that allocation.
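//
// Illustrative use (hypothetical caller):
//   os::win32::mapping_info_t mi;
//   if (os::win32::find_mapping(addr, &mi)) {
//     // mi.base .. mi.base + mi.size spans the whole allocation containing addr;
//     // mi.committed_size and mi.regions summarize its committed memory and region count.
//   }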
5999 bool os::win32::find_mapping(address addr, mapping_info_t* mi) {
6000   // Query at addr to find allocation base; then, starting at allocation base,
6001   //  query all regions, until we either find the next allocation or a free area.
6002   ZeroMemory(mi, sizeof(mapping_info_t));
6003   MEMORY_BASIC_INFORMATION minfo;
6004   address allocation_base = NULL;
6005   address allocation_end = NULL;
6006   bool rc = false;
6007   if (checkedVirtualQuery(addr, &minfo)) {
6008     if (minfo.State != MEM_FREE) {
6009       allocation_base = (address)minfo.AllocationBase;
6010       allocation_end = allocation_base;
6011       // Iterate through all regions in this allocation to find its end. While we are here, also count things.
6012       for (;;) {
        const bool query_ok = checkedVirtualQuery(allocation_end, &minfo);
        if (!query_ok ||                                         // VirtualQuery error, end of allocation?
            minfo.State == MEM_FREE ||                           // end of allocation, free memory follows
            (address)minfo.AllocationBase != allocation_base)    // end of allocation, a new one starts
        {
6018           break;
6019         }
6020         const size_t region_size = minfo.RegionSize;
        mi->regions++;
        if (minfo.State == MEM_COMMIT) {
          mi->committed_size += region_size;
6024         }
6025         allocation_end += region_size;
6026       }
6027       if (allocation_base != NULL && allocation_end > allocation_base) {
6028         mi->base = allocation_base;
6029         mi->size = allocation_end - allocation_base;
6030         rc = true;
6031       }
6032     }
6033   }
6034 #ifdef ASSERT
6035   if (rc) {
6036     assert(mi->size > 0 && mi->size >= mi->committed_size, "Sanity");
6037     assert(addr >= mi->base && addr < mi->base + mi->size, "Sanity");
6038     assert(mi->regions > 0, "Sanity");
6039   }
6040 #endif
6041   return rc;
6042 }
6043 
// Helper for print_one_mapping: print a few words at p, both as hex and as ascii.
// Use SafeFetch for all values.
static void print_snippet(const void* p, outputStream* st) {
  static const int num_words = LP64_ONLY(3) NOT_LP64(6);
  static const int num_bytes = num_words * sizeof(intptr_t); // 24 bytes on both 32- and 64-bit
6049   intptr_t v[num_words];
6050   const int errval = 0xDE210244;
6051   for (int i = 0; i < num_words; i++) {
6052     v[i] = SafeFetchN((intptr_t*)p + i, errval);
6053     if (v[i] == errval &&
6054         SafeFetchN((intptr_t*)p + i, ~errval) == ~errval) {
6055       return;
6056     }
6057   }
6058   st->put('[');
6059   for (int i = 0; i < num_words; i++) {
6060     st->print(INTPTR_FORMAT " ", v[i]);
6061   }
6062   const char* b = (char*)v;
6063   st->put('\"');
6064   for (int i = 0; i < num_bytes; i++) {
6065     st->put(::isgraph(b[i]) ? b[i] : '.');
6066   }
6067   st->put('\"');
6068   st->put(']');
6069 }
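// Note: print_snippet() prints nothing if the memory cannot be read safely; otherwise the
// output has the form [<hex word> <hex word> ... "<ascii bytes>"], with unprintable bytes
// shown as '.'.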
6070 
6071 // Helper function for print_memory_mappings:
6072 //  Given a MEMORY_BASIC_INFORMATION, containing information about a non-free region:
6073 //  print out all regions in that allocation. If any of those regions
6074 //  fall outside the given range [start, end), indicate that in the output.
6075 // Return the pointer to the end of the allocation.
6076 static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start, address end, outputStream* st) {
6077   // Print it like this:
6078   //
6079   // Base: <xxxxx>: [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx       (region 1)
6080   //                [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx       (region 2)
6081   assert(minfo->State != MEM_FREE, "Not inside an allocation.");
6082   address allocation_base = (address)minfo->AllocationBase;
  #define IS_IN(p) ((p) >= start && (p) < end)
6084   bool first_line = true;
6085   bool is_dll = false;
6086   for(;;) {
6087     if (first_line) {
6088       st->print("Base " PTR_FORMAT ": ", p2i(allocation_base));
6089     } else {
6090       st->print_raw(NOT_LP64 ("                 ")
6091                     LP64_ONLY("                         "));
6092     }
6093     address region_start = (address)minfo->BaseAddress;
6094     address region_end = region_start + minfo->RegionSize;
6095     assert(region_end > region_start, "Sanity");
6096     if (region_end <= start) {
6097       st->print("<outside range> ");
6098     } else if (region_start >= end) {
6099       st->print("<outside range> ");
6100     } else if (!IS_IN(region_start) || !IS_IN(region_end - 1)) {
6101       st->print("<partly outside range> ");
6102     }
6103     st->print("[" PTR_FORMAT "-" PTR_FORMAT "), state=", p2i(region_start), p2i(region_end));
6104     switch (minfo->State) {
6105       case MEM_COMMIT:  st->print_raw("MEM_COMMIT "); break;
6106       case MEM_FREE:    st->print_raw("MEM_FREE   "); break;
6107       case MEM_RESERVE: st->print_raw("MEM_RESERVE"); break;
6108       default: st->print("%x?", (unsigned)minfo->State);
6109     }
6110     st->print(", prot=%3x, type=", (unsigned)minfo->Protect);
6111     switch (minfo->Type) {
6112       case MEM_IMAGE:   st->print_raw("MEM_IMAGE  "); break;
6113       case MEM_MAPPED:  st->print_raw("MEM_MAPPED "); break;
6114       case MEM_PRIVATE: st->print_raw("MEM_PRIVATE"); break;
      default: st->print("%x?", (unsigned)minfo->Type);
6116     }
6117     // At the start of every allocation, print some more information about this mapping.
6118     // Notes:
6119     //  - this could be beefed up a lot, similar to os::print_location
6120     //  - for now we just query the allocation start point. This may be confusing for cases where
6121     //    the kernel merges multiple mappings.
6122     if (first_line) {
6123       char buf[MAX_PATH];
6124       if (os::dll_address_to_library_name(allocation_base, buf, sizeof(buf), nullptr)) {
6125         st->print(", %s", buf);
6126         is_dll = true;
6127       }
6128     }
6129     // If memory is accessible, and we do not know anything else about it, print a snippet
6130     if (!is_dll &&
6131         minfo->State == MEM_COMMIT &&
6132         !(minfo->Protect & PAGE_NOACCESS || minfo->Protect & PAGE_GUARD)) {
6133       st->print_raw(", ");
6134       print_snippet(region_start, st);
6135     }
6136     st->cr();
6137     // Next region...
6138     bool rc = checkedVirtualQuery(region_end, minfo);
6139     if (rc == false ||                                         // VirtualQuery error, end of allocation?
6140        (minfo->State == MEM_FREE) ||                           // end of allocation, free memory follows
6141        ((address)minfo->AllocationBase != allocation_base) ||  // end of allocation, a new one starts
6142        (region_end > end))                                     // end of range to print.
6143     {
6144       return region_end;
6145     }
6146     first_line = false;
6147   }
6148   #undef IS_IN
6149   ShouldNotReachHere();
6150   return NULL;
6151 }
6152 
6153 void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
6154   MEMORY_BASIC_INFORMATION minfo;
6155   address start = (address)addr;
6156   address end = start + bytes;
6157   address p = start;
  if (p == nullptr) { // Let's skip the zero pages.
6159     p += os::vm_allocation_granularity();
6160   }
6161   address p2 = p; // guard against wraparounds
6162   int fuse = 0;
6163 
6164   while (p < end && p >= p2) {
6165     p2 = p;
6166     // Probe for the next mapping.
6167     if (checkedVirtualQuery(p, &minfo)) {
6168       if (minfo.State != MEM_FREE) {
6169         // Found one. Print it out.
        address next = print_one_mapping(&minfo, start, end, st);
        assert(next > p, "Sanity");
        p = next;
6173       } else {
6174         // Note: for free regions, most of MEMORY_BASIC_INFORMATION is undefined.
6175         //  Only region dimensions are not: use those to jump to the end of
6176         //  the free range.
6177         address region_start = (address)minfo.BaseAddress;
6178         address region_end = region_start + minfo.RegionSize;
6179         assert(p >= region_start && p < region_end, "Sanity");
6180         p = region_end;
6181       }
6182     } else {
      // The MSDN documentation for VirtualQuery does not clearly state what an error return
      //  means - in particular, whether querying an address outside any mapping reports a
      //  MEM_FREE region or just returns an error. From experiments, it seems to return
      //  a MEM_FREE region for unmapped areas in valid address space and an error if we
      //  are outside valid address space.
      // Here, we advance the probe pointer by the allocation granularity. But if the range to
      //  print is large, this may take a long time. Therefore let's stop right away if the
      //  address is outside of what we know to be valid addresses on Windows. Also, add a loop fuse.
6191       static const address end_virt = (address)(LP64_ONLY(0x7ffffffffffULL) NOT_LP64(3*G));
6192       if (p >= end_virt) {
6193         break;
6194       } else {
6195         // Advance probe pointer, but with a fuse to break long loops.
6196         if (fuse++ == 100000) {
6197           break;
6198         }
6199         p += os::vm_allocation_granularity();
6200       }
6201     }
6202   }
6203 }
--- EOF ---