1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "classfile/moduleEntry.hpp" 28 #include "classfile/systemDictionary.hpp" 29 #include "classfile/vmClasses.hpp" 30 #include "classfile/vmSymbols.hpp" 31 #include "code/codeCache.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "gc/shared/gcVMOperations.hpp" 35 #include "interpreter/interpreter.hpp" 36 #include "jvm.h" 37 #include "logging/log.hpp" 38 #include "logging/logStream.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/resourceArea.hpp" 41 #include "memory/universe.hpp" 42 #include "oops/compressedOops.inline.hpp" 43 #include "oops/oop.inline.hpp" 44 #include "prims/jvmtiAgent.hpp" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.hpp" 48 #include "runtime/frame.inline.hpp" 49 #include "runtime/handles.inline.hpp" 50 #include "runtime/interfaceSupport.inline.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/javaThread.hpp" 54 #include "runtime/jniHandles.hpp" 55 #include "runtime/mutexLocker.hpp" 56 #include "runtime/os.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/safefetch.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/threadCrashProtection.hpp" 61 #include "runtime/threadSMR.hpp" 62 #include "runtime/vmOperations.hpp" 63 #include "runtime/vm_version.hpp" 64 #include "sanitizers/address.hpp" 65 #include "services/attachListener.hpp" 66 #include "services/mallocTracker.hpp" 67 #include "services/mallocHeader.inline.hpp" 68 #include "services/memTracker.inline.hpp" 69 #include "services/nmtPreInit.hpp" 70 #include "services/nmtCommon.hpp" 71 #include "services/threadService.hpp" 72 #include "utilities/align.hpp" 73 #include "utilities/count_trailing_zeros.hpp" 74 #include "utilities/defaultStream.hpp" 75 #include "utilities/events.hpp" 76 #include "utilities/powerOfTwo.hpp" 77 78 
#ifndef _WINDOWS
# include <poll.h>
#endif

# include <signal.h>
# include <errno.h>

// Static os:: state: the OSThread of the VM-creating thread, the seed behind
// os::random(), and processor counts cached at startup (the counts are read
// later by ergonomics and by the error reporter, which must not re-query the
// OS after a crash).
OSThread* os::_starting_thread = nullptr;
volatile unsigned int os::_rand_seed = 1234567;
int os::_processor_count = 0;
int os::_initial_active_processor_count = 0;
os::PageSizes os::_page_sizes;

// Debug-only flag: set once mutex initialization has completed.
DEBUG_ONLY(bool os::_mutex_init_done = false;)

// printf-style formatting into buf; returns os::vsnprintf's result.
int os::snprintf(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = os::vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}

// Like os::snprintf, but asserts that formatting neither failed nor was
// truncated. Use when the caller has sized the buffer to fit the output.
int os::snprintf_checked(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = os::vsnprintf(buf, len, fmt, args);
  va_end(args);
  assert(result >= 0, "os::snprintf error");
  assert(static_cast<size_t>(result) < len, "os::snprintf truncated");
  return result;
}

// Fill in buffer with current local time as an ISO-8601 string.
// E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
// Returns buffer, or null if it failed.
char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
  const jlong now = javaTimeMillis();
  return os::iso8601_time(now, buffer, buffer_length, utc);
}

// Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value
// E.g., yyyy-mm-ddThh:mm:ss-zzzz.
// Returns buffer, or null if it failed.
// This would mostly be a call to
//   strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
// except that on Windows the %z behaves badly, so we do it ourselves.
// Also, people wanted milliseconds on there,
// and strftime doesn't do milliseconds.
127 char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t buffer_length, bool utc) { 128 // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0" 129 130 // Sanity check the arguments 131 if (buffer == nullptr) { 132 assert(false, "null buffer"); 133 return nullptr; 134 } 135 if (buffer_length < os::iso8601_timestamp_size) { 136 assert(false, "buffer_length too small"); 137 return nullptr; 138 } 139 const int milliseconds_per_microsecond = 1000; 140 const time_t seconds_since_19700101 = 141 milliseconds_since_19700101 / milliseconds_per_microsecond; 142 const int milliseconds_after_second = 143 milliseconds_since_19700101 % milliseconds_per_microsecond; 144 // Convert the time value to a tm and timezone variable 145 struct tm time_struct; 146 if (utc) { 147 if (gmtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { 148 assert(false, "Failed gmtime_pd"); 149 return nullptr; 150 } 151 } else { 152 if (localtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { 153 assert(false, "Failed localtime_pd"); 154 return nullptr; 155 } 156 } 157 158 const time_t seconds_per_minute = 60; 159 const time_t minutes_per_hour = 60; 160 const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour; 161 162 // No offset when dealing with UTC 163 time_t UTC_to_local = 0; 164 if (!utc) { 165 #if defined(_ALLBSD_SOURCE) || defined(_GNU_SOURCE) 166 UTC_to_local = -(time_struct.tm_gmtoff); 167 #elif defined(_WINDOWS) 168 long zone; 169 _get_timezone(&zone); 170 UTC_to_local = static_cast<time_t>(zone); 171 #else 172 UTC_to_local = timezone; 173 #endif 174 175 // tm_gmtoff already includes adjustment for daylight saving 176 #if !defined(_ALLBSD_SOURCE) && !defined(_GNU_SOURCE) 177 // If daylight savings time is in effect, 178 // we are 1 hour East of our time zone 179 if (time_struct.tm_isdst > 0) { 180 UTC_to_local = UTC_to_local - seconds_per_hour; 181 } 182 #endif 183 } 184 185 // Compute the time zone offset. 
186 // localtime_pd() sets timezone to the difference (in seconds) 187 // between UTC and local time. 188 // ISO 8601 says we need the difference between local time and UTC, 189 // we change the sign of the localtime_pd() result. 190 const time_t local_to_UTC = -(UTC_to_local); 191 // Then we have to figure out if if we are ahead (+) or behind (-) UTC. 192 char sign_local_to_UTC = '+'; 193 time_t abs_local_to_UTC = local_to_UTC; 194 if (local_to_UTC < 0) { 195 sign_local_to_UTC = '-'; 196 abs_local_to_UTC = -(abs_local_to_UTC); 197 } 198 // Convert time zone offset seconds to hours and minutes. 199 const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour); 200 const time_t zone_min = 201 ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute); 202 203 // Print an ISO 8601 date and time stamp into the buffer 204 const int year = 1900 + time_struct.tm_year; 205 const int month = 1 + time_struct.tm_mon; 206 const int printed = jio_snprintf(buffer, buffer_length, 207 "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d", 208 year, 209 month, 210 time_struct.tm_mday, 211 time_struct.tm_hour, 212 time_struct.tm_min, 213 time_struct.tm_sec, 214 milliseconds_after_second, 215 sign_local_to_UTC, 216 zone_hours, 217 zone_min); 218 if (printed == 0) { 219 assert(false, "Failed jio_printf"); 220 return nullptr; 221 } 222 return buffer; 223 } 224 225 OSReturn os::set_priority(Thread* thread, ThreadPriority p) { 226 debug_only(Thread::check_for_dangling_thread_pointer(thread);) 227 228 if ((p >= MinPriority && p <= MaxPriority) || 229 (p == CriticalPriority && thread->is_ConcurrentGC_thread())) { 230 int priority = java_to_os_priority[p]; 231 return set_native_priority(thread, priority); 232 } else { 233 assert(false, "Should not happen"); 234 return OS_ERR; 235 } 236 } 237 238 // The mapping from OS priority back to Java priority may be inexact because 239 // Java priorities can map M:1 with native priorities. 
// If you want the definite
// Java priority then use JavaThread::java_priority()
OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
  int p;
  int os_prio;
  OSReturn ret = get_native_priority(thread, &os_prio);
  if (ret != OS_OK) return ret;

  // Scan down from MaxPriority for the first Java priority whose native
  // mapping does not exceed (resp. fall below) the observed native priority.
  if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) {
    for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
  } else {
    // niceness values are in reverse order
    for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ;
  }
  priority = (ThreadPriority)p;
  return OS_OK;
}

// Compose the platform-dependent library file name
// (JNI_LIB_PREFIX + fname + JNI_LIB_SUFFIX) into buffer.
// Returns false if the name did not fit.
bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
  int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
  return (n != -1);
}

#if !defined(LINUX) && !defined(_WINDOWS)
// Generic fallback for platforms without a specific implementation:
// report the whole range as committed.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  committed_start = start;
  committed_size = size;
  return true;
}
#endif

// Helper for dll_locate_lib.
// Pass buffer and printbuffer as we already printed the path to buffer
// when we called get_current_directory. This way we avoid another buffer
// of size MAX_PATH.
static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t printbuflen,
                                     const char* pname, char lastchar, const char* fname) {

  // Concatenate path and file name, but don't print double path separators.
  const char *filesep = (WINDOWS_ONLY(lastchar == ':' ||) lastchar == os::file_separator()[0]) ?
                        "" : os::file_separator();
  int ret = jio_snprintf(printbuffer, printbuflen, "%s%s%s", pname, filesep, fname);
  // Check whether file exists.
  if (ret != -1) {
    struct stat statbuf;
    return os::stat(buffer, &statbuf) == 0;
  }
  return false;
}

// Frees all memory allocated on the heap for the
// supplied array of arrays of chars (a), where n
// is the number of elements in the array.
static void free_array_of_char_arrays(char** a, size_t n) {
  while (n > 0) {
    n--;
    if (a[n] != nullptr) {
      FREE_C_HEAP_ARRAY(char, a[n]);
    }
  }
  FREE_C_HEAP_ARRAY(char*, a);
}

// Locate the library fname in pname (a single directory, a path-separator
// separated list, or empty meaning the current working directory), leaving
// the full path in buffer. Returns true iff the file was found.
bool os::dll_locate_lib(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;

  // Build the platform-dependent library file name once, up front.
  size_t fullfnamelen = strlen(JNI_LIB_PREFIX) + strlen(fname) + strlen(JNI_LIB_SUFFIX);
  char* fullfname = NEW_C_HEAP_ARRAY(char, fullfnamelen + 1, mtInternal);
  if (dll_build_name(fullfname, fullfnamelen + 1, fname)) {
    const size_t pnamelen = pname ? strlen(pname) : 0;

    if (pnamelen == 0) {
      // If no path given, use current working directory.
      const char* p = get_current_directory(buffer, buflen);
      if (p != nullptr) {
        const size_t plen = strlen(buffer);
        const char lastchar = buffer[plen - 1];
        retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen,
                                          "", lastchar, fullfname);
      }
    } else if (strchr(pname, *os::path_separator()) != nullptr) {
      // A list of paths. Search for the path that contains the library.
      size_t n;
      char** pelements = split_path(pname, &n, fullfnamelen);
      if (pelements != nullptr) {
        for (size_t i = 0; i < n; i++) {
          char* path = pelements[i];
          // Really shouldn't be null, but check can't hurt.
          size_t plen = (path == nullptr) ? 0 : strlen(path);
          if (plen == 0) {
            continue; // Skip the empty path values.
          }
          const char lastchar = path[plen - 1];
          retval = conc_path_file_and_check(buffer, buffer, buflen, path, lastchar, fullfname);
          if (retval) break;
        }
        // Release the storage allocated by split_path.
        free_array_of_char_arrays(pelements, n);
      }
    } else {
      // A definite path.
      const char lastchar = pname[pnamelen-1];
      retval = conc_path_file_and_check(buffer, buffer, buflen, pname, lastchar, fullfname);
    }
  }

  FREE_C_HEAP_ARRAY(char*, fullfname);
  return retval;
}

// --------------------- sun.misc.Signal (optional) ---------------------


// SIGBREAK is sent by the keyboard to query the VM state
#ifndef SIGBREAK
#define SIGBREAK SIGQUIT
#endif

// sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.


// Entry point of the "Signal Dispatcher" thread: blocks waiting for signals
// and either handles SIGBREAK itself (attach-listener trigger / thread dump)
// or dispatches the signal to jdk.internal.misc.Signal in Java.
static void signal_thread_entry(JavaThread* thread, TRAPS) {
  os::set_priority(thread, NearMaxPriority);
  while (true) {
    int sig;
    {
      // FIXME : Currently we have not decided what should be the status
      // for this java thread blocked here. Once we decide about
      // that we should fix this.
      sig = os::signal_wait();
    }
    if (sig == os::sigexitnum_pd()) {
      // Terminate the signal thread
      return;
    }

    switch (sig) {
      case SIGBREAK: {
#if INCLUDE_SERVICES
        // Check if the signal is a trigger to start the Attach Listener - in that
        // case don't print stack traces.
        if (!DisableAttachMechanism) {
          // Attempt to transit state to AL_INITIALIZING.
          AttachListenerState cur_state = AttachListener::transit_state(AL_INITIALIZING, AL_NOT_INITIALIZED);
          if (cur_state == AL_INITIALIZING) {
            // Attach Listener has been started to initialize. Ignore this signal.
            continue;
          } else if (cur_state == AL_NOT_INITIALIZED) {
            // Start to initialize.
            if (AttachListener::is_init_trigger()) {
              // Attach Listener has been initialized.
              // Accept subsequent request.
              continue;
            } else {
              // Attach Listener could not be started.
              // So we need to transit the state to AL_NOT_INITIALIZED.
              AttachListener::set_state(AL_NOT_INITIALIZED);
            }
          } else if (AttachListener::check_socket_file()) {
            // Attach Listener has been started, but unix domain socket file
            // does not exist. So restart Attach Listener.
            continue;
          }
        }
#endif
        // Print stack traces
        // Any SIGBREAK operations added here should make sure to flush
        // the output stream (e.g. tty->flush()) after output. See 4803766.
        // Each module also prints an extra carriage return after its output.
        VM_PrintThreads op(tty, PrintConcurrentLocks, false /* no extended info */, true /* print JNI handle info */);
        VMThread::execute(&op);
        VM_FindDeadlocks op1(tty);
        VMThread::execute(&op1);
        Universe::print_heap_at_SIGBREAK();
        if (PrintClassHistogram) {
          VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */);
          VMThread::execute(&op1);
        }
        if (JvmtiExport::should_post_data_dump()) {
          JvmtiExport::post_data_dump();
        }
        break;
      }
      default: {
        // Dispatch the signal to java
        HandleMark hm(THREAD);
        Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD);
        if (klass != nullptr) {
          JavaValue result(T_VOID);
          JavaCallArguments args;
          args.push_int(sig);
          JavaCalls::call_static(
            &result,
            klass,
            vmSymbols::dispatch_name(),
            vmSymbols::int_void_signature(),
            &args,
            THREAD
          );
        }
        if (HAS_PENDING_EXCEPTION) {
          // tty is initialized early so we don't expect it to be null, but
          // if it is we can't risk doing an initialization that might
          // trigger additional out-of-memory conditions
          if (tty != nullptr) {
            char klass_name[256];
            char tmp_sig_name[16];
            const char* sig_name = "UNKNOWN";
            InstanceKlass::cast(PENDING_EXCEPTION->klass())->
              name()->as_klass_external_name(klass_name, 256);
            if (os::exception_name(sig, tmp_sig_name, 16) != nullptr)
              sig_name = tmp_sig_name;
            warning("Exception %s occurred dispatching signal %s to handler"
                    "- the VM may need to be forcibly terminated",
                    klass_name, sig_name );
          }
          CLEAR_PENDING_EXCEPTION;
        }
      }
    }
  }
}

// Pre-ergonomics initialization: processor counts, large-page support,
// stack zone sizes and platform characteristics that ergonomics consults.
void os::init_before_ergo() {
  initialize_initial_active_processor_count();
  // We need to initialize large page support here because ergonomics takes some
  // decisions depending on large page support and the calculated large page size.
  large_page_init();

  StackOverflow::initialize_stack_zone_sizes();

  // VM version initialization identifies some characteristics of the
  // platform that are used during ergonomic decisions.
  VM_Version::init_before_ergo();
}

// Start the "Signal Dispatcher" daemon thread, unless -XX:+ReduceSignalUsage.
void os::initialize_jdk_signal_support(TRAPS) {
  if (!ReduceSignalUsage) {
    // Setup JavaThread for processing signals
    const char* name = "Signal Dispatcher";
    Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK);

    JavaThread* thread = new JavaThread(&signal_thread_entry);
    JavaThread::vm_exit_on_osthread_failure(thread);

    JavaThread::start_internal_daemon(THREAD, thread, thread_oop, NearMaxPriority);
  }
}


// Ask the signal dispatcher thread (if running) to terminate by sending it
// the platform-specific exit signal.
void os::terminate_signal_thread() {
  if (!ReduceSignalUsage)
    signal_notify(sigexitnum_pd());
}


// --------------------- loading libraries ---------------------

typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
extern struct JavaVM_ main_vm;

// Handle of the loaded "java" library; loaded lazily, never unloaded.
static void* _native_java_library = nullptr;

// Lazily load and return the handle of the native "java" library from the
// JDK dll directory. Exits the VM during initialization if loading fails.
void* os::native_java_library() {
  if (_native_java_library == nullptr) {
    char buffer[JVM_MAXPATHLEN];
    char ebuf[1024];

    // Load java dll
    if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                       "java")) {
      _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
    }
    if (_native_java_library == nullptr) {
      vm_exit_during_initialization("Unable to load native library", ebuf);
    }

#if defined(__OpenBSD__)
    // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so
    // ignore errors
    if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                       "net")) {
      dll_load(buffer, ebuf, sizeof(ebuf));
    }
#endif
  }
  return _native_java_library;
}

/*
 * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
 * If check_lib == true then we are looking for an
 * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
 * this library is statically linked into the image.
 * If check_lib == false then we will look for the appropriate symbol in the
 * executable if agent_lib->is_static_lib() == true or in the shared library
 * referenced by 'handle'.
 */
void* os::find_agent_function(JvmtiAgent *agent_lib, bool check_lib,
                              const char *syms[], size_t syms_len) {
  assert(agent_lib != nullptr, "sanity check");
  const char *lib_name;
  void *handle = agent_lib->os_lib();
  void *entryName = nullptr;
  char *agent_function_name;
  size_t i;

  // If checking then use the agent name otherwise test is_static_lib() to
  // see how to process this lookup
  lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : nullptr);
  // Try each candidate symbol name in turn until one resolves.
  for (i = 0; i < syms_len; i++) {
    agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
    if (agent_function_name == nullptr) {
      break;
    }
    entryName = dll_lookup(handle, agent_function_name);
    FREE_C_HEAP_ARRAY(char, agent_function_name);
    if (entryName != nullptr) {
      break;
    }
  }
  return entryName;
}

// See if the passed in agent is statically linked into the VM image.
bool os::find_builtin_agent(JvmtiAgent* agent, const char *syms[],
                            size_t syms_len) {
  void *ret;
  void *proc_handle;
  void *save_handle;

  assert(agent != nullptr, "sanity check");
  if (agent->name() == nullptr) {
    return false;
  }
  proc_handle = get_default_process_handle();
  // Check for Agent_OnLoad/Attach_lib_name function
  save_handle = agent->os_lib();
  // We want to look in this process' symbol table.
  agent->set_os_lib(proc_handle);
  ret = find_agent_function(agent, true, syms, syms_len);
  if (ret != nullptr) {
    // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
    agent->set_static_lib();
    agent->set_loaded();
    return true;
  }
  // Not statically linked: restore the agent's original library handle.
  agent->set_os_lib(save_handle);
  return false;
}

// --------------------- heap allocation utilities ---------------------

// NMT-aware strdup; returns nullptr if the allocation fails.
char *os::strdup(const char *str, MEMFLAGS flags) {
  size_t size = strlen(str);
  char *dup_str = (char *)malloc(size + 1, flags);
  if (dup_str == nullptr) return nullptr;
  strcpy(dup_str, str);
  return dup_str;
}

// Like os::strdup, but exits the VM with an OOM error on allocation failure.
char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
  char* p = os::strdup(str, flags);
  if (p == nullptr) {
    vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
  }
  return p;
}

#ifdef ASSERT
// malloc/realloc/free must not run while the thread is crash-protected:
// a protection longjmp could leave the allocator in an inconsistent state.
static void check_crash_protection() {
  assert(!ThreadCrashProtection::is_crash_protected(Thread::current_or_null()),
         "not allowed when crash protection is set");
}
// Debug aid: break when the pointer named by -XX:MallocCatchPtr is seen.
static void break_if_ptr_caught(void* ptr) {
  if (p2i(ptr) == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("ptr caught: " PTR_FORMAT, p2i(ptr));
    breakpoint();
  }
}
#endif // ASSERT

// Convenience overload that records the caller's PC as the allocation site.
void* os::malloc(size_t size, MEMFLAGS flags) {
  return os::malloc(size, flags, CALLER_PC);
}

// NMT-instrumented malloc: allocates size bytes plus the NMT per-block
// overhead, registers the block with MemTracker, and returns the pointer
// to the usable ("inner") region.
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

  // Special handling for NMT preinit phase before arguments are parsed
  void* rc = nullptr;
  if (NMTPreInit::handle_malloc(&rc, size)) {
    // No need to fill with 0 because DumpSharedSpaces doesn't use these
    // early allocations.
    return rc;
  }

  DEBUG_ONLY(check_crash_protection());

  // On malloc(0), implementations of malloc(3) have the choice to return either
  // null or a unique non-null pointer. To unify libc behavior across our platforms
  // we chose the latter.
  size = MAX2((size_t)1, size);

  // Observe MallocLimit
  if (MemTracker::check_exceeds_limit(size, memflags)) {
    return nullptr;
  }

  const size_t outer_size = size + MemTracker::overhead_per_malloc();

  // Check for overflow.
  if (outer_size < size) {
    return nullptr;
  }

  ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);)
  if (outer_ptr == nullptr) {
    return nullptr;
  }

  void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);

  if (DumpSharedSpaces) {
    // Need to deterministically fill all the alignment gaps in C++ structures.
    ::memset(inner_ptr, 0, size);
  } else {
    DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
  }
  DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
  return inner_ptr;
}

// Convenience overload that records the caller's PC as the allocation site.
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
  return os::realloc(memblock, size, flags, CALLER_PC);
}

// NMT-instrumented realloc; see os::malloc for the general header scheme.
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

  // Special handling for NMT preinit phase before arguments are parsed
  void* rc = nullptr;
  if (NMTPreInit::handle_realloc(&rc, memblock, size, memflags)) {
    return rc;
  }

  // realloc(nullptr, n) behaves like malloc(n).
  if (memblock == nullptr) {
    return os::malloc(size, memflags, stack);
  }

  DEBUG_ONLY(check_crash_protection());

  // On realloc(p, 0), implementers of realloc(3) have the choice to return either
  // null or a unique non-null pointer. To unify libc behavior across our platforms
  // we chose the latter.
  size = MAX2((size_t)1, size);

  if (MemTracker::enabled()) {
    // NMT realloc handling

    const size_t new_outer_size = size + MemTracker::overhead_per_malloc();

    // Handle size overflow.
    if (new_outer_size < size) {
      return nullptr;
    }

    const size_t old_size = MallocTracker::malloc_header(memblock)->size();

    // Observe MallocLimit
    if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, memflags)) {
      return nullptr;
    }

    // Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it
    // may invalidate the old block, including its header.
    MallocHeader* header = MallocHeader::resolve_checked(memblock);
    assert(memflags == header->flags(), "weird NMT flags mismatch (new:\"%s\" != old:\"%s\")\n",
           NMTUtil::flag_to_name(memflags), NMTUtil::flag_to_name(header->flags()));
    const MallocHeader::FreeInfo free_info = header->free_info();

    header->mark_block_as_dead();

    // the real realloc
    ALLOW_C_FUNCTION(::realloc, void* const new_outer_ptr = ::realloc(header, new_outer_size);)

    if (new_outer_ptr == nullptr) {
      // realloc(3) failed and the block still exists.
      // We have however marked it as dead, revert this change.
      header->revive();
      return nullptr;
    }
    // realloc(3) succeeded, variable header now points to invalid memory and we need to deaccount the old block.
    MemTracker::deaccount(free_info);

    // After a successful realloc(3), we account the resized block with its new size
    // to NMT.
    void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack);

#ifdef ASSERT
    assert(old_size == free_info.size, "Sanity");
    if (old_size < size) {
      // We also zap the newly extended region.
      ::memset((char*)new_inner_ptr + old_size, uninitBlockPad, size - old_size);
    }
#endif

    rc = new_inner_ptr;

  } else {

    // NMT disabled.
    ALLOW_C_FUNCTION(::realloc, rc = ::realloc(memblock, size);)
    if (rc == nullptr) {
      return nullptr;
    }

  }

  DEBUG_ONLY(break_if_ptr_caught(rc);)

  return rc;
}

// NMT-aware free; safely accepts nullptr.
void os::free(void *memblock) {

  // Special handling for NMT preinit phase before arguments are parsed
  if (NMTPreInit::handle_free(memblock)) {
    return;
  }

  if (memblock == nullptr) {
    return;
  }

  DEBUG_ONLY(break_if_ptr_caught(memblock);)

  // When NMT is enabled this checks for heap overwrites, then deaccounts the old block.
  void* const old_outer_ptr = MemTracker::record_free(memblock);

  ALLOW_C_FUNCTION(::free, ::free(old_outer_ptr);)
}

// Seed the pseudo-random generator behind os::random().
void os::init_random(unsigned int initval) {
  _rand_seed = initval;
}


// Compute the successor of rand_seed in the sequence; pure function, does
// not update any global state.
int os::next_random(unsigned int rand_seed) {
  /* standard, well-known linear congruential random generator with
   * next_rand = (16807*seed) mod (2**31-1)
   * see
   * (1) "Random Number Generators: Good Ones Are Hard to Find",
   *     S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
   * (2) "Two Fast Implementations of the 'Minimal Standard' Random
   *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
   */
  const unsigned int a = 16807;
  const unsigned int m = 2147483647;
  const int q = m / a; assert(q == 127773, "weird math");
  const int r = m % a; assert(r == 2836, "weird math");

  // compute az=2^31p+q
  unsigned int lo = a * (rand_seed & 0xFFFF);
  unsigned int hi = a * (rand_seed >> 16);
  lo += (hi & 0x7FFF) << 16;

  // if q overflowed, ignore the overflow and increment q
  if (lo > m) {
    lo &= m;
    ++lo;
  }
  lo += hi >> 15;

  // if (p+q) overflowed, ignore the overflow and increment (p+q)
  if (lo > m) {
    lo &= m;
    ++lo;
  }
  return lo;
}

// Thread-safe random(): CAS the advanced seed in, retrying on contention.
int os::random() {
  // Make updating the random seed thread safe.
  while (true) {
    unsigned int seed = _rand_seed;
    unsigned int rand = next_random(seed);
    if (Atomic::cmpxchg(&_rand_seed, seed, rand, memory_order_relaxed) == seed) {
      return static_cast<int>(rand);
    }
  }
}

// The INITIALIZED state is distinguished from the SUSPENDED state because the
// conditions in which a thread is first started are different from those in which
// a suspension is resumed. These differences make it hard for us to apply the
// tougher checks when starting threads that we want to do when resuming them.
835 // However, when start_thread is called as a result of Thread.start, on a Java 836 // thread, the operation is synchronized on the Java Thread object. So there 837 // cannot be a race to start the thread and hence for the thread to exit while 838 // we are working on it. Non-Java threads that start Java threads either have 839 // to do so in a context in which races are impossible, or should do appropriate 840 // locking. 841 842 void os::start_thread(Thread* thread) { 843 OSThread* osthread = thread->osthread(); 844 osthread->set_state(RUNNABLE); 845 pd_start_thread(thread); 846 } 847 848 void os::abort(bool dump_core) { 849 abort(dump_core && CreateCoredumpOnCrash, nullptr, nullptr); 850 } 851 852 //--------------------------------------------------------------------------- 853 // Helper functions for fatal error handler 854 855 bool os::print_function_and_library_name(outputStream* st, 856 address addr, 857 char* buf, int buflen, 858 bool shorten_paths, 859 bool demangle, 860 bool strip_arguments) { 861 // If no scratch buffer given, allocate one here on stack. 862 // (used during error handling; its a coin toss, really, if on-stack allocation 863 // is worse than (raw) C-heap allocation in that case). 864 char* p = buf; 865 if (p == nullptr) { 866 p = (char*)::alloca(O_BUFLEN); 867 buflen = O_BUFLEN; 868 } 869 int offset = 0; 870 bool have_function_name = dll_address_to_function_name(addr, p, buflen, 871 &offset, demangle); 872 bool is_function_descriptor = false; 873 #ifdef HAVE_FUNCTION_DESCRIPTORS 874 // When we deal with a function descriptor instead of a real code pointer, try to 875 // resolve it. There is a small chance that a random pointer given to this function 876 // may just happen to look like a valid descriptor, but this is rare and worth the 877 // risk to see resolved function names. But we will print a little suffix to mark 878 // this as a function descriptor for the reader (see below). 
879 if (!have_function_name && os::is_readable_pointer(addr)) { 880 address addr2 = (address)os::resolve_function_descriptor(addr); 881 if (have_function_name = is_function_descriptor = 882 dll_address_to_function_name(addr2, p, buflen, &offset, demangle)) { 883 addr = addr2; 884 } 885 } 886 #endif // HAVE_FUNCTION_DESCRIPTORS 887 888 if (have_function_name) { 889 // Print function name, optionally demangled 890 if (demangle && strip_arguments) { 891 char* args_start = strchr(p, '('); 892 if (args_start != nullptr) { 893 *args_start = '\0'; 894 } 895 } 896 // Print offset. Omit printing if offset is zero, which makes the output 897 // more readable if we print function pointers. 898 if (offset == 0) { 899 st->print("%s", p); 900 } else { 901 st->print("%s+%d", p, offset); 902 } 903 } else { 904 st->print(PTR_FORMAT, p2i(addr)); 905 } 906 offset = 0; 907 908 const bool have_library_name = dll_address_to_library_name(addr, p, buflen, &offset); 909 if (have_library_name) { 910 // Cut path parts 911 if (shorten_paths) { 912 char* p2 = strrchr(p, os::file_separator()[0]); 913 if (p2 != nullptr) { 914 p = p2 + 1; 915 } 916 } 917 st->print(" in %s", p); 918 if (!have_function_name) { // Omit offset if we already printed the function offset 919 st->print("+%d", offset); 920 } 921 } 922 923 // Write a trailing marker if this was a function descriptor 924 if (have_function_name && is_function_descriptor) { 925 st->print_raw(" (FD)"); 926 } 927 928 return have_function_name || have_library_name; 929 } 930 931 ATTRIBUTE_NO_ASAN static void print_hex_readable_pointer(outputStream* st, address p, 932 int unitsize) { 933 switch (unitsize) { 934 case 1: st->print("%02x", *(u1*)p); break; 935 case 2: st->print("%04x", *(u2*)p); break; 936 case 4: st->print("%08x", *(u4*)p); break; 937 case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break; 938 } 939 } 940 941 void os::print_hex_dump(outputStream* st, address start, address end, int unitsize, 942 int bytes_per_line, address 
                        logical_start) {
  assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");

  // Align the range and line width so units never straddle a line.
  start = align_down(start, unitsize);
  logical_start = align_down(logical_start, unitsize);
  bytes_per_line = align_up(bytes_per_line, 8);

  int cols = 0;
  int cols_per_line = bytes_per_line / unitsize;

  address p = start;
  address logical_p = logical_start;

  // Print out the addresses as if we were starting from logical_start.
  st->print(PTR_FORMAT ": ", p2i(logical_p));
  while (p < end) {
    if (is_readable_pointer(p)) {
      print_hex_readable_pointer(st, p, unitsize);
    } else {
      // Unreadable unit: print '?' placeholders of the same width (2 hex
      // chars per byte) so columns stay aligned.
      st->print("%*.*s", 2*unitsize, 2*unitsize, "????????????????");
    }
    p += unitsize;
    logical_p += unitsize;
    cols++;
    if (cols >= cols_per_line && p < end) {
      cols = 0;
      st->cr();
      st->print(PTR_FORMAT ": ", p2i(logical_p));
    } else {
      st->print(" ");
    }
  }
  st->cr();
}

// Print a duration given in seconds as "<startStr> D days H:MM hours".
void os::print_dhm(outputStream* st, const char* startStr, long sec) {
  long days = sec/86400;
  long hours = (sec/3600) - (days * 24);
  long minutes = (sec/60) - (days * 1440) - (hours * 60);
  if (startStr == nullptr) startStr = "";
  st->print_cr("%s %ld days %ld:%02ld hours", startStr, days, hours, minutes);
}

// Dump the 512 bytes at and above sp, word-sized units.
void os::print_tos(outputStream* st, address sp) {
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, sp, sp + 512, sizeof(intptr_t));
}

// Dump 256 bytes on either side of pc.
void os::print_instructions(outputStream* st, address pc, int unitsize) {
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
  print_hex_dump(st, pc - 256, pc + 256, unitsize);
}

// Print "NAME=VALUE" for each name in the null-terminated env_list that is
// set in the current environment.
void os::print_environment_variables(outputStream* st, const char** env_list) {
  if (env_list) {
    st->print_cr("Environment Variables:");

    for (int i = 0; env_list[i] != nullptr; i++) {
      char *envvar = ::getenv(env_list[i]);
      if (envvar != nullptr) {
        st->print("%s", env_list[i]);
        st->print("=");
        st->print("%s", envvar);
        // Use separate cr() printing to avoid unnecessary buffer operations that might cause truncation.
        st->cr();
      }
    }
  }
}

// One-line CPU summary plus platform-dependent details. Must stay safe to
// call from the error reporter (see note about active_processor_count below).
void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // cpu
  st->print("CPU:");
#if defined(__APPLE__) && !defined(ZERO)
  if (VM_Version::is_cpu_emulated()) {
    st->print(" (EMULATED)");
  }
#endif
  st->print(" total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count()); but we can
  // print the initial number of active processors.
  // We access the raw value here because the assert in the accessor will
  // fail if the crash occurs before initialization of this value.
  st->print(" (initial active %d)", _initial_active_processor_count);
  st->print(" %s", VM_Version::features_string());
  st->cr();
  pd_print_cpu_info(st, buf, buflen);
}

// Print a one line string summarizing the cpu, number of cores, memory, and operating system version
void os::print_summary_info(outputStream* st, char* buf, size_t buflen) {
  st->print("Host: ");
#ifndef PRODUCT
  if (get_host_name(buf, buflen)) {
    st->print("%s, ", buf);
  }
#endif // PRODUCT
  get_summary_cpu_info(buf, buflen);
  st->print("%s, ", buf);
  size_t mem = physical_memory()/G;
  if (mem == 0) {  // for low memory systems
    // Fall back to megabytes when total memory is under 1G.
    mem = physical_memory()/M;
    st->print("%d cores, " SIZE_FORMAT "M, ", processor_count(), mem);
  } else {
    st->print("%d cores, " SIZE_FORMAT "G, ", processor_count(), mem);
  }
  get_summary_os_info(buf, buflen);
  st->print_raw(buf);
  st->cr();
}

// Print wall-clock time (with timezone when available) and VM elapsed time.
// buf/buflen are scratch space for the timezone string conversion.
void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
  const int secs_per_day  = 86400;
  const int secs_per_hour = 3600;
  const int secs_per_min  = 60;

  time_t tloc;
  (void)time(&tloc);
  char* timestring = ctime(&tloc); // ctime adds newline.
  // edit out the newline
  char* nl = strchr(timestring, '\n');
  if (nl != nullptr) {
    *nl = '\0';
  }

  struct tm tz;
  if (localtime_pd(&tloc, &tz) != nullptr) {
    // wcsftime is used for %Z because it handles wide timezone names;
    // the result is narrowed back into buf with wcstombs.
    wchar_t w_buf[80];
    size_t n = ::wcsftime(w_buf, 80, L"%Z", &tz);
    if (n > 0) {
      ::wcstombs(buf, w_buf, buflen);
      st->print("Time: %s %s", timestring, buf);
    } else {
      st->print("Time: %s", timestring);
    }
  } else {
    st->print("Time: %s", timestring);
  }

  double t = os::elapsedTime();
  // NOTE: a crash using printf("%f",...) on Linux was historically noted here.
  int eltime = (int)t;  // elapsed time in seconds
  int eltimeFraction = (int) ((t - eltime) * 1000000);

  // print elapsed time in a human-readable format:
  int eldays = eltime / secs_per_day;
  int day_secs = eldays * secs_per_day;
  int elhours = (eltime - day_secs) / secs_per_hour;
  int hour_secs = elhours * secs_per_hour;
  int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
  int minute_secs = elmins * secs_per_min;
  int elsecs = (eltime - day_secs - hour_secs - minute_secs);
  st->print_cr(" elapsed time: %d.%06d seconds (%dd %dh %dm %ds)", eltime, eltimeFraction, eldays, elhours, elmins, elsecs);
}


// Check if pointer can be read from (4-byte read access).
// Helps to prove validity of a non-null pointer.
// Returns true in very early stages of VM life when stub is not yet generated.
bool os::is_readable_pointer(const void* p) {
  int* const aligned = (int*) align_down((intptr_t)p, 4);
  int cafebabe = 0xcafebabe; // tester value 1
  int deadbeef = 0xdeadbeef; // tester value 2
  // Two probes with different sentinels: if both probes return their own
  // sentinel, the location is unreadable (or, astronomically unlikely,
  // happens to contain both values). One mismatch proves readability.
  return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef);
}

// Probe every page of [from, to) for readability. Empty/inverted ranges
// return false. Probing one address per page suffices because readability
// has (at least) page granularity.
bool os::is_readable_range(const void* from, const void* to) {
  if ((uintptr_t)from >= (uintptr_t)to) return false;
  for (uintptr_t p = align_down((uintptr_t)from, min_page_size()); p < (uintptr_t)to; p += min_page_size()) {
    if (!is_readable_pointer((const void*)p)) {
      return false;
    }
  }
  return true;
}


// moved from debug.cpp (used to be find()) but still called from there
// The verbose parameter is only set by the debug code in one case
// Best-effort classification of an arbitrary address for debugging/error
// reporting: tries, in order, code cache, Java heap, JNI handles, threads
// and their stacks, metaspace, compressed class pointers, NMT, and an
// OS-specific lookup, printing a description for the first match.
void os::print_location(outputStream* st, intptr_t x, bool verbose) {
  address addr = (address)x;
  // Handle null first, so later checks don't need to protect against it.
  if (addr == nullptr) {
    st->print_cr("0x0 is nullptr");
    return;
  }

  // Check if addr points into a code blob.
  CodeBlob* b = CodeCache::find_blob(addr);
  if (b != nullptr) {
    b->dump_for_addr(addr, st, verbose);
    return;
  }

  // Check if addr points into Java heap.
  if (Universe::heap()->print_location(st, addr)) {
    return;
  }

  bool accessible = is_readable_pointer(addr);

  // Check if addr is a JNI handle.
  // Only attempted for word-aligned, readable addresses (the handle checks
  // dereference the address).
  if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
    if (JNIHandles::is_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a global jni handle", p2i(addr));
      return;
    }
    if (JNIHandles::is_weak_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a weak global jni handle", p2i(addr));
      return;
    }
  }

  // Check if addr belongs to a Java thread.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    // If the addr is a java thread print information about that.
    if (addr == (address)thread) {
      if (verbose) {
        thread->print_on(st);
      } else {
        st->print_cr(INTPTR_FORMAT " is a thread", p2i(addr));
      }
      return;
    }
    // If the addr is in the stack region for this thread then report that
    // and print thread info
    if (thread->is_in_full_stack(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
                   INTPTR_FORMAT, p2i(addr), p2i(thread));
      if (verbose) thread->print_on(st);
      return;
    }
  }

  // Check if in metaspace and print types that have vptrs
  if (Metaspace::contains(addr)) {
    if (Klass::is_valid((Klass*)addr)) {
      st->print_cr(INTPTR_FORMAT " is a pointer to class: ", p2i(addr));
      ((Klass*)addr)->print_on(st);
    } else if (Method::is_valid_method((const Method*)addr)) {
      ((Method*)addr)->print_value_on(st);
      st->cr();
    } else {
      // Use addr->print() from the debugger instead (not here)
      st->print_cr(INTPTR_FORMAT " is pointing into metadata", p2i(addr));
    }
    return;
  }

  // Compressed klass needs to be decoded first.
#ifdef _LP64
  // Only values that fit in 32 bits can be narrow klass pointers.
  if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
    narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr;
    Klass* k = CompressedKlassPointers::decode_raw(narrow_klass);

    if (Klass::is_valid(k)) {
      st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k));
      k->print_on(st);
      return;
    }
  }
#endif

  // Still nothing? If NMT is enabled, we can ask what it thinks...
  if (MemTracker::print_containing_region(addr, st)) {
    return;
  }

  // Try an OS specific find
  if (os::find(addr, st)) {
    return;
  }

  if (accessible) {
    // Unknown but readable: show the word at addr (if aligned) and the raw
    // bytes up to the next word boundary.
    st->print(INTPTR_FORMAT " points into unknown readable memory:", p2i(addr));
    if (is_aligned(addr, sizeof(intptr_t))) {
      st->print(" " PTR_FORMAT " |", *(intptr_t*)addr);
    }
    for (address p = addr; p < align_up(addr + 1, sizeof(intptr_t)); ++p) {
      st->print(" %02x", *(u1*)p);
    }
    st->cr();
    return;
  }

  st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
}

// A pointer is "bad" for stack walking if it is misaligned or unreadable.
bool is_pointer_bad(intptr_t* ptr) {
  return !is_aligned(ptr, sizeof(uintptr_t)) || !os::is_readable_pointer(ptr);
}

// Looks like all platforms can use the same function to check if C
// stack is walkable beyond current frame.
// Returns true if this is not the case, i.e. the frame is possibly
// the first C frame on the stack.
bool os::is_first_C_frame(frame* fr) {

#ifdef _WINDOWS
  return true; // native stack isn't walkable on windows this way.
#endif
  // Load up sp, fp, sender sp and sender fp, check for reasonable values.
  // Check usp first, because if that's bad the other accessors may fault
  // on some architectures. Ditto ufp second, etc.

  if (is_pointer_bad(fr->sp())) return true;

  uintptr_t ufp = (uintptr_t)fr->fp();
  if (is_pointer_bad(fr->fp())) return true;

  uintptr_t old_sp = (uintptr_t)fr->sender_sp();
  if ((uintptr_t)fr->sender_sp() == (uintptr_t)-1 || is_pointer_bad(fr->sender_sp())) return true;

  uintptr_t old_fp = (uintptr_t)fr->link_or_null();
  if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp ||
      is_pointer_bad(fr->link_or_null())) return true;

  // stack grows downwards; if old_fp is below current fp or if the stack
  // frame is too large, either the stack is corrupted or fp is not saved
  // on stack (i.e. on x86, ebp may be used as general register). The stack
  // is not walkable beyond current frame.
  if (old_fp < ufp) return true;
  if (old_fp - ufp > 64 * K) return true;

  return false;
}

// Set up the boot classpath.

// Expand format_string into a freshly C-heap-allocated path string:
// '%' -> home, '/' -> fileSep, ':' -> pathSep; other chars copied verbatim.
// Caller frees with FREE_C_HEAP_ARRAY.
char* os::format_boot_path(const char* format_string,
                           const char* home,
                           int home_len,
                           char fileSep,
                           char pathSep) {
  assert((fileSep == '/' && pathSep == ':') ||
         (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");

  // Scan the format string to determine the length of the actual
  // boot classpath, and handle platform dependencies as well.
  int formatted_path_len = 0;
  const char* p;
  for (p = format_string; *p != 0; ++p) {
    // '%' contributes home_len chars instead of 1.
    if (*p == '%') formatted_path_len += home_len - 1;
    ++formatted_path_len;
  }

  char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);

  // Create boot classpath from format, substituting separator chars and
  // java home directory.
  char* q = formatted_path;
  for (p = format_string; *p != 0; ++p) {
    switch (*p) {
    case '%':
      strcpy(q, home);
      q += home_len;
      break;
    case '/':
      *q++ = fileSep;
      break;
    case ':':
      *q++ = pathSep;
      break;
    default:
      *q++ = *p;
    }
  }
  *q = '\0';

  assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
  return formatted_path;
}

// This function is a proxy to fopen, it tries to add a non standard flag ('e' or 'N')
// that ensures automatic closing of the file on exec. If it can not find support in
// the underlying c library, it will make an extra system call (fcntl) to ensure automatic
// closing of the file on exec.
FILE* os::fopen(const char* path, const char* mode) {
  char modified_mode[20];
  assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer");
  os::snprintf_checked(modified_mode, sizeof(modified_mode), "%s" LINUX_ONLY("e") BSD_ONLY("e") WINDOWS_ONLY("N"), mode);
  FILE* file = ::fopen(path, modified_mode);

#if !(defined LINUX || defined BSD || defined _WINDOWS)
  // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N'
  // is not supported as mode in fopen
  if (file != nullptr) {
    int fd = fileno(file);
    if (fd != -1) {
      int fd_flags = fcntl(fd, F_GETFD);
      if (fd_flags != -1) {
        fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
      }
    }
  }
#endif

  return file;
}

// Establish the boot class path: prefer the jimage ("modules") file; fall
// back to exploded modules (developer builds). Returns false if neither
// exists.
bool os::set_boot_path(char fileSep, char pathSep) {
  const char* home = Arguments::get_java_home();
  int home_len = (int)strlen(home);

  struct stat st;

  // modular image if "modules" jimage exists
  char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep);
  if (jimage == nullptr) return false;
  bool has_jimage = (os::stat(jimage, &st) == 0);
  if (has_jimage) {
    Arguments::set_boot_class_path(jimage, true);
    FREE_C_HEAP_ARRAY(char, jimage);
    return true;
  }
  FREE_C_HEAP_ARRAY(char, jimage);

  // check if developer build with exploded modules
  char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep);
  if (base_classes == nullptr) return false;
  if (os::stat(base_classes, &st) == 0) {
    Arguments::set_boot_class_path(base_classes, false);
    FREE_C_HEAP_ARRAY(char, base_classes);
    return true;
  }
  FREE_C_HEAP_ARRAY(char, base_classes);

  return false;
}

// True if filename is non-null, non-empty, and stat()s successfully.
bool os::file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == nullptr || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// Splits a path, based on its separator, the number of
// elements is returned back in "elements".
// file_name_length is used as a modifier for each path's
// length when compared to JVM_MAXPATHLEN. So if you know
// each returned path will have something appended when
// in use, you can pass the length of that in
// file_name_length, to ensure we detect if any path
// exceeds the maximum path length once prepended onto
// the sub-path/file name.
// It is the callers responsibility to:
//   a> check the value of "elements", which may be 0.
//   b> ignore any empty path elements
//   c> free up the data.
1394 char** os::split_path(const char* path, size_t* elements, size_t file_name_length) { 1395 *elements = (size_t)0; 1396 if (path == nullptr || strlen(path) == 0 || file_name_length == (size_t)nullptr) { 1397 return nullptr; 1398 } 1399 const char psepchar = *os::path_separator(); 1400 char* inpath = NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); 1401 strcpy(inpath, path); 1402 size_t count = 1; 1403 char* p = strchr(inpath, psepchar); 1404 // Get a count of elements to allocate memory 1405 while (p != nullptr) { 1406 count++; 1407 p++; 1408 p = strchr(p, psepchar); 1409 } 1410 1411 char** opath = NEW_C_HEAP_ARRAY(char*, count, mtInternal); 1412 1413 // do the actual splitting 1414 p = inpath; 1415 for (size_t i = 0 ; i < count ; i++) { 1416 size_t len = strcspn(p, os::path_separator()); 1417 if (len + file_name_length > JVM_MAXPATHLEN) { 1418 // release allocated storage before exiting the vm 1419 free_array_of_char_arrays(opath, i++); 1420 vm_exit_during_initialization("The VM tried to use a path that exceeds the maximum path length for " 1421 "this system. Review path-containing parameters and properties, such as " 1422 "sun.boot.library.path, to identify potential sources for this path."); 1423 } 1424 // allocate the string and add terminator storage 1425 char* s = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); 1426 strncpy(s, p, len); 1427 s[len] = '\0'; 1428 opath[i] = s; 1429 p += len + 1; 1430 } 1431 FREE_C_HEAP_ARRAY(char, inpath); 1432 *elements = count; 1433 return opath; 1434 } 1435 1436 // Returns true if the current stack pointer is above the stack shadow 1437 // pages, false otherwise. 1438 bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) { 1439 if (!thread->is_Java_thread()) return false; 1440 // Check if we have StackShadowPages above the guard zone. This parameter 1441 // is dependent on the depth of the maximum VM call stack possible from 1442 // the handler for stack overflow. 
'instanceof' in the stack overflow 1443 // handler or a println uses at least 8k stack of VM and native code 1444 // respectively. 1445 const int framesize_in_bytes = 1446 Interpreter::size_top_interpreter_activation(method()) * wordSize; 1447 1448 address limit = JavaThread::cast(thread)->stack_overflow_state()->shadow_zone_safe_limit(); 1449 return sp > (limit + framesize_in_bytes); 1450 } 1451 1452 size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) { 1453 assert(min_pages > 0, "sanity"); 1454 if (UseLargePages) { 1455 const size_t max_page_size = region_size / min_pages; 1456 1457 for (size_t page_size = page_sizes().largest(); page_size != 0; 1458 page_size = page_sizes().next_smaller(page_size)) { 1459 if (page_size <= max_page_size) { 1460 if (!must_be_aligned || is_aligned(region_size, page_size)) { 1461 return page_size; 1462 } 1463 } 1464 } 1465 } 1466 1467 return vm_page_size(); 1468 } 1469 1470 size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) { 1471 return page_size_for_region(region_size, min_pages, true); 1472 } 1473 1474 size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) { 1475 return page_size_for_region(region_size, min_pages, false); 1476 } 1477 1478 #ifndef MAX_PATH 1479 #define MAX_PATH (2 * K) 1480 #endif 1481 1482 void os::pause() { 1483 char filename[MAX_PATH]; 1484 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 1485 jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); 1486 } else { 1487 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 1488 } 1489 1490 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 1491 if (fd != -1) { 1492 struct stat buf; 1493 ::close(fd); 1494 while (::stat(filename, &buf) == 0) { 1495 #if defined(_WINDOWS) 1496 Sleep(100); 1497 #else 1498 (void)::poll(nullptr, 0, 100); 1499 #endif 1500 } 1501 } else { 1502 jio_fprintf(stderr, 1503 "Could not open pause file '%s', continuing 
immediately.\n", filename); 1504 } 1505 } 1506 1507 static const char* errno_to_string (int e, bool short_text) { 1508 #define ALL_SHARED_ENUMS(X) \ 1509 X(E2BIG, "Argument list too long") \ 1510 X(EACCES, "Permission denied") \ 1511 X(EADDRINUSE, "Address in use") \ 1512 X(EADDRNOTAVAIL, "Address not available") \ 1513 X(EAFNOSUPPORT, "Address family not supported") \ 1514 X(EAGAIN, "Resource unavailable, try again") \ 1515 X(EALREADY, "Connection already in progress") \ 1516 X(EBADF, "Bad file descriptor") \ 1517 X(EBADMSG, "Bad message") \ 1518 X(EBUSY, "Device or resource busy") \ 1519 X(ECANCELED, "Operation canceled") \ 1520 X(ECHILD, "No child processes") \ 1521 X(ECONNABORTED, "Connection aborted") \ 1522 X(ECONNREFUSED, "Connection refused") \ 1523 X(ECONNRESET, "Connection reset") \ 1524 X(EDEADLK, "Resource deadlock would occur") \ 1525 X(EDESTADDRREQ, "Destination address required") \ 1526 X(EDOM, "Mathematics argument out of domain of function") \ 1527 X(EEXIST, "File exists") \ 1528 X(EFAULT, "Bad address") \ 1529 X(EFBIG, "File too large") \ 1530 X(EHOSTUNREACH, "Host is unreachable") \ 1531 X(EIDRM, "Identifier removed") \ 1532 X(EILSEQ, "Illegal byte sequence") \ 1533 X(EINPROGRESS, "Operation in progress") \ 1534 X(EINTR, "Interrupted function") \ 1535 X(EINVAL, "Invalid argument") \ 1536 X(EIO, "I/O error") \ 1537 X(EISCONN, "Socket is connected") \ 1538 X(EISDIR, "Is a directory") \ 1539 X(ELOOP, "Too many levels of symbolic links") \ 1540 X(EMFILE, "Too many open files") \ 1541 X(EMLINK, "Too many links") \ 1542 X(EMSGSIZE, "Message too large") \ 1543 X(ENAMETOOLONG, "Filename too long") \ 1544 X(ENETDOWN, "Network is down") \ 1545 X(ENETRESET, "Connection aborted by network") \ 1546 X(ENETUNREACH, "Network unreachable") \ 1547 X(ENFILE, "Too many files open in system") \ 1548 X(ENOBUFS, "No buffer space available") \ 1549 X(ENODATA, "No message is available on the STREAM head read queue") \ 1550 X(ENODEV, "No such device") \ 1551 X(ENOENT, "No 
such file or directory") \ 1552 X(ENOEXEC, "Executable file format error") \ 1553 X(ENOLCK, "No locks available") \ 1554 X(ENOLINK, "Reserved") \ 1555 X(ENOMEM, "Not enough space") \ 1556 X(ENOMSG, "No message of the desired type") \ 1557 X(ENOPROTOOPT, "Protocol not available") \ 1558 X(ENOSPC, "No space left on device") \ 1559 X(ENOSR, "No STREAM resources") \ 1560 X(ENOSTR, "Not a STREAM") \ 1561 X(ENOSYS, "Function not supported") \ 1562 X(ENOTCONN, "The socket is not connected") \ 1563 X(ENOTDIR, "Not a directory") \ 1564 X(ENOTEMPTY, "Directory not empty") \ 1565 X(ENOTSOCK, "Not a socket") \ 1566 X(ENOTSUP, "Not supported") \ 1567 X(ENOTTY, "Inappropriate I/O control operation") \ 1568 X(ENXIO, "No such device or address") \ 1569 X(EOPNOTSUPP, "Operation not supported on socket") \ 1570 X(EOVERFLOW, "Value too large to be stored in data type") \ 1571 X(EPERM, "Operation not permitted") \ 1572 X(EPIPE, "Broken pipe") \ 1573 X(EPROTO, "Protocol error") \ 1574 X(EPROTONOSUPPORT, "Protocol not supported") \ 1575 X(EPROTOTYPE, "Protocol wrong type for socket") \ 1576 X(ERANGE, "Result too large") \ 1577 X(EROFS, "Read-only file system") \ 1578 X(ESPIPE, "Invalid seek") \ 1579 X(ESRCH, "No such process") \ 1580 X(ETIME, "Stream ioctl() timeout") \ 1581 X(ETIMEDOUT, "Connection timed out") \ 1582 X(ETXTBSY, "Text file busy") \ 1583 X(EWOULDBLOCK, "Operation would block") \ 1584 X(EXDEV, "Cross-device link") 1585 1586 #define DEFINE_ENTRY(e, text) { e, #e, text }, 1587 1588 static const struct { 1589 int v; 1590 const char* short_text; 1591 const char* long_text; 1592 } table [] = { 1593 1594 ALL_SHARED_ENUMS(DEFINE_ENTRY) 1595 1596 // The following enums are not defined on all platforms. 1597 #ifdef ESTALE 1598 DEFINE_ENTRY(ESTALE, "Reserved") 1599 #endif 1600 #ifdef EDQUOT 1601 DEFINE_ENTRY(EDQUOT, "Reserved") 1602 #endif 1603 #ifdef EMULTIHOP 1604 DEFINE_ENTRY(EMULTIHOP, "Reserved") 1605 #endif 1606 1607 // End marker. 
1608 { -1, "Unknown errno", "Unknown error" } 1609 1610 }; 1611 1612 #undef DEFINE_ENTRY 1613 #undef ALL_FLAGS 1614 1615 int i = 0; 1616 while (table[i].v != -1 && table[i].v != e) { 1617 i ++; 1618 } 1619 1620 return short_text ? table[i].short_text : table[i].long_text; 1621 1622 } 1623 1624 const char* os::strerror(int e) { 1625 return errno_to_string(e, false); 1626 } 1627 1628 const char* os::errno_name(int e) { 1629 return errno_to_string(e, true); 1630 } 1631 1632 #define trace_page_size_params(size) byte_size_in_exact_unit(size), exact_unit_for_byte_size(size) 1633 1634 void os::trace_page_sizes(const char* str, 1635 const size_t region_min_size, 1636 const size_t region_max_size, 1637 const size_t page_size, 1638 const char* base, 1639 const size_t size) { 1640 1641 log_info(pagesize)("%s: " 1642 " min=" SIZE_FORMAT "%s" 1643 " max=" SIZE_FORMAT "%s" 1644 " base=" PTR_FORMAT 1645 " page_size=" SIZE_FORMAT "%s" 1646 " size=" SIZE_FORMAT "%s", 1647 str, 1648 trace_page_size_params(region_min_size), 1649 trace_page_size_params(region_max_size), 1650 p2i(base), 1651 trace_page_size_params(page_size), 1652 trace_page_size_params(size)); 1653 } 1654 1655 void os::trace_page_sizes_for_requested_size(const char* str, 1656 const size_t requested_size, 1657 const size_t page_size, 1658 const size_t alignment, 1659 const char* base, 1660 const size_t size) { 1661 1662 log_info(pagesize)("%s:" 1663 " req_size=" SIZE_FORMAT "%s" 1664 " base=" PTR_FORMAT 1665 " page_size=" SIZE_FORMAT "%s" 1666 " alignment=" SIZE_FORMAT "%s" 1667 " size=" SIZE_FORMAT "%s", 1668 str, 1669 trace_page_size_params(requested_size), 1670 p2i(base), 1671 trace_page_size_params(page_size), 1672 trace_page_size_params(alignment), 1673 trace_page_size_params(size)); 1674 } 1675 1676 1677 // This is the working definition of a server class machine: 1678 // >= 2 physical CPU's and >=2GB of memory, with some fuzz 1679 // because the graphics memory (?) sometimes masks physical memory. 
// If you want to change the definition of a server class machine
// on some OS or platform, e.g., >=4GB on Windows platforms,
// then you'll have to parameterize this method based on that state,
// as was done for logical processors here, or replicate and
// specialize this method for each platform. (Or fix os to have
// some inheritance structure and use subclassing. Sigh.)
// If you want some platform to always or never behave as a server
// class machine, change the setting of AlwaysActAsServerClassMachine
// and NeverActAsServerClassMachine in globals*.hpp.
bool os::is_server_class_machine() {
  // First check for the early returns
  if (NeverActAsServerClassMachine) {
    return false;
  }
  if (AlwaysActAsServerClassMachine) {
    return true;
  }
  // Then actually look at the machine
  bool result = false;
  const unsigned int server_processors = 2;
  const julong server_memory = 2UL * G;
  // We seem not to get our full complement of memory.
  // We allow some part (1/8?) of the memory to be "missing",
  // based on the sizes of DIMMs, and maybe graphics cards.
  const julong missing_memory = 256UL * M;

  /* Is this a server class machine? */
  if ((os::active_processor_count() >= (int)server_processors) &&
      (os::physical_memory() >= (server_memory - missing_memory))) {
    const unsigned int logical_processors =
      VM_Version::logical_processors_per_package();
    if (logical_processors > 1) {
      // Count physical packages, not hyperthreads.
      const unsigned int physical_packages =
        os::active_processor_count() / logical_processors;
      if (physical_packages >= server_processors) {
        result = true;
      }
    } else {
      result = true;
    }
  }
  return result;
}

// Capture active_processor_count() once at startup; the crash reporter reads
// this raw value because querying the live count after a crash is unsafe.
void os::initialize_initial_active_processor_count() {
  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
  _initial_active_processor_count = active_processor_count();
  log_debug(os)("Initial active processor count set to %d" , _initial_active_processor_count);
}

bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  return os::pd_create_stack_guard_pages(addr, bytes);
}

// The reserve/commit/uncommit/release wrappers below delegate to the
// platform-dependent pd_* implementation and keep NMT bookkeeping in sync.

char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, executable);
  if (result != nullptr) {
    MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags);
  }
  return result;
}

char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable) {
  char* result = pd_attempt_reserve_memory_at(addr, bytes, executable);
  if (result != nullptr) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  } else {
    log_debug(os)("Attempt to reserve memory at " INTPTR_FORMAT " for "
                  SIZE_FORMAT " bytes failed, errno %d", p2i(addr), bytes, get_last_error());
  }
  return result;
}

static void assert_nonempty_range(const char* addr, size_t bytes) {
  assert(addr != nullptr && bytes > 0, "invalid range [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(addr) + bytes);
}

bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  assert_nonempty_range(addr, bytes);
  bool res = pd_commit_memory(addr, bytes, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  }
  return res;
}

bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool executable) {
  assert_nonempty_range(addr, size);
  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  }
  return res;
}

void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                               const char* mesg) {
  assert_nonempty_range(addr, bytes);
  // pd_commit_memory_or_exit aborts the VM on failure, so reaching the
  // record call implies success.
  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}

void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
                               bool executable, const char* mesg) {
  assert_nonempty_range(addr, size);
  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}

bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
  assert_nonempty_range(addr, bytes);
  bool res;
  if (MemTracker::enabled()) {
    Tracker tkr(Tracker::uncommit);
    res = pd_uncommit_memory(addr, bytes, executable);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_uncommit_memory(addr, bytes, executable);
  }
  return res;
}

bool os::release_memory(char* addr, size_t bytes) {
  assert_nonempty_range(addr, bytes);
  bool res;
  if (MemTracker::enabled()) {
    // Note: Tracker contains a ThreadCritical.
    Tracker tkr(Tracker::release);
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_release_memory(addr, bytes);
  }
  if (!res) {
    log_info(os)("os::release_memory failed (" PTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), bytes);
  }
  return res;
}

// Prints all mappings
void os::print_memory_mappings(outputStream* st) {
  os::print_memory_mappings(nullptr, SIZE_MAX, st);
}

// Pretouching must use a store, not just a load.  On many OSes loads from
// fresh memory would be satisfied from a single mapped page containing all
// zeros.  We need to store something to each page to get them backed by
// their own memory, which is the effect we want here.  An atomic add of
// zero is used instead of a simple store, allowing the memory to be used
// while pretouch is in progress, rather than requiring users of the memory
// to wait until the entire range has been touched.  This is technically
// a UB data race, but doesn't cause any problems for us.
void os::pretouch_memory(void* start, void* end, size_t page_size) {
  assert(start <= end, "invalid range: " PTR_FORMAT " -> " PTR_FORMAT, p2i(start), p2i(end));
  assert(is_power_of_2(page_size), "page size misaligned: %zu", page_size);
  assert(page_size >= sizeof(int), "page size too small: %zu", page_size);
  if (start < end) {
    // We're doing concurrent-safe touch and memory state has page
    // granularity, so we can touch anywhere in a page.  Touch at the
    // beginning of each page to simplify iteration.
    char* cur = static_cast<char*>(align_down(start, page_size));
    void* last = align_down(static_cast<char*>(end) - 1, page_size);
    assert(cur <= last, "invariant");
    // Iterate from first page through last (inclusive), being careful to
    // avoid overflow if the last page abuts the end of the address range.
    for ( ; true; cur += page_size) {
      Atomic::add(reinterpret_cast<int*>(cur), 0, memory_order_relaxed);
      if (cur >= last) break;
    }
  }
}

char* os::map_memory_to_file(size_t bytes, int file_desc) {
  // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
  // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
  // On all current implementations null is interpreted as any available address.
  char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc);
  if (result != nullptr) {
    MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC);
  }
  return result;
}

char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc) {
  char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc);
  if (result != nullptr) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  }
  return result;
}

char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec, MEMFLAGS flags) {
  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  if (result != nullptr) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
  }
  return result;
}

char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
                         read_only, allow_exec);
}

bool os::unmap_memory(char *addr, size_t bytes) {
  bool result;
  if (MemTracker::enabled()) {
    Tracker tkr(Tracker::release);
    result = pd_unmap_memory(addr, bytes);
    if (result) {
tkr.record((address)addr, bytes); 1901 } 1902 } else { 1903 result = pd_unmap_memory(addr, bytes); 1904 } 1905 return result; 1906 } 1907 1908 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { 1909 pd_free_memory(addr, bytes, alignment_hint); 1910 } 1911 1912 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 1913 pd_realign_memory(addr, bytes, alignment_hint); 1914 } 1915 1916 char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size, 1917 char* addr, bool executable) { 1918 1919 assert(is_aligned(addr, alignment), "Unaligned request address"); 1920 1921 char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable); 1922 if (result != nullptr) { 1923 // The memory is committed 1924 MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC); 1925 } 1926 1927 return result; 1928 } 1929 1930 bool os::release_memory_special(char* addr, size_t bytes) { 1931 bool res; 1932 if (MemTracker::enabled()) { 1933 // Note: Tracker contains a ThreadCritical. 1934 Tracker tkr(Tracker::release); 1935 res = pd_release_memory_special(addr, bytes); 1936 if (res) { 1937 tkr.record((address)addr, bytes); 1938 } 1939 } else { 1940 res = pd_release_memory_special(addr, bytes); 1941 } 1942 return res; 1943 } 1944 1945 // Convenience wrapper around naked_short_sleep to allow for longer sleep 1946 // times. Only for use by non-JavaThreads. 
1947 void os::naked_sleep(jlong millis) { 1948 assert(!Thread::current()->is_Java_thread(), "not for use by JavaThreads"); 1949 const jlong limit = 999; 1950 while (millis > limit) { 1951 naked_short_sleep(limit); 1952 millis -= limit; 1953 } 1954 naked_short_sleep(millis); 1955 } 1956 1957 1958 ////// Implementation of PageSizes 1959 1960 void os::PageSizes::add(size_t page_size) { 1961 assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); 1962 _v |= page_size; 1963 } 1964 1965 bool os::PageSizes::contains(size_t page_size) const { 1966 assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); 1967 return (_v & page_size) != 0; 1968 } 1969 1970 size_t os::PageSizes::next_smaller(size_t page_size) const { 1971 assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); 1972 size_t v2 = _v & (page_size - 1); 1973 if (v2 == 0) { 1974 return 0; 1975 } 1976 return round_down_power_of_2(v2); 1977 } 1978 1979 size_t os::PageSizes::next_larger(size_t page_size) const { 1980 assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); 1981 if (page_size == max_power_of_2<size_t>()) { // Shift by 32/64 would be UB 1982 return 0; 1983 } 1984 // Remove current and smaller page sizes 1985 size_t v2 = _v & ~(page_size + (page_size - 1)); 1986 if (v2 == 0) { 1987 return 0; 1988 } 1989 return (size_t)1 << count_trailing_zeros(v2); 1990 } 1991 1992 size_t os::PageSizes::largest() const { 1993 const size_t max = max_power_of_2<size_t>(); 1994 if (contains(max)) { 1995 return max; 1996 } 1997 return next_smaller(max); 1998 } 1999 2000 size_t os::PageSizes::smallest() const { 2001 // Strictly speaking the set should not contain sizes < os::vm_page_size(). 2002 // But this is not enforced. 
2003 return next_larger(1); 2004 } 2005 2006 void os::PageSizes::print_on(outputStream* st) const { 2007 bool first = true; 2008 for (size_t sz = smallest(); sz != 0; sz = next_larger(sz)) { 2009 if (first) { 2010 first = false; 2011 } else { 2012 st->print_raw(", "); 2013 } 2014 if (sz < M) { 2015 st->print(SIZE_FORMAT "k", sz / K); 2016 } else if (sz < G) { 2017 st->print(SIZE_FORMAT "M", sz / M); 2018 } else { 2019 st->print(SIZE_FORMAT "G", sz / G); 2020 } 2021 } 2022 if (first) { 2023 st->print("empty"); 2024 } 2025 } 2026 2027 // Check minimum allowable stack sizes for thread creation and to initialize 2028 // the java system classes, including StackOverflowError - depends on page 2029 // size. 2030 // The space needed for frames during startup is platform dependent. It 2031 // depends on word size, platform calling conventions, C frame layout and 2032 // interpreter/C1/C2 design decisions. Therefore this is given in a 2033 // platform (os/cpu) dependent constant. 2034 // To this, space for guard mechanisms is added, which depends on the 2035 // page size which again depends on the concrete system the VM is running 2036 // on. Space for libc guard pages is not included in this size. 2037 jint os::set_minimum_stack_sizes() { 2038 2039 _java_thread_min_stack_allowed = _java_thread_min_stack_allowed + 2040 StackOverflow::stack_guard_zone_size() + 2041 StackOverflow::stack_shadow_zone_size(); 2042 2043 _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size()); 2044 _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, _os_min_stack_allowed); 2045 2046 size_t stack_size_in_bytes = ThreadStackSize * K; 2047 if (stack_size_in_bytes != 0 && 2048 stack_size_in_bytes < _java_thread_min_stack_allowed) { 2049 // The '-Xss' and '-XX:ThreadStackSize=N' options both set 2050 // ThreadStackSize so we go with "Java thread stack size" instead 2051 // of "ThreadStackSize" to be more friendly. 
2052 tty->print_cr("\nThe Java thread stack size specified is too small. " 2053 "Specify at least " SIZE_FORMAT "k", 2054 _java_thread_min_stack_allowed / K); 2055 return JNI_ERR; 2056 } 2057 2058 // Make the stack size a multiple of the page size so that 2059 // the yellow/red zones can be guarded. 2060 JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size())); 2061 2062 // Reminder: a compiler thread is a Java thread. 2063 _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed + 2064 StackOverflow::stack_guard_zone_size() + 2065 StackOverflow::stack_shadow_zone_size(); 2066 2067 _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size()); 2068 _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, _os_min_stack_allowed); 2069 2070 stack_size_in_bytes = CompilerThreadStackSize * K; 2071 if (stack_size_in_bytes != 0 && 2072 stack_size_in_bytes < _compiler_thread_min_stack_allowed) { 2073 tty->print_cr("\nThe CompilerThreadStackSize specified is too small. " 2074 "Specify at least " SIZE_FORMAT "k", 2075 _compiler_thread_min_stack_allowed / K); 2076 return JNI_ERR; 2077 } 2078 2079 _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size()); 2080 _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, _os_min_stack_allowed); 2081 2082 stack_size_in_bytes = VMThreadStackSize * K; 2083 if (stack_size_in_bytes != 0 && 2084 stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) { 2085 tty->print_cr("\nThe VMThreadStackSize specified is too small. " 2086 "Specify at least " SIZE_FORMAT "k", 2087 _vm_internal_thread_min_stack_allowed / K); 2088 return JNI_ERR; 2089 } 2090 return JNI_OK; 2091 }