/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_OS_HPP
#define SHARE_VM_RUNTIME_OS_HPP

#include "jvmtifiles/jvmti.h"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "jvm_linux.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "jvm_solaris.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "jvm_windows.h"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "jvm_aix.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "jvm_bsd.h"
# include <setjmp.h>
# ifdef __APPLE__
#  include <mach/mach_time.h>
# endif
#endif

class AgentLibrary;
class methodHandle;
class instanceKlassHandle;

// os defines the interface to the operating system; this includes traditional
// OS services (time, I/O) as well as other functionality with system-
// dependent code.

typedef void (*dll_func)(...);

class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;

template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose

// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of non-memory resource
};

enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;
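// Illustrative sketch only (not part of this interface): the flag is meant to
// be passed as the 'executable' argument of the commit calls declared further
// below, e.g. something along the lines of
//
//   os::commit_memory_or_exit(data_addr, data_bytes, !ExecMem, "data pages");
//   os::commit_memory_or_exit(code_addr, code_bytes,  ExecMem, "code cache");
//
// where the addresses and sizes are hypothetical, previously reserved ranges.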
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

class MallocTracker;

class os: AllStatic {
  friend class VMStructs;
  friend class MallocTracker;
 public:
  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

 private:
  static OSThread*          _starting_thread;
  static address            _polling_page;
  static volatile int32_t * _mem_serialize_page;
  static uintptr_t          _serialize_page_mask;
 public:
  static size_t             _page_sizes[page_sizes_max];

 private:
  static void init_page_sizes(size_t default_page_size) {
    _page_sizes[0] = default_page_size;
    _page_sizes[1] = 0; // sentinel
  }

  static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
                                  size_t alignment_hint = 0);
  static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   pd_split_reserved_memory(char *base, size_t size,
                                         size_t split, bool realloc);
  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
  static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                                 bool executable);
  // Same as pd_commit_memory() except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   pd_commit_memory_or_exit(char* addr, size_t bytes,
                                         bool executable, const char* mesg);
  static void   pd_commit_memory_or_exit(char* addr, size_t size,
                                         size_t alignment_hint,
                                         bool executable, const char* mesg);
  static bool   pd_uncommit_memory(char* addr, size_t bytes);
  static bool   pd_release_memory(char* addr, size_t bytes);

  static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
                              char *addr, size_t bytes, bool read_only = false,
                              bool allow_exec = false);
  static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                                char *addr, size_t bytes, bool read_only,
                                bool allow_exec);
  static bool   pd_unmap_memory(char *addr, size_t bytes);
  static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);

  static void initialize_initial_active_processor_count();

  LINUX_ONLY(static void pd_init_container_support();)

 public:
  static void init(void);                  // Called before command line parsing

  static void init_container_support() {   // Called during command line parsing.
    LINUX_ONLY(pd_init_container_support();)
  }

  static void init_before_ergo(void);      // Called after command line parsing
                                           // before VM ergonomics processing.
  static jint init_2(void);                // Called after command line parsing
                                           // and VM ergonomics processing
  static void init_globals(void) {         // Called from init_globals() in init.cpp
    init_globals_ext();
  }

  // File names are case-insensitive on Windows only
  // Override me as needed
  static int    file_name_strcmp(const char* s1, const char* s2);

  // get/unset environment variable
  static bool getenv(const char* name, char* buffer, int len);
  static bool unsetenv(const char* name);

  static bool have_special_privileges();

  static jlong  javaTimeMillis();
  static jlong  javaTimeNanos();
  static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
  static void   run_periodic_checks();


  // Returns the elapsed time in seconds since the vm started.
  static double elapsedTime();

  // Returns real time in seconds since an arbitrary point
  // in the past.
  static bool getTimesSecs(double* process_real_time,
                           double* process_user_time,
                           double* process_system_time);

  // Interface to the performance counter
  static jlong elapsed_counter();
  static jlong elapsed_frequency();

  // The "virtual time" of a thread is the amount of time a thread has
  // actually run.  The first function indicates whether the OS supports
  // this functionality for the current thread, and if so:
  //   * the second enables vtime tracking (if that is required).
  //   * the third tells whether vtime is enabled.
  //   * the fourth returns the elapsed virtual time for the current
  //     thread.
  static bool supports_vtime();
  static bool enable_vtime();
  static bool vtime_enabled();
  static double elapsedVTime();

  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
  // It is MT safe, but not async-safe, as reading time zone
  // information may require a lock on some platforms.
  static char*      local_time_string(char *buf, size_t buflen);
  static struct tm* localtime_pd     (const time_t* clock, struct tm* res);
  // Fill in buffer with current local time as an ISO-8601 string.
  // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
  // Returns buffer, or NULL if it failed.
  static char* iso8601_time(char* buffer, size_t buffer_length);

  // Interface for detecting multiprocessor system
  static inline bool is_MP() {
    // During bootstrap, if _processor_count is not yet initialized, we
    // claim to be MP as that is safest.  If any platform has a stub
    // generator that might be triggered in this phase, and for which
    // being declared MP when in fact not is a problem, then the
    // bootstrap routine for the stub generator needs to check the
    // processor count directly and leave the bootstrap routine in
    // place until called after initialization has occurred.
    return (_processor_count != 1) || AssumeMP;
  }
  static julong available_memory();
  static julong physical_memory();
  static bool has_allocatable_memory_limit(julong* limit);
  static bool is_server_class_machine();

  // number of CPUs
  static int processor_count() {
    return _processor_count;
  }
  static void set_processor_count(int count) { _processor_count = count; }

  // Returns the number of CPUs this process is currently allowed to run on.
  // Note that on some OSes this can change dynamically.
  static int active_processor_count();

  // At startup the number of active CPUs this process is allowed to run on.
  // This value does not change dynamically.
  // May be different from active_processor_count().
  static int initial_active_processor_count() {
    assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
    return _initial_active_processor_count;
  }

  // Bind processes to processors.
  //     This is a two step procedure:
  //     first you generate a distribution of processes to processors,
  //     then you bind processes according to that distribution.
  // Compute a distribution for number of processes to processors.
  //    Stores the processor id's into the distribution array argument.
  //    Returns true if it worked, false if it didn't.
  static bool distribute_processes(uint length, uint* distribution);
  // Binds the current process to a processor.
  //    Returns true if it worked, false if it didn't.
  static bool bind_to_processor(uint processor_id);

  // Give a name to the current thread.
  static void set_native_thread_name(const char *name);

  // Interface for stack banging (predetect possible stack overflow for
  // exception processing).  There are guard pages, and above that shadow
  // pages for stack overflow checking.
  static bool uses_stack_guard_pages();
  static bool allocate_stack_guard_pages();
  static void bang_stack_shadow_pages();
  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);

  // OS interface to Virtual Memory

  // Return the default page size.
  static int    vm_page_size();

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value. The returned value will divide region_size.
  static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value. The returned value might not divide region_size.
  static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);

  // Return the largest page size that can be used
  static size_t max_page_size() {
    // The _page_sizes array is sorted in descending order.
    return _page_sizes[0];
  }
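  // Illustrative sketch of the contract above (hypothetical sizes; not a
  // statement about which page sizes a given platform supports):
  //
  //   size_t page_sz = os::page_size_for_region_aligned(16*M, 4);
  //   // page_sz <= 16*M / 4 and 16*M % page_sz == 0
  //
  // The unaligned variant obeys the same upper bound, but its result might
  // not divide the region size evenly.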
  // Methods for tracing page sizes returned by the above method; enabled by
  // TracePageSizes.  The region_{min,max}_size parameters should be the values
  // passed to page_size_for_region() and page_size should be the result of that
  // call.  The (optional) base and size parameters should come from the
  // ReservedSpace base() and size() methods.
  static void trace_page_sizes(const char* str, const size_t* page_sizes,
                               int count) PRODUCT_RETURN;
  static void trace_page_sizes(const char* str, const size_t region_min_size,
                               const size_t region_max_size,
                               const size_t page_size,
                               const char* base = NULL,
                               const size_t size = 0) PRODUCT_RETURN;

  static int    vm_allocation_granularity();
  static char*  reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  reserve_memory(size_t bytes, char* addr,
                               size_t alignment_hint, MEMFLAGS flags);
  static char*  reserve_memory_aligned(size_t size, size_t alignment);
  static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   commit_memory(char* addr, size_t bytes, bool executable);
  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
                              bool executable);
  // Same as commit_memory() except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   commit_memory_or_exit(char* addr, size_t bytes,
                                      bool executable, const char* mesg);
  static void   commit_memory_or_exit(char* addr, size_t size,
                                      size_t alignment_hint,
                                      bool executable, const char* mesg);
  static bool   uncommit_memory(char* addr, size_t bytes);
  static bool   release_memory(char* addr, size_t bytes);

  // Touch memory pages that cover the memory range from start to end (exclusive)
  // to make the OS back the memory range with actual memory.
  // Current implementation may not touch the last page if unaligned addresses
  // are passed.
  static void   pretouch_memory(char* start, char* end);

  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
                               bool is_committed = true);

  static bool   guard_memory(char* addr, size_t bytes);
  static bool   unguard_memory(char* addr, size_t bytes);
  static bool   create_stack_guard_pages(char* addr, size_t bytes);
  static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
  static bool   remove_stack_guard_pages(char* addr, size_t bytes);

  static char*  map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   unmap_memory(char *addr, size_t bytes);
  static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);
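  // Illustrative sketch of how the reserve/commit/release calls above compose
  // (hypothetical names and sizes; real callers normally go through the
  // ReservedSpace/VirtualSpace abstractions):
  //
  //   char* base = os::reserve_memory(bytes);                       // reserve address range
  //   if (base != NULL) {
  //     os::commit_memory_or_exit(base, bytes, !ExecMem, "pages");  // back it with memory
  //     ...
  //     os::uncommit_memory(base, bytes);
  //     os::release_memory(base, bytes);                            // give the range back
  //   }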
  // NUMA-specific interface
  static bool   numa_has_static_binding();
  static bool   numa_has_group_homing();
  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
  static void   numa_make_global(char *addr, size_t bytes);
  static size_t numa_get_groups_num();
  static size_t numa_get_leaf_groups(int *ids, size_t size);
  static bool   numa_topology_changed();
  static int    numa_get_group_id();

  // Page manipulation
  struct page_info {
    size_t size;
    int lgrp_id;
  };
  static bool   get_page_info(char *start, page_info* info);
  static char*  scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);

  static char*  non_memory_address_word();
  // reserve, commit and pin the entire memory region
  static char*  reserve_memory_special(size_t size, size_t alignment,
                                       char* addr, bool executable);
  static bool   release_memory_special(char* addr, size_t bytes);
  static void   large_page_init();
  static size_t large_page_size();
  static bool   can_commit_large_page_memory();
  static bool   can_execute_large_page_memory();

  // OS interface to polling page
  static address get_polling_page()             { return _polling_page; }
  static void    set_polling_page(address page) { _polling_page = page; }
  static bool    is_poll_address(address addr)  { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
  static void    make_polling_page_unreadable();
  static void    make_polling_page_readable();

  // Routines used to serialize the thread state without using membars
  static void    serialize_thread_states();

  // Since we write to the serialize page from every thread, we
  // want stores to be on unique cache lines whenever possible
  // in order to minimize CPU cross talk.  We pre-compute the
  // amount to shift the thread* to make this offset unique to
  // each thread.
  static int     get_serialize_page_shift_count() {
    return SerializePageShiftCount;
  }

  static void    set_serialize_page_mask(uintptr_t mask) {
    _serialize_page_mask = mask;
  }

  static unsigned int get_serialize_page_mask() {
    return _serialize_page_mask;
  }

  static void    set_memory_serialize_page(address page);

  static address get_memory_serialize_page() {
    return (address)_mem_serialize_page;
  }

  static inline void write_memory_serialize_page(JavaThread *thread) {
    uintptr_t page_offset = ((uintptr_t)thread >>
                            get_serialize_page_shift_count()) &
                            get_serialize_page_mask();
    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
  }

  static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
    if (UseMembar) return false;
    // Previously this function calculated the exact address of this
    // thread's serialize page, and checked if the faulting address
    // was equal.  However, some platforms mask off faulting addresses
    // to the page size, so now we just check that the address is
    // within the page.  This makes the thread argument unnecessary,
    // but we retain the NULL check to preserve existing behaviour.
    if (thread == NULL) return false;
    address page = (address) _mem_serialize_page;
    return addr >= page && addr < (page + os::vm_page_size());
  }

  static void block_on_serialize_page_trap();
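  // Illustrative sketch of how platform fault handlers can be expected to use
  // the two routines above (pseudocode only; the real handlers live in the
  // platform-specific signal handling code):
  //
  //   // inside the fault handler, with 'fault_addr' taken from the siginfo:
  //   if (os::is_memory_serialize_page(thread, fault_addr)) {
  //     os::block_on_serialize_page_trap();  // wait until the page is writable again
  //     // then retry the faulting store in write_memory_serialize_page()
  //   }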
  // threads

  enum ThreadType {
    vm_thread,
    cgc_thread,        // Concurrent GC thread
    pgc_thread,        // Parallel GC thread
    java_thread,
    compiler_thread,
    watcher_thread,
    os_thread
  };

  static bool create_thread(Thread* thread,
                            ThreadType thr_type,
                            size_t stack_size = 0);

  // The "main thread", also known as "starting thread", is the thread
  // that loads/creates the JVM via JNI_CreateJavaVM.
  static bool create_main_thread(JavaThread* thread);

  // The primordial thread is the initial process thread.  The java
  // launcher never uses the primordial thread as the main thread, but
  // applications that host the JVM directly may do so.  Some platforms
  // need special-case handling of the primordial thread if it attaches
  // to the VM.
  static bool is_primordial_thread(void)
#if defined(_WINDOWS) || defined(BSD)
    // No way to identify the primordial thread.
    { return false; }
#else
    ;
#endif

  static bool create_attached_thread(JavaThread* thread);
  static void pd_start_thread(Thread* thread);
  static void start_thread(Thread* thread);

  static void initialize_thread(Thread* thr);
  static void free_thread(OSThread* osthread);

  // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit
  static intx current_thread_id();
  static int current_process_id();
  static int sleep(Thread* thread, jlong ms, bool interruptable);
  // Short standalone OS sleep suitable for slow path spin loop.
  // Ignores Thread.interrupt() (so keep it short).
  // ms = 0, will sleep for the least amount of time allowed by the OS.
  static void naked_short_sleep(jlong ms);
  static void infinite_sleep(); // never returns, use with CAUTION
  static void yield();          // Yields to all threads with same priority
  enum YieldResult {
    YIELD_SWITCHED  =  1,       // caller descheduled, other ready threads exist & ran
    YIELD_NONEREADY =  0,       // No other runnable/ready threads;
                                // the platform-specific yield returned immediately.
    YIELD_UNKNOWN   = -1        // Unknown: platform doesn't support _SWITCHED or _NONEREADY
    // YIELD_SWITCHED and YIELD_NONEREADY imply the platform supports a "strong"
    // yield that can be used in lieu of blocking.
  };
  static YieldResult NakedYield();
  static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
  static void loop_breaker(int attempts);  // called from within tight loops to possibly influence time-sharing
  static OSReturn set_priority(Thread* thread, ThreadPriority priority);
  static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);

  static void interrupt(Thread* thread);
  static bool is_interrupted(Thread* thread, bool clear_interrupted);

  static int pd_self_suspend_thread(Thread* thread);

  static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
  static frame      fetch_frame_from_context(void* ucVoid);

  static ExtendedPC get_thread_pc(Thread *thread);
  static void breakpoint();

  static address current_stack_pointer();
  static address current_stack_base();
  static size_t  current_stack_size();

  static void verify_stack_alignment() PRODUCT_RETURN;

  static int message_box(const char* title, const char* message);
  static char* do_you_want_to_debug(const char* message);

  // run cmd in a separate process and return its exit code; returns -1 on failure
  static int fork_and_exec(char *cmd, bool use_vfork_if_available = false);

  // os::exit() is merged with vm_exit()
  // static void exit(int num);

  // Terminate the VM, but don't exit the process
  static void shutdown();

  // Terminate with an error.  Default is to generate a core file on platforms
  // that support such things.  This calls shutdown() and then aborts.
  static void abort(bool dump_core = true);

  // Die immediately, no exit hook, no abort hook, no cleanup.
  static void die();

  // File i/o operations
  static const int default_file_open_flags();
  static int open(const char *path, int oflag, int mode);
  static FILE* open(int fd, const char* mode);
  static int close(int fd);
  static jlong lseek(int fd, jlong offset, int whence);
  static char* native_path(char *path);
  static int ftruncate(int fd, jlong length);
  static int fsync(int fd);
  static int available(int fd, jlong *bytes);

  // File i/o operations

  static size_t read(int fd, void *buf, unsigned int nBytes);
  static size_t read_at(int fd, void *buf, unsigned int nBytes, jlong offset);
  static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
  static size_t write(int fd, const void *buf, unsigned int nBytes);

  // Reading directories.
  static DIR*           opendir(const char* dirname);
  static struct dirent* readdir(DIR* dirp);
  static int            closedir(DIR* dirp);

  // Dynamic library extension
  static const char*    dll_file_extension();

  static const char*    get_temp_directory();
  static const char*    get_current_directory(char *buf, size_t buflen);

  // Builds a platform-specific full library path given an ld path and a
  // lib name.  Returns true if buffer contains a full path to an existing
  // file, false otherwise.
  static bool dll_build_name(char* buffer, size_t size,
                             const char* pathname, const char* fname);

  // Symbol lookup, find nearest function name; basically it implements
  // dladdr() for all platforms.  Name of the nearest function is copied
  // to buf.  Distance from its base address is optionally returned as offset.
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);

  // Locate DLL/DSO.  On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
  // and the library's base address.  On failure, buf[0] is set to '\0'
  // and offset is set to -1 (if offset is non-NULL).
  static bool dll_address_to_library_name(address addr, char* buf,
                                          int buflen, int* offset);
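  // Illustrative sketch of how an error reporter might use the two lookups
  // above (hypothetical buffer, pc and stream; shown only to make the
  // contract concrete):
  //
  //   char buf[256];
  //   int  offset;
  //   if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  //     st->print("%s+0x%x", buf, offset);   // nearest symbol plus offset
  //   } else if (os::dll_address_to_library_name(pc, buf, sizeof(buf), &offset)) {
  //     st->print("%s+0x%x", buf, offset);   // library path plus offset
  //   }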
  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
  static bool address_is_in_vm(address addr);

  // Loads .dll/.so and, in case of error, checks whether the .dll/.so was
  // built for the same architecture as HotSpot is running on.
  static void* dll_load(const char *name, char *ebuf, int ebuflen);

  // lookup symbol in a shared library
  static void* dll_lookup(void* handle, const char* name);

  // Unload library
  static void  dll_unload(void *lib);

  // Callback for loaded module information
  // Input parameters:
  //    char*     module_file_name,
  //    address   module_base_addr,
  //    address   module_top_addr,
  //    void*     param
  typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *);

  static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param);
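  // Illustrative callback matching the typedef above (hypothetical name,
  // sketching how get_loaded_modules_info() might be driven):
  //
  //   static int print_module(const char* name, address base, address top, void* param) {
  //     outputStream* st = (outputStream*)param;
  //     st->print_cr("%s [" PTR_FORMAT ", " PTR_FORMAT ")", name, base, top);
  //     return 0;  // keep iterating
  //   }
  //   ...
  //   os::get_loaded_modules_info(print_module, (void*)st);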
  // Return the handle of this process
  static void* get_default_process_handle();

  // Check for static linked agent library
  static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                                 size_t syms_len);

  // Find agent entry point
  static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                                   const char *syms[], size_t syms_len);

  static int vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0);
  static int snprintf(char* buf, size_t len, const char* fmt, ...) ATTRIBUTE_PRINTF(3, 4);

  // Print out system information; these functions are called by the fatal
  // error handler.  Output format may be different on different platforms.
  static void print_os_info(outputStream* st);
  static void print_os_info_brief(outputStream* st);
  static void print_cpu_info(outputStream* st);
  static void pd_print_cpu_info(outputStream* st);
  static void print_memory_info(outputStream* st);
  static void print_dll_info(outputStream* st);
  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
  static void print_context(outputStream* st, void* context);
  static void print_register_info(outputStream* st, void* context);
  static void print_siginfo(outputStream* st, void* siginfo);
  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
  static void print_date_and_time(outputStream* st, char* buf, size_t buflen);

  static void print_location(outputStream* st, intptr_t x, bool verbose = false);
  static size_t lasterror(char *buf, size_t len);
  static int get_last_error();

  // Determines whether the calling process is being debugged by a user-mode debugger.
  static bool is_debugger_attached();

  // wait for a key press if PauseAtExit is set
  static void wait_for_keypress_at_exit(void);

  // The following two functions are used by fatal error handler to trace
  // native (C) frames.  They are not part of frame.hpp/frame.cpp because
  // frame.hpp/cpp assume thread is JavaThread, and also because different
  // OS/compiler may have different convention or provide different API to
  // walk C frames.
  //
  // We don't attempt to become a debugger, so we only follow frames if that
  // does not require a lookup in the unwind table, which is part of the binary
  // file but may be unsafe to read after a fatal error.  So on x86, we can
  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
  // possible to walk C stack without having the unwind table.
  static bool is_first_C_frame(frame *fr);
  static frame get_sender_for_C_frame(frame *fr);

  // return current frame. pc() and sp() are set to NULL on failure.
  static frame current_frame();

  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);

  // returns a string to describe the exception/signal;
  // returns NULL if exception_code is not an OS exception/signal.
  static const char* exception_name(int exception_code, char* buf, size_t buflen);

  // Returns native Java library, loads if necessary
  static void*    native_java_library();

  // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
  static void     jvm_path(char *buf, jint buflen);

  // Returns true if we are running in a headless jre.
  static bool     is_headless_jre();

  // JNI names
  static void     print_jni_name_prefix_on(outputStream* st, int args_size);
  static void     print_jni_name_suffix_on(outputStream* st, int args_size);

  // File conventions
  static const char* file_separator();
  static const char* line_separator();
  static const char* path_separator();

  // Init os specific system properties values
  static void init_system_properties_values();

  // IO operations, non-JVM_ version.
  static int stat(const char* path, struct stat* sbuf);
  static bool dir_is_empty(const char* path);

  // IO operations on binary files
  static int create_binary_file(const char* path, bool rewrite_existing);
  static jlong current_file_offset(int fd);
  static jlong seek_to_file_offset(int fd, jlong offset);

  // Thread Local Storage
  static int   allocate_thread_local_storage();
  static void  thread_local_storage_at_put(int index, void* value);
  static void* thread_local_storage_at(int index);
  static void  free_thread_local_storage(int index);

  // Retrieve native stack frames.
  // Parameters:
  //   stack:  an array to store stack pointers.
  //   size:   size of the above array.
  //   toSkip: number of stack frames to skip at the beginning.
  // Return: number of stack frames captured.
  static int get_native_stack(address* stack, int size, int toSkip = 0);

  // General allocation (must be MT-safe)
  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
  static void* malloc  (size_t size, MEMFLAGS flags);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);

  static void  free    (void *memblock, MEMFLAGS flags = mtNone);
  static bool  check_heap(bool force = false);      // verify C heap integrity
  static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup

#ifndef PRODUCT
  static julong num_mallocs;         // # of calls to malloc/realloc
  static julong alloc_bytes;         // # of bytes allocated
  static julong num_frees;           // # of calls to free
  static julong free_bytes;          // # of bytes freed
#endif

  // SocketInterface (ex HPI SocketInterface)
  static int socket(int domain, int type, int protocol);
  static int socket_close(int fd);
  static int socket_shutdown(int fd, int howto);
  static int recv(int fd, char* buf, size_t nBytes, uint flags);
  static int send(int fd, char* buf, size_t nBytes, uint flags);
  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
  static int timeout(int fd, long timeout);
  static int listen(int fd, int count);
  static int connect(int fd, struct sockaddr* him, socklen_t len);
  static int bind(int fd, struct sockaddr* him, socklen_t len);
  static int accept(int fd, struct sockaddr* him, socklen_t* len);
  static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                      struct sockaddr* from, socklen_t* fromlen);
  static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
  static int sendto(int fd, char* buf, size_t len, uint flags,
                    struct sockaddr* to, socklen_t tolen);
  static int socket_available(int fd, jint* pbytes);

  static int get_sock_opt(int fd, int level, int optname,
                          char* optval, socklen_t* optlen);
  static int set_sock_opt(int fd, int level, int optname,
                          const char* optval, socklen_t optlen);
  static int get_host_name(char* name, int namelen);

  static struct hostent* get_host_by_name(char* name);

  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
  static void  signal_init();
  static void  signal_init_pd();
  static void  signal_notify(int signal_number);
  static void* signal(int signal_number, void* handler);
  static void  signal_raise(int signal_number);
  static int   signal_wait();
  static int   signal_lookup();
  static void* user_handler();
  static void  terminate_signal_thread();
  static int   sigexitnum_pd();

  // random number generation
  static long random();                  // return 32bit pseudorandom number
  static void init_random(long initval); // initialize random sequence

  // Structured OS Exception support
  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

  // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
  static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

  // Get the default path to the core file
  // Returns the length of the string
  static int get_core_path(char* buffer, size_t bufferSize);
  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux   - fast clock_gettime where available - user+sys
  //            - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();
  static jlong thread_cpu_time(Thread* t);

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Return a bunch of info about the timers.
  // Note that the returned info for these two functions may be different
  // on some platforms
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();

  // System loadavg support.  Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);

  // Extensions
#include "runtime/os_ext.hpp"

 public:
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };

  // Platform dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_posix.hpp"
# include "os_bsd.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_aarch64
# include "os_linux_aarch64.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "os_linux_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "os_linux_zero.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "os_solaris_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "os_solaris_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "os_windows_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "os_linux_arm.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "os_linux_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "os_aix_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "os_bsd_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "os_bsd_zero.hpp"
#endif

 public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  static bool platform_print_native_stack(outputStream* st, void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif

  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield();                     // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform dependent Agent_OnLoad_<libname> function name
  // which is used to find statically linked in agents.
  static char* build_agent_function_name(const char *sym, const char *cname,
                                         bool is_absolute_path);

  class SuspendedThreadTaskContext {
  public:
    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
    Thread* thread() const { return _thread; }
    void* ucontext() const { return _ucontext; }
  private:
    Thread* _thread;
    void* _ucontext;
  };

  class SuspendedThreadTask {
  public:
    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
    virtual ~SuspendedThreadTask() {}
    void run();
    bool is_done() { return _done; }
    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  protected:
  private:
    void internal_do_task();
    Thread* _thread;
    bool _done;
  };
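  // Illustrative sketch of the intended use of SuspendedThreadTask
  // (hypothetical subclass; real callers are the sampling/profiling code):
  //
  //   class SamplePCTask : public os::SuspendedThreadTask {
  //   public:
  //     SamplePCTask(Thread* t) : os::SuspendedThreadTask(t) {}
  //     void do_task(const os::SuspendedThreadTaskContext& ctx) {
  //       // inspect ctx.thread() / ctx.ucontext() while the thread is suspended
  //     }
  //   };
  //   SamplePCTask task(target);
  //   task.run();                // suspends target, calls do_task(), resumes
  //   assert(task.is_done(), "task should have run");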
#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
  class SuspendResume {
  public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    State state() const { return _state; }

    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
#endif


 protected:
  static long _rand_seed;                     // seed for random number generator
  static int _processor_count;                // number of processors
  static int _initial_active_processor_count; // number of active processors during initialization.

  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  static bool set_boot_path(char fileSep, char pathSep);
  static char** split_path(const char* path, int* n);

};

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();

#endif // SHARE_VM_RUNTIME_OS_HPP