src/hotspot/os/linux/os_linux.cpp

1444   os::shutdown();
1445   if (dump_core) {
1446     if (DumpPrivateMappingsInCore) {
1447       ClassLoader::close_jrt_image();
1448     }
1449 #ifndef PRODUCT
1450     fdStream out(defaultStream::output_fd());
1451     out.print_raw("Current thread is ");
1452     char buf[16];
1453     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1454     out.print_raw_cr(buf);
1455     out.print_raw_cr("Dumping core ...");
1456 #endif
1457     ::abort(); // dump core
1458   }
1459 
1460   ::exit(1);
1461 }
1462 
1463 // Die immediately, no exit hook, no abort hook, no cleanup.
1464 // Dump a core file, if possible, for debugging.
1465 void os::die() {
1466   if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
1467     // For TimeoutInErrorHandlingTest.java, we just kill the VM
1468     // and don't take the time to generate a core file.
1469     os::signal_raise(SIGKILL);
1470   } else {
1471     ::abort();
1472   }
1473 }
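
The behaviour the new branch relies on can be reproduced outside the VM: raising SIGKILL terminates the process immediately, with no handlers, no atexit processing and no core file, while abort() raises SIGABRT and produces a core dump if the resource limits allow it. A minimal standalone sketch, not part of the change itself; fast_exit is a hypothetical flag standing in for the TestUnresponsiveErrorHandler / CreateCoredumpOnCrash combination:

    // Standalone illustration, assuming a POSIX system.
    #include <csignal>
    #include <cstdlib>

    void die(bool fast_exit) {
      if (fast_exit) {
        ::raise(SIGKILL);   // cannot be caught; no core file is written
      } else {
        ::abort();          // raises SIGABRT; dumps core if "ulimit -c" permits
      }
    }
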
1474 
1475 // thread_id is kernel thread id (similar to Solaris LWP id)
1476 intx os::current_thread_id() { return os::Linux::gettid(); }
1477 int os::current_process_id() {
1478   return ::getpid();
1479 }
1480 
1481 // DLL functions
1482 
1483 const char* os::dll_file_extension() { return ".so"; }
1484 
1485 // This must be hard coded because it's the system's temporary
1486 // directory not the java application's temp directory, ala java.io.tmpdir.
1487 const char* os::get_temp_directory() { return "/tmp"; }
1488 
1489 static bool file_exists(const char* filename) {
1490   struct stat statbuf;
1491   if (filename == NULL || strlen(filename) == 0) {
1492     return false;


1864   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1865     if (lib_arch.name!=NULL) {
1866       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1867                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1868                  lib_arch.name, arch_array[running_arch_index].name);
1869     } else {
1870       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1871                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1872                  lib_arch.code,
1873                  arch_array[running_arch_index].name);
1874     }
1875   }
1876 
1877   return NULL;
1878 }
1879 
1880 void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
1881                                 int ebuflen) {
1882   void * result = ::dlopen(filename, RTLD_LAZY);
1883   if (result == NULL) {
1884     const char* error_report = ::dlerror();
1885     if (error_report == NULL) {
1886       error_report = "dlerror returned no error description";
1887     }
1888     if (ebuf != NULL && ebuflen > 0) {
1889       ::strncpy(ebuf, error_report, ebuflen-1);
1890       ebuf[ebuflen-1]='\0';
1891     }
1892     Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1893   } else {
1894     Events::log(NULL, "Loaded shared library %s", filename);
1895   }
1896   return result;
1897 }
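
A hedged sketch of the calling pattern the helper supports: the caller provides a fixed-size error buffer and gets back either a handle or a copy of the dlerror() text, with the same NULL guard as above. The library name and buffer size below are made up for the sketch and are not from the change:

    // Hypothetical standalone caller mirroring the dlopen_helper() pattern.
    #include <dlfcn.h>
    #include <cstdio>

    static void load_example() {
      char ebuf[1024];
      ebuf[0] = '\0';
      void* handle = ::dlopen("libfoo.so", RTLD_LAZY);
      if (handle == NULL) {
        const char* err = ::dlerror();
        if (err == NULL) {
          err = "dlerror returned no error description";  // same guard as above
        }
        ::snprintf(ebuf, sizeof(ebuf), "%s", err);
        ::printf("load failed: %s\n", ebuf);
      } else {
        ::dlclose(handle);
      }
    }
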
1898 
1899 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
1900                                        int ebuflen) {
1901   void * result = NULL;
1902   if (LoadExecStackDllInVMThread) {
1903     result = dlopen_helper(filename, ebuf, ebuflen);
1904   }
1905 
1906   // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
1907   // library that requires an executable stack, or which does not have this
1908   // stack attribute set, dlopen changes the stack attribute to executable. The
1909   // read protection of the guard pages gets lost.
1910   //
1911   // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
1912   // may have been queued at the same time.
1913 
1914   if (!_stack_is_executable) {
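
One way to observe the problem the comment above describes, outside the VM, is to inspect the permissions of the [stack] mapping in /proc/self/maps before and after dlopen'ing a library that lacks a non-executable PT_GNU_STACK note. A hedged sketch of such a check; the parsing is simplistic and assumes the Linux /proc/self/maps format:

    // Minimal sketch: returns true if the main thread stack is currently
    // mapped executable. Assumes Linux's /proc/self/maps layout.
    #include <cstdio>
    #include <cstring>

    static bool stack_is_executable() {
      FILE* maps = ::fopen("/proc/self/maps", "r");
      if (maps == NULL) return false;
      char line[512];
      bool exec = false;
      while (::fgets(line, sizeof(line), maps) != NULL) {
        if (::strstr(line, "[stack]") != NULL) {
          // Format: "addr-addr perms offset dev inode pathname"
          char perms[8] = {0};
          if (::sscanf(line, "%*s %4s", perms) == 1) {
            exec = (::strchr(perms, 'x') != NULL);
          }
          break;
        }
      }
      ::fclose(maps);
      return exec;
    }
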


4088 
4089 bool os::can_execute_large_page_memory() {
4090   return UseTransparentHugePages || UseHugeTLBFS;
4091 }
4092 
4093 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
4094   assert(file_desc >= 0, "file_desc is not valid");
4095   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
4096   if (result != NULL) {
4097     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
4098       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
4099     }
4100   }
4101   return result;
4102 }
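
Conceptually, replace_existing_mapping_with_file_mapping() amounts to remapping the already reserved range as a file-backed mapping with MAP_FIXED, so the Java heap ends up backed by a file in the chosen directory. A hedged, standalone approximation only; the actual helper elsewhere in this file may differ in flags and error handling:

    // Conceptual sketch: map 'size' bytes of 'fd' over an existing anonymous
    // reservation at 'base'. MAP_FIXED replaces the old mapping on Linux.
    // Not the actual HotSpot helper.
    #include <sys/mman.h>
    #include <cstddef>

    static char* map_file_over_reservation(char* base, size_t size, int fd) {
      void* res = ::mmap(base, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_FIXED, fd, 0);
      return (res == MAP_FAILED) ? NULL : (char*)res;
    }
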
4103 
4104 // Reserve memory at an arbitrary address, only if that area is
4105 // available (and not reserved for something else).
4106 
4107 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
4108   // Assert only that the size is a multiple of the page size, since
4109   // that's all that mmap requires, and since that's all we really know
4110   // about at this low abstraction level.  If we need higher alignment,
4111   // we can either pass an alignment to this method or verify alignment
4112   // in one of the methods further up the call chain.  See bug 5044738.
4113   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
4114 
4115   // Repeatedly allocate blocks until the block is allocated at the
4116   // right spot.
4117 
4118   // Linux mmap allows caller to pass an address as hint; give it a try first,
4119   // if kernel honors the hint then we can return immediately.
4120   char * addr = anon_mmap(requested_addr, bytes, false);
4121   if (addr == requested_addr) {
4122     return requested_addr;
4123   }
4124 
4125   if (addr != NULL) {
4126     // mmap() is successful but it fails to reserve at the requested address
4127     anon_munmap(addr, bytes);
4128   }
4129 
4130   return NULL;
4131 }
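
The mmap hint contract that the simplified function now relies on can be shown in isolation: pass the desired address as the first argument without MAP_FIXED, and the kernel either honors the hint or returns some other address, in which case the caller unmaps and reports failure. A minimal standalone sketch under those assumptions (the exact protection and flags used by anon_mmap() may differ):

    // Standalone sketch of the "try the hint, give it back if it missed" pattern.
    #include <sys/mman.h>
    #include <cstddef>

    static char* try_reserve_at(char* requested_addr, size_t bytes) {
      void* addr = ::mmap(requested_addr, bytes, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (addr == MAP_FAILED) {
        return NULL;                      // mmap itself failed
      }
      if (addr == (void*)requested_addr) {
        return requested_addr;            // kernel honored the hint
      }
      ::munmap(addr, bytes);              // reserved, but in the wrong place
      return NULL;
    }
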
4132 
4133 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
4134 void os::infinite_sleep() {
4135   while (true) {    // sleep forever ...
4136     ::sleep(100);   // ... 100 seconds at a time
4137   }
4138 }
4139 
4140 // Used to convert frequent JVM_Yield() to nops
4141 bool os::dont_yield() {
4142   return DontYieldALot;
4143 }
4144 
4145 // Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will
4146 // actually give up the CPU. Since skip buddy (v2.6.28):
4147 //
4148 // * Sets the yielding task as skip buddy for current CPU's run queue.
4149 // * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
4150 // * Clears skip buddies for this run queue (yielding task no longer a skip buddy).




1444   os::shutdown();
1445   if (dump_core) {
1446     if (DumpPrivateMappingsInCore) {
1447       ClassLoader::close_jrt_image();
1448     }
1449 #ifndef PRODUCT
1450     fdStream out(defaultStream::output_fd());
1451     out.print_raw("Current thread is ");
1452     char buf[16];
1453     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1454     out.print_raw_cr(buf);
1455     out.print_raw_cr("Dumping core ...");
1456 #endif
1457     ::abort(); // dump core
1458   }
1459 
1460   ::exit(1);
1461 }
1462 
1463 // Die immediately, no exit hook, no abort hook, no cleanup.
1464 void os::die() {
1465   ::abort();
1466 }
1467 
1468 // thread_id is kernel thread id (similar to Solaris LWP id)
1469 intx os::current_thread_id() { return os::Linux::gettid(); }
1470 int os::current_process_id() {
1471   return ::getpid();
1472 }
1473 
1474 // DLL functions
1475 
1476 const char* os::dll_file_extension() { return ".so"; }
1477 
1478 // This must be hard coded because it's the system's temporary
1479 // directory not the java application's temp directory, ala java.io.tmpdir.
1480 const char* os::get_temp_directory() { return "/tmp"; }
1481 
1482 static bool file_exists(const char* filename) {
1483   struct stat statbuf;
1484   if (filename == NULL || strlen(filename) == 0) {
1485     return false;


1857   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1858     if (lib_arch.name!=NULL) {
1859       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1860                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1861                  lib_arch.name, arch_array[running_arch_index].name);
1862     } else {
1863       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1864                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1865                  lib_arch.code,
1866                  arch_array[running_arch_index].name);
1867     }
1868   }
1869 
1870   return NULL;
1871 }
1872 
1873 void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
1874                                 int ebuflen) {
1875   void * result = ::dlopen(filename, RTLD_LAZY);
1876   if (result == NULL) {
1877     ::strncpy(ebuf, ::dlerror(), ebuflen - 1);
1878     ebuf[ebuflen-1] = '\0';
1879   }
1880   return result;
1881 }
1882 
1883 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
1884                                        int ebuflen) {
1885   void * result = NULL;
1886   if (LoadExecStackDllInVMThread) {
1887     result = dlopen_helper(filename, ebuf, ebuflen);
1888   }
1889 
1890   // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
1891   // library that requires an executable stack, or which does not have this
1892   // stack attribute set, dlopen changes the stack attribute to executable. The
1893   // read protection of the guard pages gets lost.
1894   //
1895   // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
1896   // may have been queued at the same time.
1897 
1898   if (!_stack_is_executable) {


4072 
4073 bool os::can_execute_large_page_memory() {
4074   return UseTransparentHugePages || UseHugeTLBFS;
4075 }
4076 
4077 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
4078   assert(file_desc >= 0, "file_desc is not valid");
4079   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
4080   if (result != NULL) {
4081     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
4082       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
4083     }
4084   }
4085   return result;
4086 }
4087 
4088 // Reserve memory at an arbitrary address, only if that area is
4089 // available (and not reserved for something else).
4090 
4091 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
4092   const int max_tries = 10;
4093   char* base[max_tries];
4094   size_t size[max_tries];
4095   const size_t gap = 0x000000;
4096 
4097   // Assert only that the size is a multiple of the page size, since
4098   // that's all that mmap requires, and since that's all we really know
4099   // about at this low abstraction level.  If we need higher alignment,
4100   // we can either pass an alignment to this method or verify alignment
4101   // in one of the methods further up the call chain.  See bug 5044738.
4102   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
4103 
4104   // Repeatedly allocate blocks until the block is allocated at the
4105   // right spot.
4106 
4107   // Linux mmap allows caller to pass an address as hint; give it a try first,
4108   // if kernel honors the hint then we can return immediately.
4109   char * addr = anon_mmap(requested_addr, bytes, false);
4110   if (addr == requested_addr) {
4111     return requested_addr;
4112   }
4113 
4114   if (addr != NULL) {
4115     // mmap() is successful but it fails to reserve at the requested address
4116     anon_munmap(addr, bytes);
4117   }
4118 
4119   int i;
4120   for (i = 0; i < max_tries; ++i) {
4121     base[i] = reserve_memory(bytes);
4122 
4123     if (base[i] != NULL) {
4124       // Is this the block we wanted?
4125       if (base[i] == requested_addr) {
4126         size[i] = bytes;
4127         break;
4128       }
4129 
4130       // Does this overlap the block we wanted? Give back the overlapped
4131       // parts and try again.
4132 
4133       ptrdiff_t top_overlap = requested_addr + (bytes + gap) - base[i];
4134       if (top_overlap >= 0 && (size_t)top_overlap < bytes) {
4135         unmap_memory(base[i], top_overlap);
4136         base[i] += top_overlap;
4137         size[i] = bytes - top_overlap;
4138       } else {
4139         ptrdiff_t bottom_overlap = base[i] + bytes - requested_addr;
4140         if (bottom_overlap >= 0 && (size_t)bottom_overlap < bytes) {
4141           unmap_memory(requested_addr, bottom_overlap);
4142           size[i] = bytes - bottom_overlap;
4143         } else {
4144           size[i] = bytes;
4145         }
4146       }
4147     }
4148   }
4149 
4150   // Give back the unused reserved pieces.
4151 
4152   for (int j = 0; j < i; ++j) {
4153     if (base[j] != NULL) {
4154       unmap_memory(base[j], size[j]);
4155     }
4156   }
4157 
4158   if (i < max_tries) {
4159     return requested_addr;
4160   } else {
4161     return NULL;
4162   }
4163 }
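
For the retry loop removed by this change, the overlap arithmetic is easiest to follow with concrete numbers. A small self-contained example; the addresses are hypothetical and gap matches the constant 0 used above:

    // Worked example of the top_overlap case with made-up addresses.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t requested_addr = 0x40000000;  // where the caller wants the block
      const uintptr_t bytes          = 0x10000000;  // size of the block
      const uintptr_t gap            = 0;           // matches the constant above
      const uintptr_t base           = 0x48000000;  // what reserve_memory() returned

      // top_overlap = requested_addr + (bytes + gap) - base
      const intptr_t top_overlap = (intptr_t)(requested_addr + bytes + gap - base);
      assert(top_overlap >= 0 && (uintptr_t)top_overlap < bytes);  // overlap branch taken

      // Unmapping top_overlap bytes at base leaves the kept piece starting just
      // past the requested range, freeing that range for the next iteration.
      assert(base + (uintptr_t)top_overlap == requested_addr + bytes + gap);
      return 0;
    }
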
4164 
4165 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
4166 void os::infinite_sleep() {
4167   while (true) {    // sleep forever ...
4168     ::sleep(100);   // ... 100 seconds at a time
4169   }
4170 }
4171 
4172 // Used to convert frequent JVM_Yield() to nops
4173 bool os::dont_yield() {
4174   return DontYieldALot;
4175 }
4176 
4177 // Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will
4178 // actually give up the CPU. Since skip buddy (v2.6.28):
4179 //
4180 // * Sets the yielding task as skip buddy for current CPU's run queue.
4181 // * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
4182 // * Clears skip buddies for this run queue (yielding task no longer a skip buddy).

