< prev index next >

src/hotspot/os/bsd/os_bsd.cpp

Print this page




1056 // called from signal handler. Before adding something to os::abort(), make
1057 // sure it is async-safe and can handle partially initialized VM.
1058 void os::abort(bool dump_core, void* siginfo, const void* context) {
1059   os::shutdown();
1060   if (dump_core) {
1061 #ifndef PRODUCT
     // Best-effort diagnostics before dumping core; compiled out of PRODUCT builds.
1062     fdStream out(defaultStream::output_fd());
1063     out.print_raw("Current thread is ");
1064     char buf[16];
1065     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1066     out.print_raw_cr(buf);
1067     out.print_raw_cr("Dumping core ...");
1068 #endif
1069     ::abort(); // dump core
1070   }
1071 
     // Reached only when dump_core is false: ::abort() above does not return.
1072   ::exit(1);
1073 }
1074 
1075 // Die immediately, no exit hook, no abort hook, no cleanup.
1076 // Dump a core file, if possible, for debugging.
1077 void os::die() {
1078   if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
1079     // For TimeoutInErrorHandlingTest.java, we just kill the VM
1080     // and don't take the time to generate a core file.
1081     os::signal_raise(SIGKILL);
1082   } else {
1083     // _exit() on BsdThreads only kills current thread
       // ::abort() raises SIGABRT process-wide and can produce a core dump.
1084     ::abort();
1085   }
1086 }
1087 
1088 // Information of current thread in variety of formats
1089 pid_t os::Bsd::gettid() {
1090   int retval = -1;
1091 
1092 #ifdef __APPLE__ //XNU kernel
1093   // despite the fact mach port is actually not a thread id use it
1094   // instead of syscall(SYS_thread_selfid) as it certainly fits to u4
1095   retval = ::pthread_mach_thread_np(::pthread_self());
1096   guarantee(retval != 0, "just checking");
1097   return retval;
1098 
1099 #else
1100   #ifdef __FreeBSD__
1101   retval = syscall(SYS_thr_self);
1102   #else
1103     #ifdef __OpenBSD__
1104   retval = syscall(SYS_getthrid);
1105     #else


1248     }
1249     return true;
1250   }
1251 
1252   buf[0] = '\0';
1253   if (offset) *offset = -1;
1254   return false;
1255 }
1256 
1257 // Loads .dll/.so and
1258 // in case of error it checks if .dll/.so was built for the
1259 // same architecture as Hotspot is running on
1260 
1261 #ifdef __APPLE__
1262 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1263 #ifdef STATIC_BUILD
       // Statically linked VM: there is nothing to dlopen; hand back the
       // process-wide handle instead.
1264   return os::get_default_process_handle();
1265 #else
1266   void * result= ::dlopen(filename, RTLD_LAZY);
1267   if (result != NULL) {
1268     Events::log(NULL, "Loaded shared library %s", filename);
1269     // Successful loading
1270     return result;
1271   }
1272 
     // dlopen failed. POSIX allows dlerror() to return NULL when no diagnostic
     // is pending, so substitute a fixed message before using it.
1273   const char* error_report = ::dlerror();
1274   if (error_report == NULL) {
1275     error_report = "dlerror returned no error description";
1276   }
     // Only copy the message if the caller actually supplied a buffer.
1277   if (ebuf != NULL && ebuflen > 0) {
1278     // Read system error message into ebuf
1279     ::strncpy(ebuf, error_report, ebuflen-1);
1280     ebuf[ebuflen-1]='\0';
1281   }
1282   Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1283 
1284   return NULL;
1285 #endif // STATIC_BUILD
1286 }
1287 #else
1288 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1289 #ifdef STATIC_BUILD
1290   return os::get_default_process_handle();
1291 #else
1292   void * result= ::dlopen(filename, RTLD_LAZY);
1293   if (result != NULL) {
1294     Events::log(NULL, "Loaded shared library %s", filename);
1295     // Successful loading
1296     return result;
1297   }
1298 
1299   Elf32_Ehdr elf_head;
1300 
1301   const char* const error_report = ::dlerror();
1302   if (error_report == NULL) {
1303     error_report = "dlerror returned no error description";
1304   }
1305   if (ebuf != NULL && ebuflen > 0) {
1306     // Read system error message into ebuf
1307     ::strncpy(ebuf, error_report, ebuflen-1);
1308     ebuf[ebuflen-1]='\0';
1309   }
1310   Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1311 
1312   int diag_msg_max_length=ebuflen-strlen(ebuf);
1313   char* diag_msg_buf=ebuf+strlen(ebuf);
1314 
1315   if (diag_msg_max_length==0) {
1316     // No more space in ebuf for additional diagnostics message
1317     return NULL;
1318   }
1319 
1320 
1321   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1322 
1323   if (file_descriptor < 0) {
1324     // Can't open library, report dlerror() message
1325     return NULL;
1326   }
1327 
1328   bool failed_to_read_elf_head=
1329     (sizeof(elf_head)!=
1330      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1331 


2172 bool os::can_execute_large_page_memory() {
2173   // Does not matter, we do not support huge pages.
2174   return false;
2175 }
2176 
     // Reserve memory at requested_addr anonymously first, then replace that
     // mapping with one backed by file_desc (Java heap in a filesystem directory).
2177 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2178   assert(file_desc >= 0, "file_desc is not valid");
2179   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
2180   if (result != NULL) {
2181     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
         // Failing to back the heap with the file is unrecoverable at init time.
2182       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2183     }
2184   }
2185   return result;
2186 }
2187 
2188 // Reserve memory at an arbitrary address, only if that area is
2189 // available (and not reserved for something else).
2190 
2191 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {




2192   // Assert only that the size is a multiple of the page size, since
2193   // that's all that mmap requires, and since that's all we really know
2194   // about at this low abstraction level.  If we need higher alignment,
2195   // we can either pass an alignment to this method or verify alignment
2196   // in one of the methods further up the call chain.  See bug 5044738.
2197   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2198 
2199   // Repeatedly allocate blocks until the block is allocated at the
2200   // right spot.
2201 
2202   // Bsd mmap allows caller to pass an address as hint; give it a try first,
2203   // if kernel honors the hint then we can return immediately.
2204   char * addr = anon_mmap(requested_addr, bytes, false);
2205   if (addr == requested_addr) {
2206     return requested_addr;
2207   }
2208 
2209   if (addr != NULL) {
2210     // mmap() is successful but it fails to reserve at the requested address
2211     anon_munmap(addr, bytes);
2212   }
2213 
     // Hint was not honored; this version simply reports failure rather than
     // probing alternative reservations.
2214   return NULL;










































2215 }
2216 
2217 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2218 void os::infinite_sleep() {
2219   while (true) {    // sleep forever ...
2220     ::sleep(100);   // ... 100 seconds at a time
2221   }
2222 }
2223 
2224 // Used to convert frequent JVM_Yield() to nops
2225 bool os::dont_yield() {
2226   return DontYieldALot;
2227 }
2228 
     // Yield the CPU directly to the scheduler; no JVM state is involved.
2229 void os::naked_yield() {
2230   sched_yield();
2231 }
2232 
2233 ////////////////////////////////////////////////////////////////////////////////
2234 // thread priority support


3220 }
3221 
3222 // Mark the polling page as readable
3223 void os::make_polling_page_readable(void) {
     // Any failure to change the page protection is treated as fatal.
3224   if (!bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
3225     fatal("Could not enable polling page");
3226   }
3227 }
3228 
3229 int os::active_processor_count() {
3230   // User has overridden the number of active processors
3231   if (ActiveProcessorCount > 0) {
3232     log_trace(os)("active_processor_count: "
3233                   "active processor count set by user : %d",
3234                   ActiveProcessorCount);
3235     return ActiveProcessorCount;
3236   }
3237 
     // Otherwise report the cached count (presumably set during VM startup --
     // its initialization is outside this chunk).
3238   return _processor_count;
3239 }
3240 
3241 #ifdef __APPLE__
3242 uint os::processor_id() {
     // Lazily-built table translating an x2APIC id into a small, dense cpu id.
3243   static volatile int* volatile apic_to_cpu_mapping = NULL;
3244   static volatile int next_cpu_id = 0;
3245 
3246   volatile int* mapping = OrderAccess::load_acquire(&apic_to_cpu_mapping);
3247   if (mapping == NULL) {
3248     // Calculate possible number space for APIC ids. This space is not necessarily
3249     // in the range [0, number_of_cpus).
3250     uint total_bits = 0;
3251     for (uint i = 0;; ++i) {
3252       uint eax = 0xb; // Query topology leaf
3253       uint ebx;
3254       uint ecx = i;
3255       uint edx;
3256 
       // CPUID leaf 0xB: ECX selects which topology level to report.
3257       __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3258 
3259       uint level_type = (ecx >> 8) & 0xFF;
3260       if (level_type == 0) {
3261         // Invalid level; end of topology
3262         break;
3263       }
       // EAX bits 4:0 hold the APIC-id shift contributed by this level.
3264       uint level_apic_id_shift = eax & ((1u << 5) - 1);
3265       total_bits += level_apic_id_shift;
3266     }
3267 
3268     uint max_apic_ids = 1u << total_bits;
3269     mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
3270 
     // -1 marks "no cpu id assigned yet" for every possible APIC id.
3271     for (uint i = 0; i < max_apic_ids; ++i) {
3272       mapping[i] = -1;
3273     }
3274 
     // Publish the table; if another thread raced us, free ours and use theirs.
3275     if (!Atomic::replace_if_null(mapping, &apic_to_cpu_mapping)) {
3276       FREE_C_HEAP_ARRAY(int, mapping);
3277       mapping = OrderAccess::load_acquire(&apic_to_cpu_mapping);
3278     }
3279   }
3280 
3281   uint eax = 0xb;
3282   uint ebx;
3283   uint ecx = 0;
3284   uint edx;
3285 
     // CPUID leaf 0xB again: EDX returns the calling cpu's x2APIC id.
3286   asm ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3287 
3288   // Map from APIC id to a unique logical processor ID in the expected
3289   // [0, num_processors) range.
3290 
3291   uint apic_id = edx;
3292   int cpu_id = Atomic::load(&mapping[apic_id]);
3293 
3294   while (cpu_id < 0) {
     // NOTE(review): assuming cmpxchg returns the previous value, this condition
     // is true for ANY non-zero prior value (including the -2 claim marker set
     // by a racing thread), not only the expected -1. Confirm whether '== -1'
     // was intended; as written two threads could both assign an id.
3295     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) {
3296       Atomic::store(Atomic::add(1, &next_cpu_id) - 1, &mapping[apic_id]);
3297     }
3298     cpu_id = Atomic::load(&mapping[apic_id]);
3299   }
3300 
3301   return (uint)cpu_id;
3302 }
3303 #endif
3304 
3305 void os::set_native_thread_name(const char *name) {
3306 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3307   // This is only supported in Snow Leopard and beyond
3308   if (name != NULL) {
3309     // Add a "Java: " prefix to the name
3310     char buf[MAXTHREADNAMESIZE];
       // snprintf silently truncates names longer than the buffer; acceptable here.
3311     snprintf(buf, sizeof(buf), "Java: %s", name);
       // The one-argument pthread_setname_np names the calling thread on macOS.
3312     pthread_setname_np(buf);
3313   }
3314 #endif
3315 }
3316 
3317 bool os::distribute_processes(uint length, uint* distribution) {
3318   // Not yet implemented.
     // Stub: always reports failure; both parameters are ignored on BSD.
3319   return false;
3320 }
3321 
3322 bool os::bind_to_processor(uint processor_id) {
3323   // Not yet implemented.




1056 // called from signal handler. Before adding something to os::abort(), make
1057 // sure it is async-safe and can handle partially initialized VM.
1058 void os::abort(bool dump_core, void* siginfo, const void* context) {
1059   os::shutdown();
1060   if (dump_core) {
1061 #ifndef PRODUCT
     // Best-effort diagnostics before dumping core; compiled out of PRODUCT builds.
1062     fdStream out(defaultStream::output_fd());
1063     out.print_raw("Current thread is ");
1064     char buf[16];
1065     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1066     out.print_raw_cr(buf);
1067     out.print_raw_cr("Dumping core ...");
1068 #endif
1069     ::abort(); // dump core
1070   }
1071 
     // Reached only when dump_core is false: ::abort() above does not return.
1072   ::exit(1);
1073 }
1074 
1075 // Die immediately, no exit hook, no abort hook, no cleanup.

1076 void os::die() {
1077   // _exit() on BsdThreads only kills current thread
     // ::abort() raises SIGABRT process-wide and can produce a core dump.
1078   ::abort();






1079 }
1080 
1081 // Information of current thread in variety of formats
1082 pid_t os::Bsd::gettid() {
1083   int retval = -1;
1084 
1085 #ifdef __APPLE__ //XNU kernel
1086   // despite the fact mach port is actually not a thread id use it
1087   // instead of syscall(SYS_thread_selfid) as it certainly fits to u4
1088   retval = ::pthread_mach_thread_np(::pthread_self());
1089   guarantee(retval != 0, "just checking");
1090   return retval;
1091 
1092 #else
1093   #ifdef __FreeBSD__
1094   retval = syscall(SYS_thr_self);
1095   #else
1096     #ifdef __OpenBSD__
1097   retval = syscall(SYS_getthrid);
1098     #else


1241     }
1242     return true;
1243   }
1244 
1245   buf[0] = '\0';
1246   if (offset) *offset = -1;
1247   return false;
1248 }
1249 
1250 // Loads .dll/.so and
1251 // in case of error it checks if .dll/.so was built for the
1252 // same architecture as Hotspot is running on
1253 
1254 #ifdef __APPLE__
1255 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1256 #ifdef STATIC_BUILD
       // Statically linked VM: nothing to dlopen; hand back the process handle.
1257   return os::get_default_process_handle();
1258 #else
1259   void * result= ::dlopen(filename, RTLD_LAZY);
1260   if (result != NULL) {
1261     // Successful loading
1262     return result;
1263   }
1264 
       // dlopen failed. POSIX allows dlerror() to return NULL when no diagnostic
       // is pending, and callers may pass ebuf == NULL or ebuflen <= 0; guard
       // both cases, otherwise ::strncpy below can dereference NULL.
       const char* error_report = ::dlerror();
       if (error_report == NULL) {
         error_report = "dlerror returned no error description";
       }
       if (ebuf != NULL && ebuflen > 0) {
         // Read system error message into ebuf (always NUL-terminated).
         ::strncpy(ebuf, error_report, ebuflen-1);
         ebuf[ebuflen-1]='\0';
       }
1269   return NULL;
1270 #endif // STATIC_BUILD
1271 }
1272 #else
1273 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1274 #ifdef STATIC_BUILD
1275   return os::get_default_process_handle();
1276 #else
1277   void * result= ::dlopen(filename, RTLD_LAZY);
1278   if (result != NULL) {

1279     // Successful loading
1280     return result;
1281   }
1282 
1283   Elf32_Ehdr elf_head;
1284 
1285   // Read system error message into ebuf
1286   // It may or may not be overwritten below
1287   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1288   ebuf[ebuflen-1]='\0';







1289   int diag_msg_max_length=ebuflen-strlen(ebuf);
1290   char* diag_msg_buf=ebuf+strlen(ebuf);
1291 
1292   if (diag_msg_max_length==0) {
1293     // No more space in ebuf for additional diagnostics message
1294     return NULL;
1295   }
1296 
1297 
1298   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1299 
1300   if (file_descriptor < 0) {
1301     // Can't open library, report dlerror() message
1302     return NULL;
1303   }
1304 
1305   bool failed_to_read_elf_head=
1306     (sizeof(elf_head)!=
1307      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1308 


2149 bool os::can_execute_large_page_memory() {
2150   // Does not matter, we do not support huge pages.
2151   return false;
2152 }
2153 
     // Reserve memory at requested_addr anonymously first, then replace that
     // mapping with one backed by file_desc (Java heap in a filesystem directory).
2154 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2155   assert(file_desc >= 0, "file_desc is not valid");
2156   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
2157   if (result != NULL) {
2158     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
         // Failing to back the heap with the file is unrecoverable at init time.
2159       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2160     }
2161   }
2162   return result;
2163 }
2164 
2165 // Reserve memory at an arbitrary address, only if that area is
2166 // available (and not reserved for something else).
2167 
2168 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2169   const int max_tries = 10;
2170   char* base[max_tries];
2171   size_t size[max_tries];
     // gap is zero, so the overlap math below treats the wanted region as
     // exactly [requested_addr, requested_addr + bytes).
2172   const size_t gap = 0x000000;
2173 
2174   // Assert only that the size is a multiple of the page size, since
2175   // that's all that mmap requires, and since that's all we really know
2176   // about at this low abstraction level.  If we need higher alignment,
2177   // we can either pass an alignment to this method or verify alignment
2178   // in one of the methods further up the call chain.  See bug 5044738.
2179   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2180 
2181   // Repeatedly allocate blocks until the block is allocated at the
2182   // right spot.
2183 
2184   // Bsd mmap allows caller to pass an address as hint; give it a try first,
2185   // if kernel honors the hint then we can return immediately.
2186   char * addr = anon_mmap(requested_addr, bytes, false);
2187   if (addr == requested_addr) {
2188     return requested_addr;
2189   }
2190 
2191   if (addr != NULL) {
2192     // mmap() is successful but it fails to reserve at the requested address
2193     anon_munmap(addr, bytes);
2194   }
2195 
2196   int i;
2197   for (i = 0; i < max_tries; ++i) {
2198     base[i] = reserve_memory(bytes);
2199 
2200     if (base[i] != NULL) {
2201       // Is this the block we wanted?
2202       if (base[i] == requested_addr) {
2203         size[i] = bytes;
2204         break;
2205       }
2206 
2207       // Does this overlap the block we wanted? Give back the overlapped
2208       // parts and try again.
2209 
       // NOTE(review): top_overlap is unsigned (size_t), so 'top_overlap >= 0'
       // is always true and the effective guard is only '< bytes'; the same
       // applies to 'bottom_overlap >= 0' below. Confirm the intended ranges.
2210       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2211       if (top_overlap >= 0 && top_overlap < bytes) {
2212         unmap_memory(base[i], top_overlap);
2213         base[i] += top_overlap;
2214         size[i] = bytes - top_overlap;
2215       } else {
2216         size_t bottom_overlap = base[i] + bytes - requested_addr;
2217         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2218           unmap_memory(requested_addr, bottom_overlap);
2219           size[i] = bytes - bottom_overlap;
2220         } else {
2221           size[i] = bytes;
2222         }
2223       }
2224     }
2225   }
2226 
2227   // Give back the unused reserved pieces.
2228 
     // The loop stops at j < i, so a successful reservation at index i is kept.
2229   for (int j = 0; j < i; ++j) {
2230     if (base[j] != NULL) {
2231       unmap_memory(base[j], size[j]);
2232     }
2233   }
2234 
     // i < max_tries means the loop above broke out after landing on
     // requested_addr; otherwise every attempt missed.
2235   if (i < max_tries) {
2236     return requested_addr;
2237   } else {
2238     return NULL;
2239   }
2240 }
2241 
2242 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2243 void os::infinite_sleep() {
2244   while (true) {    // sleep forever ...
2245     ::sleep(100);   // ... 100 seconds at a time
2246   }
2247 }
2248 
2249 // Used to convert frequent JVM_Yield() to nops
2250 bool os::dont_yield() {
2251   return DontYieldALot;
2252 }
2253 
     // Yield the CPU directly to the scheduler; no JVM state is involved.
2254 void os::naked_yield() {
2255   sched_yield();
2256 }
2257 
2258 ////////////////////////////////////////////////////////////////////////////////
2259 // thread priority support


3245 }
3246 
3247 // Mark the polling page as readable
3248 void os::make_polling_page_readable(void) {
     // Any failure to change the page protection is treated as fatal.
3249   if (!bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
3250     fatal("Could not enable polling page");
3251   }
3252 }
3253 
3254 int os::active_processor_count() {
3255   // User has overridden the number of active processors
3256   if (ActiveProcessorCount > 0) {
3257     log_trace(os)("active_processor_count: "
3258                   "active processor count set by user : %d",
3259                   ActiveProcessorCount);
3260     return ActiveProcessorCount;
3261   }
3262 
     // Otherwise report the cached count (presumably set during VM startup --
     // its initialization is outside this chunk).
3263   return _processor_count;
3264 }
































































3265 
3266 void os::set_native_thread_name(const char *name) {
3267 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3268   // This is only supported in Snow Leopard and beyond
3269   if (name != NULL) {
3270     // Add a "Java: " prefix to the name
3271     char buf[MAXTHREADNAMESIZE];
       // snprintf silently truncates names longer than the buffer; acceptable here.
3272     snprintf(buf, sizeof(buf), "Java: %s", name);
       // The one-argument pthread_setname_np names the calling thread on macOS.
3273     pthread_setname_np(buf);
3274   }
3275 #endif
3276 }
3277 
3278 bool os::distribute_processes(uint length, uint* distribution) {
3279   // Not yet implemented.
     // Stub: always reports failure; both parameters are ignored on BSD.
3280   return false;
3281 }
3282 
3283 bool os::bind_to_processor(uint processor_id) {
3284   // Not yet implemented.


< prev index next >