src/hotspot/os/bsd/os_bsd.cpp


@@ -1071,20 +1071,13 @@
 
   ::exit(1);
 }
 
 // Die immediately, no exit hook, no abort hook, no cleanup.
-// Dump a core file, if possible, for debugging.
 void os::die() {
-  if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
-    // For TimeoutInErrorHandlingTest.java, we just kill the VM
-    // and don't take the time to generate a core file.
-    os::signal_raise(SIGKILL);
-  } else {
-    // _exit() on BsdThreads only kills current thread
-    ::abort();
-  }
+  // _exit() on BsdThreads only kills current thread
+  ::abort();
 }
 
 // Information of current thread in variety of formats
 pid_t os::Bsd::gettid() {
   int retval = -1;

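As background for the hunk above, here is a minimal standalone sketch (not HotSpot code) of the two termination paths involved; `die_hard` is a hypothetical name, and the core-dump remarks describe the default signal dispositions rather than anything specific to this patch.

```cpp
#include <signal.h>
#include <stdlib.h>

// abort() raises SIGABRT; with the default disposition the whole process
// terminates and, where core dumps are enabled, a core file is written.
// raise(SIGKILL) terminates the process immediately: it cannot be caught,
// runs no handlers or atexit hooks, and produces no core file.
static void die_hard(bool want_core) {
  if (want_core) {
    ::abort();
  } else {
    ::raise(SIGKILL);
  }
}
```
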
@@ -1263,25 +1256,17 @@
 #ifdef STATIC_BUILD
   return os::get_default_process_handle();
 #else
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
-    Events::log(NULL, "Loaded shared library %s", filename);
     // Successful loading
     return result;
   }
 
-  const char* error_report = ::dlerror();
-  if (error_report == NULL) {
-    error_report = "dlerror returned no error description";
-  }
-  if (ebuf != NULL && ebuflen > 0) {
-    // Read system error message into ebuf
-    ::strncpy(ebuf, error_report, ebuflen-1);
-    ebuf[ebuflen-1]='\0';
-  }
-  Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
+  // Read system error message into ebuf
+  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
+  ebuf[ebuflen-1]='\0';
 
   return NULL;
 #endif // STATIC_BUILD
 }
 #else

@@ -1289,28 +1274,20 @@
 #ifdef STATIC_BUILD
   return os::get_default_process_handle();
 #else
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
-    Events::log(NULL, "Loaded shared library %s", filename);
     // Successful loading
     return result;
   }
 
   Elf32_Ehdr elf_head;
 
-  const char* const error_report = ::dlerror();
-  if (error_report == NULL) {
-    error_report = "dlerror returned no error description";
-  }
-  if (ebuf != NULL && ebuflen > 0) {
-    // Read system error message into ebuf
-    ::strncpy(ebuf, error_report, ebuflen-1);
-    ebuf[ebuflen-1]='\0';
-  }
-  Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
-
+  // Read system error message into ebuf
+  // It may or may not be overwritten below
+  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
+  ebuf[ebuflen-1]='\0';
   int diag_msg_max_length=ebuflen-strlen(ebuf);
   char* diag_msg_buf=ebuf+strlen(ebuf);
 
   if (diag_msg_max_length==0) {
     // No more space in ebuf for additional diagnostics message

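For context on the two dl_load hunks above, a minimal standalone sketch of the dlopen()/dlerror() error-reporting pattern they touch; `load_library` is a hypothetical helper, not HotSpot code, and the NULL guard plus bounded copy mirror the lines appearing on the two sides of the diff.

```cpp
#include <dlfcn.h>
#include <string.h>
#include <stddef.h>

// Hypothetical helper: try to load a shared library; on failure copy the
// error text into a caller-supplied buffer, always NUL-terminated.
static void* load_library(const char* filename, char* ebuf, int ebuflen) {
  void* handle = ::dlopen(filename, RTLD_LAZY);
  if (handle != NULL) {
    return handle;                        // successful loading
  }
  const char* msg = ::dlerror();          // valid only until the next dl* call
  if (msg == NULL) {
    msg = "dlerror returned no error description";
  }
  if (ebuf != NULL && ebuflen > 0) {
    ::strncpy(ebuf, msg, ebuflen - 1);    // bounded copy into ebuf
    ebuf[ebuflen - 1] = '\0';             // strncpy may not NUL-terminate
  }
  return NULL;
}
```
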
@@ -2187,10 +2164,15 @@
 
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+  const int max_tries = 10;
+  char* base[max_tries];
+  size_t size[max_tries];
+  const size_t gap = 0x000000;
+
   // Assert only that the size is a multiple of the page size, since
   // that's all that mmap requires, and since that's all we really know
   // about at this low abstraction level.  If we need higher alignment,
   // we can either pass an alignment to this method or verify alignment
   // in one of the methods further up the call chain.  See bug 5044738.

@@ -2209,11 +2191,54 @@
   if (addr != NULL) {
     // mmap() is successful but it fails to reserve at the requested address
     anon_munmap(addr, bytes);
   }
 
-  return NULL;
+  int i;
+  for (i = 0; i < max_tries; ++i) {
+    base[i] = reserve_memory(bytes);
+
+    if (base[i] != NULL) {
+      // Is this the block we wanted?
+      if (base[i] == requested_addr) {
+        size[i] = bytes;
+        break;
+      }
+
+      // Does this overlap the block we wanted? Give back the overlapped
+      // parts and try again.
+
+      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
+      if (top_overlap >= 0 && top_overlap < bytes) {
+        unmap_memory(base[i], top_overlap);
+        base[i] += top_overlap;
+        size[i] = bytes - top_overlap;
+      } else {
+        size_t bottom_overlap = base[i] + bytes - requested_addr;
+        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
+          unmap_memory(requested_addr, bottom_overlap);
+          size[i] = bytes - bottom_overlap;
+        } else {
+          size[i] = bytes;
+        }
+      }
+    }
+  }
+
+  // Give back the unused reserved pieces.
+
+  for (int j = 0; j < i; ++j) {
+    if (base[j] != NULL) {
+      unmap_memory(base[j], size[j]);
+    }
+  }
+
+  if (i < max_tries) {
+    return requested_addr;
+  } else {
+    return NULL;
+  }
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...

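For readers unfamiliar with the retry strategy shown in the hunk above, a minimal standalone sketch follows. Assumptions: plain mmap/munmap stand in for HotSpot's anon_mmap/unmap_memory wrappers, `reserve_anywhere` and `try_reserve_at` are hypothetical helper names, and the gap handling and alignment asserts of the real code are omitted.

```cpp
#include <sys/mman.h>
#include <stddef.h>

// Reserve an address range anywhere; PROT_NONE keeps it reserved but
// inaccessible until it is later committed.
static char* reserve_anywhere(size_t bytes) {
  void* p = ::mmap(NULL, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
  return (p == MAP_FAILED) ? NULL : (char*)p;
}

// Make up to max_tries reservations, trimming away any part that overlaps
// the requested range, and release everything that did not land on target.
static char* try_reserve_at(char* requested_addr, size_t bytes) {
  const int max_tries = 10;
  char*  base[max_tries] = {0};
  size_t size[max_tries] = {0};

  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_anywhere(bytes);
    if (base[i] == NULL) {
      continue;
    }
    if (base[i] == requested_addr) {
      size[i] = bytes;                    // exact hit: keep it
      break;
    }

    char* req_lo = requested_addr;
    char* req_hi = requested_addr + bytes;
    char* got_lo = base[i];
    char* got_hi = base[i] + bytes;

    if (got_lo >= req_lo && got_lo < req_hi) {
      // Our block starts inside the requested range: give back the
      // overlapping prefix, keep the remainder reserved for now.
      size_t overlap = (size_t)(req_hi - got_lo);
      ::munmap(got_lo, overlap);
      base[i] += overlap;
      size[i]  = bytes - overlap;
    } else if (got_hi > req_lo && got_hi <= req_hi) {
      // Our block ends inside the requested range: give back the suffix.
      size_t overlap = (size_t)(got_hi - req_lo);
      ::munmap(req_lo, overlap);
      size[i] = bytes - overlap;
    } else {
      size[i] = bytes;                    // no overlap: keep it reserved so
                                          // the next attempt lands elsewhere
    }
  }

  // Give back every reservation made along the way except an exact hit.
  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      ::munmap(base[j], size[j]);
    }
  }
  return (i < max_tries) ? requested_addr : NULL;
}
```

Keeping the non-overlapping remainders mapped until the end is the point of the `base[]`/`size[]` bookkeeping: it prevents a later attempt from landing in the same spot again before the target address has been tried.
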
@@ -3236,74 +3261,10 @@
   }
 
   return _processor_count;
 }
 
-#ifdef __APPLE__
-uint os::processor_id() {
-  static volatile int* volatile apic_to_cpu_mapping = NULL;
-  static volatile int next_cpu_id = 0;
-
-  volatile int* mapping = OrderAccess::load_acquire(&apic_to_cpu_mapping);
-  if (mapping == NULL) {
-    // Calculate possible number space for APIC ids. This space is not necessarily
-    // in the range [0, number_of_cpus).
-    uint total_bits = 0;
-    for (uint i = 0;; ++i) {
-      uint eax = 0xb; // Query topology leaf
-      uint ebx;
-      uint ecx = i;
-      uint edx;
-
-      __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
-
-      uint level_type = (ecx >> 8) & 0xFF;
-      if (level_type == 0) {
-        // Invalid level; end of topology
-        break;
-      }
-      uint level_apic_id_shift = eax & ((1u << 5) - 1);
-      total_bits += level_apic_id_shift;
-    }
-
-    uint max_apic_ids = 1u << total_bits;
-    mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
-
-    for (uint i = 0; i < max_apic_ids; ++i) {
-      mapping[i] = -1;
-    }
-
-    if (!Atomic::replace_if_null(mapping, &apic_to_cpu_mapping)) {
-      FREE_C_HEAP_ARRAY(int, mapping);
-      mapping = OrderAccess::load_acquire(&apic_to_cpu_mapping);
-    }
-  }
-
-  uint eax = 0xb;
-  uint ebx;
-  uint ecx = 0;
-  uint edx;
-
-  asm ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
-
-  // Map from APIC id to a unique logical processor ID in the expected
-  // [0, num_processors) range.
-
-  uint apic_id = edx;
-  int cpu_id = Atomic::load(&mapping[apic_id]);
-
-  while (cpu_id < 0) {
-    if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) {
-      Atomic::store(Atomic::add(1, &next_cpu_id) - 1, &mapping[apic_id]);
-    }
-    cpu_id = Atomic::load(&mapping[apic_id]);
-  }
-
-  return (uint)cpu_id;
-}
-#endif
-
 void os::set_native_thread_name(const char *name) {
 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
   // This is only supported in Snow Leopard and beyond
   if (name != NULL) {
     // Add a "Java: " prefix to the name