src/hotspot/share/gc/shared/memAllocator.cpp

@@ -141,10 +141,11 @@
 
 void MemAllocator::Allocation::verify_before() {
   // Clear unhandled oops for memory allocation.  Memory allocation might
   // not take out a lock if from tlab, so clear here.
   Thread* THREAD = _thread;
+  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
   assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 }
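
Note on the hunk above: CHECK_UNHANDLED_OOPS_ONLY compiles its argument only in builds with unhandled-oop checking enabled, so regular builds pay nothing for the extra clearing. A minimal standalone sketch of that conditional-compilation pattern, using illustrative names rather than the HotSpot macro itself:

  // Illustrative analogue (not the HotSpot macro): the wrapped statement is
  // compiled only when the checking flag is defined at build time.
  #include <cstdio>

  #ifdef ENABLE_EXTRA_CHECKS                   // hypothetical build-time flag
  #define EXTRA_CHECKS_ONLY(code) code
  #else
  #define EXTRA_CHECKS_ONLY(code)
  #endif

  struct DemoThread {
    int unhandled = 0;
    void clear_unhandled() { unhandled = 0; }  // stand-in for clear_unhandled_oops()
  };

  int main() {
    DemoThread t;
    t.unhandled = 3;
    EXTRA_CHECKS_ONLY(t.clear_unhandled();)    // expands to nothing in normal builds
    std::printf("unhandled = %d\n", t.unhandled);
    return 0;
  }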
 

@@ -169,13 +170,18 @@
   // How to choose between a pending exception and a potential
   // OutOfMemoryError?  Don't allow pending exceptions.
   // This is a VM policy failure, so how do we exhaustively test it?
   assert(!_thread->has_pending_exception(),
          "shouldn't be allocating with pending exception");
-  // Allocation of an oop can always invoke a safepoint,
-  // hence, the true argument.
-  _thread->check_for_valid_safepoint_state(true);
+  if (StrictSafepointChecks) {
+    assert(_thread->allow_allocation(),
+           "Allocation done by thread for which allocation is blocked "
+           "by No_Allocation_Verifier!");
+    // Allocation of an oop can always invoke a safepoint,
+    // hence, the true argument.
+    _thread->check_for_valid_safepoint_state(true);
+  }
 }
 #endif
 
 void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
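
Note on the check_for_valid_allocation_state() hunk above: the new assert on _thread->allow_allocation() guards against allocating inside a region where a No_Allocation_Verifier has blocked allocation. A standalone sketch of that RAII scope-guard pattern, with illustrative names (not HotSpot's types):

  // Illustrative analogue: an RAII verifier marks a region in which allocation
  // is forbidden, and the allocator asserts the flag on entry.
  #include <cassert>

  struct DemoThread {
    int no_alloc_depth = 0;                       // > 0 means allocation is blocked
    bool allow_allocation() const { return no_alloc_depth == 0; }
  };

  class NoAllocationScope {                       // analogue of a No_Allocation_Verifier
    DemoThread& _t;
   public:
    explicit NoAllocationScope(DemoThread& t) : _t(t) { ++_t.no_alloc_depth; }
    ~NoAllocationScope() { --_t.no_alloc_depth; }
  };

  void allocate_something(DemoThread& t) {
    assert(t.allow_allocation() && "allocation attempted inside a no-allocation scope");
    // ... allocation would proceed here ...
  }

  int main() {
    DemoThread t;
    allocate_something(t);                        // fine: no guard active
    {
      NoAllocationScope guard(t);
      // allocate_something(t);                   // would fire the assert in a debug build
    }
    allocate_something(t);                        // fine again once the scope ends
    return 0;
  }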

@@ -224,22 +230,22 @@
 void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
   HeapWord* mem = (HeapWord*)obj();
   size_t size_in_bytes = _allocator._word_size * HeapWordSize;
 
   if (_allocated_outside_tlab) {
-    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
+    AllocTracer::send_allocation_outside_tlab(_allocator._klass, mem, size_in_bytes, _thread);
   } else if (_allocated_tlab_size != 0) {
     // TLAB was refilled
-    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
+    AllocTracer::send_allocation_in_new_tlab(_allocator._klass, mem, _allocated_tlab_size * HeapWordSize,
                                              size_in_bytes, _thread);
   }
 }
 
 void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
   if (DTraceAllocProbes) {
     // support for Dtrace object alloc event (no-op most of the time)
-    Klass* klass = obj()->klass();
+    Klass* klass = _allocator._klass;
     size_t word_size = _allocator._word_size;
     if (klass != NULL && klass->name() != NULL) {
       SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
     }
   }
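
Note on the sampler hunks above: the JFR and DTrace reporting code now takes the klass from the allocation request (_allocator._klass) instead of loading it back from the new object's header via obj()->klass(). A standalone sketch of that design choice, with illustrative names:

  // Illustrative analogue: report from the allocation request rather than from
  // the freshly allocated object's header.
  #include <cstdio>
  #include <string>

  struct Klass { std::string name; };

  struct Allocator {
    const Klass* klass;          // known before the object is even initialized
    size_t word_size;
  };

  // Reporting from the request does not touch the new object's header at all.
  void report_allocation(const Allocator& a, const void* mem) {
    std::printf("allocated %zu words of %s at %p\n",
                a.word_size, a.klass->name.c_str(), mem);
  }

  int main() {
    Klass k{"java/lang/String"};
    Allocator a{&k, 8};
    char raw[64] = {};           // stand-in for raw heap memory
    report_allocation(a, raw);
    return 0;
  }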

@@ -362,14 +368,10 @@
   {
     Allocation allocation(*this, &obj);
     HeapWord* mem = mem_allocate(allocation);
     if (mem != NULL) {
       obj = initialize(mem);
-    } else {
-      // The unhandled oop detector will poison local variable obj,
-      // so reset it to NULL if mem is NULL.
-      obj = NULL;
     }
   }
   return obj;
 }
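
Note on the allocate() hunk above: the raw memory is requested inside the scope of an RAII Allocation helper, and only a non-NULL result is turned into an initialized object; with the removed else branch gone, obj simply keeps its initial NULL value on failure. A standalone sketch of that control flow, with illustrative names:

  // Illustrative analogue of the allocate() flow shown above.
  #include <cstdio>
  #include <cstdlib>

  struct DemoAllocation {                 // analogue of MemAllocator::Allocation
    void** _obj;
    explicit DemoAllocation(void** obj) : _obj(obj) {}
    ~DemoAllocation() {                   // verification / event reporting would go here
      std::printf("allocation finished, obj=%p\n", *_obj);
    }
  };

  void* demo_allocate(size_t bytes) {
    void* obj = nullptr;
    {
      DemoAllocation allocation(&obj);
      void* mem = std::malloc(bytes);     // stand-in for mem_allocate()
      if (mem != nullptr) {
        obj = mem;                        // stand-in for initialize(mem)
      }
      // on failure obj stays nullptr and the destructor still runs
    }
    return obj;
  }

  int main() {
    void* p = demo_allocate(32);
    std::free(p);
    return 0;
  }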
 

@@ -385,11 +387,11 @@
   assert(mem != NULL, "NULL object pointer");
   if (UseBiasedLocking) {
     oopDesc::set_mark_raw(mem, _klass->prototype_header());
   } else {
     // May be bootstrapping
-    oopDesc::set_mark_raw(mem, markWord::prototype());
+    oopDesc::set_mark_raw(mem, markOopDesc::prototype());
   }
   // Need a release store to ensure array/class length, mark word, and
   // object zeroing are visible before setting the klass non-NULL, for
   // concurrent collectors.
   oopDesc::release_set_klass(mem, _klass);
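
Note on the initialization hunk above: the comment describes the usual publication ordering: the mark word, length, and zeroing stores happen first, then the klass is written with a release store so that a concurrent reader which observes a non-NULL klass also observes those earlier stores. A standalone sketch of that release/acquire pairing using std::atomic, with illustrative names:

  // Illustrative analogue of publish-after-initialize with a release store.
  #include <atomic>
  #include <cstdio>
  #include <thread>

  struct DemoObject {
    long mark = 0;          // stand-in for the mark word
    long length = 0;        // stand-in for array/class length
  };

  DemoObject storage;
  std::atomic<DemoObject*> published{nullptr};   // stand-in for the klass slot

  void writer() {
    storage.mark = 42;      // initialize "header" fields first
    storage.length = 7;
    // release store: the stores above become visible to an acquire reader
    published.store(&storage, std::memory_order_release);
  }

  void reader() {
    DemoObject* o;
    while ((o = published.load(std::memory_order_acquire)) == nullptr) { }
    // the release/acquire pair guarantees these reads see the initialized fields
    std::printf("mark=%ld length=%ld\n", o->mark, o->length);
  }

  int main() {
    std::thread r(reader);
    std::thread w(writer);
    w.join();
    r.join();
    return 0;
  }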