
src/hotspot/share/gc/shared/memAllocator.cpp

New version (after the change):

 126     report_java_out_of_memory(message);
 127 
 128     if (JvmtiExport::should_post_resource_exhausted()) {
 129       JvmtiExport::post_resource_exhausted(
 130         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
 131         message);
 132     }
 133     oop exception = _overhead_limit_exceeded ?
 134         Universe::out_of_memory_error_gc_overhead_limit() :
 135         Universe::out_of_memory_error_java_heap();
 136     THROW_OOP_(exception, true);
 137   } else {
 138     THROW_OOP_(Universe::out_of_memory_error_retry(), true);
 139   }
 140 }
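
For context, the message and flags posted by JvmtiExport::post_resource_exhausted() above can be consumed by a native JVMTI agent. A minimal sketch, assuming a JVMTI 1.2 environment (the agent and callback names are illustrative, not part of this change):

    #include <jvmti.h>
    #include <stdio.h>

    // Illustrative callback: receives the flags and message posted on the
    // allocation-failure path, e.g. JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP.
    static void JNICALL on_resource_exhausted(jvmtiEnv* jvmti, JNIEnv* jni,
                                              jint flags, const void* reserved,
                                              const char* description) {
      if (flags & JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP) {
        fprintf(stderr, "Java heap exhausted: %s\n", description);
      }
    }

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_2) != JNI_OK) {
        return JNI_ERR;
      }
      jvmtiEventCallbacks callbacks = {};
      callbacks.ResourceExhausted = &on_resource_exhausted;
      jvmti->SetEventCallbacks(&callbacks, (jint)sizeof(callbacks));
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_RESOURCE_EXHAUSTED, NULL);
      return JNI_OK;
    }
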
 141 
 142 void MemAllocator::Allocation::verify_before() {
 143   // Clear unhandled oops for memory allocation.  Memory allocation might
 144   // not take out a lock if allocating from the TLAB, so clear here.
 145   Thread* THREAD = _thread;
 146   assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
 147   debug_only(check_for_valid_allocation_state());
 148   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 149 }
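
The comment in verify_before() alludes to the TLAB fast path, which allocates without taking a lock: each thread bumps a pointer in its thread-local buffer and only falls back to the shared heap, and its synchronization, when the buffer is exhausted. A minimal sketch of that bump-pointer scheme, with hypothetical names (this is not HotSpot's TLAB code):

    #include <cstddef>

    // Toy thread-local allocation buffer: no lock is needed because
    // 'top' and 'end' are owned exclusively by a single thread.
    struct ToyTlab {
      char* top;   // next free byte
      char* end;   // one past the last usable byte

      void* allocate(size_t bytes) {
        if (bytes > (size_t)(end - top)) {
          return nullptr;   // exhausted: caller takes the shared, synchronized path
        }
        void* result = top;
        top += bytes;       // plain bump; no atomics, no mutex
        return result;
      }
    };
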
 150 
 151 void MemAllocator::Allocation::verify_after() {
 152   NOT_PRODUCT(check_for_bad_heap_word_value();)
 153 }
 154 
 155 void MemAllocator::Allocation::check_for_bad_heap_word_value() const {
 156   MemRegion obj_range = _allocator.obj_memory_range(obj());
 157   HeapWord* addr = obj_range.start();
 158   size_t size = obj_range.word_size();
 159   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 160     for (size_t slot = 0; slot < size; slot += 1) {
 161       assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
 162              "Found badHeapWordValue in post-allocation check");
 163     }
 164   }
 165 }
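
This check depends on heap zapping: with ZapUnusedHeapArea, unused heap words are filled with the badHeapWordVal pattern, so any such word still present inside a freshly allocated object points at memory that initialization missed. A standalone sketch of the zap-then-check idea (the constant and sizes are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const intptr_t kBadWord = (intptr_t)0xBAADBABE;  // stand-in for badHeapWordVal

    int main() {
      intptr_t heap[8];
      for (int i = 0; i < 8; i++) heap[i] = kBadWord;   // "zap" the region

      memset(heap, 0, 4 * sizeof(intptr_t));            // initialize a 4-word "object"

      for (int i = 0; i < 4; i++) {                     // post-allocation check
        assert(heap[i] != kBadWord && "found zap word in initialized object");
      }
      return 0;
    }
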
 166 
 167 #ifdef ASSERT
 168 void MemAllocator::Allocation::check_for_valid_allocation_state() const {
 169   // How to choose between a pending exception and a potential
 170   // OutOfMemoryError?  Don't allow pending exceptions.
 171   // This is a VM policy failure, so how do we exhaustively test it?
 172   assert(!_thread->has_pending_exception(),
 173          "shouldn't be allocating with pending exception");
 174   // Allocation of an oop can always invoke a safepoint;
 175   // hence the true argument.
 176   _thread->check_for_valid_safepoint_state(true);
 177 }
 178 #endif
 179 
 180 void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
 181   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
 182   JvmtiExport::vm_object_alloc_event_collector(obj());
 183 
 184   if (!JvmtiExport::should_post_sampled_object_alloc()) {
 185     // Sampling disabled
 186     return;
 187   }
 188 
 189   if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
 190     // Skip sampling: this allocation came from inside the TLAB and neither
 191     // refilled it nor took the sampler-induced slow path.
 192     return;
 193   }
 194 
 195   // If we want to be sampling, protect the allocated object with a Handle
 196   // before doing the callback. The callback is done in the destructor of


 209 
 210     _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
 211   }
 212 
 213   if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
 214     // Tell tlab to forget bytes_since_last if we passed it to the heap sampler.
 215     _thread->tlab().set_sample_end(bytes_since_last != 0);
 216   }
 217 }
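
The sampler call above hands over the allocation size and the bytes allocated since the last sample; crossing the sampling interval triggers a sample. A simplified model of that byte-interval accounting, with hypothetical names (HotSpot's ThreadHeapSampler additionally randomizes the interval between samples):

    #include <cstddef>

    // Toy byte-interval sampler: sample whenever cumulative allocated
    // bytes cross a fixed interval.
    class ToySampler {
      size_t _interval;
      size_t _bytes_until_sample;
    public:
      explicit ToySampler(size_t interval)
        : _interval(interval), _bytes_until_sample(interval) {}

      // Returns true if this allocation should be sampled.
      bool check_for_sampling(size_t size_in_bytes, size_t bytes_since_last) {
        size_t total = size_in_bytes + bytes_since_last;
        if (total < _bytes_until_sample) {
          _bytes_until_sample -= total;
          return false;
        }
        _bytes_until_sample = _interval;   // re-arm for the next sample
        return true;
      }
    };
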
 218 
 219 void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
 220   // support low memory notifications (no-op if not enabled)
 221   LowMemoryDetector::detect_low_memory_for_collected_pools();
 222 }
 223 
 224 void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
 225   HeapWord* mem = (HeapWord*)obj();
 226   size_t size_in_bytes = _allocator._word_size * HeapWordSize;
 227 
 228   if (_allocated_outside_tlab) {
 229     AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
 230   } else if (_allocated_tlab_size != 0) {
 231     // TLAB was refilled
 232     AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
 233                                              size_in_bytes, _thread);
 234   }
 235 }
 236 
 237 void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
 238   if (DTraceAllocProbes) {
 239     // support for DTrace object alloc event (no-op most of the time)
 240     Klass* klass = obj()->klass();
 241     size_t word_size = _allocator._word_size;
 242     if (klass != NULL && klass->name() != NULL) {
 243       SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
 244     }
 245   }
 246 }
 247 
 248 void MemAllocator::Allocation::notify_allocation() {
 249   notify_allocation_low_memory_detector();
 250   notify_allocation_jfr_sampler();
 251   notify_allocation_dtrace_sampler();
 252   notify_allocation_jvmti_sampler();
 253 }
 254 
 255 HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
 256   allocation._allocated_outside_tlab = true;
 257   HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
 258   if (mem == NULL) {
 259     return mem;
 260   }


 347 }
 348 
 349 HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
 350   if (UseTLAB) {
 351     HeapWord* result = allocate_inside_tlab(allocation);
 352     if (result != NULL) {
 353       return result;
 354     }
 355   }
 356 
 357   return allocate_outside_tlab(allocation);
 358 }
 359 
 360 oop MemAllocator::allocate() const {
 361   oop obj = NULL;
 362   {
 363     Allocation allocation(*this, &obj);
 364     HeapWord* mem = mem_allocate(allocation);
 365     if (mem != NULL) {
 366       obj = initialize(mem);
 367     } else {
 368       // The unhandled oop detector will poison local variable obj,
 369       // so reset it to NULL if mem is NULL.
 370       obj = NULL;
 371     }
 372   }
 373   return obj;
 374 }
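
The poisoning mentioned in the comment refers to the CHECK_UNHANDLED_OOPS debug machinery, which overwrites dead oop locals with a junk pattern so that stale uses fail fast instead of silently reading a dangling value. A toy version of the idea (the class and pattern value are hypothetical, not HotSpot's oop implementation):

    #include <cassert>
    #include <cstdint>

    // Toy "checked oop": poisons itself on destruction so a later use of a
    // stale copy trips an assert in debug runs.
    class ToyOop {
      uintptr_t _value;
      static const uintptr_t kPoison = 0xDEADBEEF;
    public:
      explicit ToyOop(uintptr_t v = 0) : _value(v) {}
      ~ToyOop() { _value = kPoison; }   // poison on destruction

      uintptr_t value() const {
        assert(_value != kPoison && "use of poisoned oop");
        return _value;
      }
    };
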
 375 
 376 void MemAllocator::mem_clear(HeapWord* mem) const {
 377   assert(mem != NULL, "cannot initialize NULL object");
 378   const size_t hs = oopDesc::header_size();
 379   assert(_word_size >= hs, "unexpected object size");
 380   oopDesc::set_klass_gap(mem, 0);
 381   Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
 382 }
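
mem_clear() zeroes only the object body: the first header_size() words are skipped because finish() below writes the mark word and klass pointer into them. A sketch of the same split, with a hypothetical header size:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t Word;

    // Zero an object's payload while leaving room for a 2-word header
    // that is written separately (mark word, then klass pointer).
    void toy_mem_clear(Word* mem, size_t word_size) {
      const size_t header_words = 2;   // hypothetical header size
      for (size_t i = header_words; i < word_size; i++) {
        mem[i] = 0;
      }
    }
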
 383 
 384 oop MemAllocator::finish(HeapWord* mem) const {
 385   assert(mem != NULL, "NULL object pointer");
 386   if (UseBiasedLocking) {
 387     oopDesc::set_mark_raw(mem, _klass->prototype_header());
 388   } else {
 389     // May be bootstrapping
 390     oopDesc::set_mark_raw(mem, markWord::prototype());
 391   }
 392   // Need a release store to ensure array/class length, mark word, and
 393   // object zeroing are visible before setting the klass non-NULL, for
 394   // concurrent collectors.
 395   oopDesc::release_set_klass(mem, _klass);
 396   return oop(mem);
 397 }
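
The release store in release_set_klass() is the classic safe-publication pattern: every store performed above it (mark word, array length, zeroed body) must become visible before a concurrent reader can observe a non-NULL klass. In portable C++ the same guarantee looks like this (types and names hypothetical):

    #include <atomic>
    #include <cstdint>

    struct ToyObject {
      uintptr_t mark;
      std::atomic<void*> klass;   // published last
      uintptr_t payload[4];
    };

    void publish(ToyObject* obj, void* k) {
      obj->mark = 1;                                    // header initialization
      for (int i = 0; i < 4; i++) obj->payload[i] = 0;  // body zeroing
      // Release: all stores above happen-before any acquire load that sees k.
      obj->klass.store(k, std::memory_order_release);
    }

    void* observe(ToyObject* obj) {
      // Acquire: a non-null result guarantees header and body are visible.
      return obj->klass.load(std::memory_order_acquire);
    }
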
 398 
 399 oop ObjAllocator::initialize(HeapWord* mem) const {
 400   mem_clear(mem);
 401   return finish(mem);
 402 }
 403 
 404 MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
 405   if (_do_zero) {
 406     return MemAllocator::obj_memory_range(obj);
 407   }
 408   ArrayKlass* array_klass = ArrayKlass::cast(_klass);
 409   const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
 410   return MemRegion(((HeapWord*)obj) + hs, _word_size - hs);

Old version (before the change):

 126     report_java_out_of_memory(message);
 127 
 128     if (JvmtiExport::should_post_resource_exhausted()) {
 129       JvmtiExport::post_resource_exhausted(
 130         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
 131         message);
 132     }
 133     oop exception = _overhead_limit_exceeded ?
 134         Universe::out_of_memory_error_gc_overhead_limit() :
 135         Universe::out_of_memory_error_java_heap();
 136     THROW_OOP_(exception, true);
 137   } else {
 138     THROW_OOP_(Universe::out_of_memory_error_retry(), true);
 139   }
 140 }
 141 
 142 void MemAllocator::Allocation::verify_before() {
 143   // Clear unhandled oops for memory allocation.  Memory allocation might
 144   // not take out a lock if allocating from the TLAB, so clear here.
 145   Thread* THREAD = _thread;
 146   CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
 147   assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
 148   debug_only(check_for_valid_allocation_state());
 149   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 150 }
 151 
 152 void MemAllocator::Allocation::verify_after() {
 153   NOT_PRODUCT(check_for_bad_heap_word_value();)
 154 }
 155 
 156 void MemAllocator::Allocation::check_for_bad_heap_word_value() const {
 157   MemRegion obj_range = _allocator.obj_memory_range(obj());
 158   HeapWord* addr = obj_range.start();
 159   size_t size = obj_range.word_size();
 160   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 161     for (size_t slot = 0; slot < size; slot += 1) {
 162       assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
 163              "Found badHeapWordValue in post-allocation check");
 164     }
 165   }
 166 }
 167 
 168 #ifdef ASSERT
 169 void MemAllocator::Allocation::check_for_valid_allocation_state() const {
 170   // How to choose between a pending exception and a potential
 171   // OutOfMemoryError?  Don't allow pending exceptions.
 172   // This is a VM policy failure, so how do we exhaustively test it?
 173   assert(!_thread->has_pending_exception(),
 174          "shouldn't be allocating with pending exception");
 175   if (StrictSafepointChecks) {
 176     assert(_thread->allow_allocation(),
 177            "Allocation done by thread for which allocation is blocked "
 178            "by No_Allocation_Verifier!");
 179     // Allocation of an oop can always invoke a safepoint;
 180     // hence the true argument.
 181     _thread->check_for_valid_safepoint_state(true);
 182   }
 183 }
 184 #endif
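
The No_Allocation_Verifier named in the assert message above is an RAII scope guard: while one is live, allocation by the thread is flagged as forbidden and check_for_valid_allocation_state() fires. A minimal single-threaded sketch of the pattern (this toy is not HotSpot's implementation, which tracks the state per Thread):

    #include <cassert>

    static int g_no_alloc_depth = 0;

    // Toy scope guard: forbids allocation for as long as it is in scope.
    struct ToyNoAllocVerifier {
      ToyNoAllocVerifier()  { g_no_alloc_depth++; }
      ~ToyNoAllocVerifier() { g_no_alloc_depth--; }
    };

    inline void check_allocation_allowed() {
      assert(g_no_alloc_depth == 0 &&
             "Allocation done by thread for which allocation is blocked");
    }
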
 185 
 186 void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
 187   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
 188   JvmtiExport::vm_object_alloc_event_collector(obj());
 189 
 190   if (!JvmtiExport::should_post_sampled_object_alloc()) {
 191     // Sampling disabled
 192     return;
 193   }
 194 
 195   if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
 196     // Skip sampling: this allocation came from inside the TLAB and neither
 197     // refilled it nor took the sampler-induced slow path.
 198     return;
 199   }
 200 
 201   // If we want to be sampling, protect the allocated object with a Handle
 202   // before doing the callback. The callback is done in the destructor of


 215 
 216     _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
 217   }
 218 
 219   if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
 220     // Tell tlab to forget bytes_since_last if we passed it to the heap sampler.
 221     _thread->tlab().set_sample_end(bytes_since_last != 0);
 222   }
 223 }
 224 
 225 void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
 226   // support low memory notifications (no-op if not enabled)
 227   LowMemoryDetector::detect_low_memory_for_collected_pools();
 228 }
 229 
 230 void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
 231   HeapWord* mem = (HeapWord*)obj();
 232   size_t size_in_bytes = _allocator._word_size * HeapWordSize;
 233 
 234   if (_allocated_outside_tlab) {
 235     AllocTracer::send_allocation_outside_tlab(_allocator._klass, mem, size_in_bytes, _thread);
 236   } else if (_allocated_tlab_size != 0) {
 237     // TLAB was refilled
 238     AllocTracer::send_allocation_in_new_tlab(_allocator._klass, mem, _allocated_tlab_size * HeapWordSize,
 239                                              size_in_bytes, _thread);
 240   }
 241 }
 242 
 243 void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
 244   if (DTraceAllocProbes) {
 245     // support for DTrace object alloc event (no-op most of the time)
 246     Klass* klass = _allocator._klass;
 247     size_t word_size = _allocator._word_size;
 248     if (klass != NULL && klass->name() != NULL) {
 249       SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
 250     }
 251   }
 252 }
 253 
 254 void MemAllocator::Allocation::notify_allocation() {
 255   notify_allocation_low_memory_detector();
 256   notify_allocation_jfr_sampler();
 257   notify_allocation_dtrace_sampler();
 258   notify_allocation_jvmti_sampler();
 259 }
 260 
 261 HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
 262   allocation._allocated_outside_tlab = true;
 263   HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
 264   if (mem == NULL) {
 265     return mem;
 266   }


 353 }
 354 
 355 HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
 356   if (UseTLAB) {
 357     HeapWord* result = allocate_inside_tlab(allocation);
 358     if (result != NULL) {
 359       return result;
 360     }
 361   }
 362 
 363   return allocate_outside_tlab(allocation);
 364 }
 365 
 366 oop MemAllocator::allocate() const {
 367   oop obj = NULL;
 368   {
 369     Allocation allocation(*this, &obj);
 370     HeapWord* mem = mem_allocate(allocation);
 371     if (mem != NULL) {
 372       obj = initialize(mem);
 373     }
 374   }
 375   return obj;
 376 }
 377 
 378 void MemAllocator::mem_clear(HeapWord* mem) const {
 379   assert(mem != NULL, "cannot initialize NULL object");
 380   const size_t hs = oopDesc::header_size();
 381   assert(_word_size >= hs, "unexpected object size");
 382   oopDesc::set_klass_gap(mem, 0);
 383   Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
 384 }
 385 
 386 oop MemAllocator::finish(HeapWord* mem) const {
 387   assert(mem != NULL, "NULL object pointer");
 388   if (UseBiasedLocking) {
 389     oopDesc::set_mark_raw(mem, _klass->prototype_header());
 390   } else {
 391     // May be bootstrapping
 392     oopDesc::set_mark_raw(mem, markOopDesc::prototype());
 393   }
 394   // Need a release store to ensure array/class length, mark word, and
 395   // object zeroing are visible before setting the klass non-NULL, for
 396   // concurrent collectors.
 397   oopDesc::release_set_klass(mem, _klass);
 398   return oop(mem);
 399 }
 400 
 401 oop ObjAllocator::initialize(HeapWord* mem) const {
 402   mem_clear(mem);
 403   return finish(mem);
 404 }
 405 
 406 MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
 407   if (_do_zero) {
 408     return MemAllocator::obj_memory_range(obj);
 409   }
 410   ArrayKlass* array_klass = ArrayKlass::cast(_klass);
 411   const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
 412   return MemRegion(((HeapWord*)obj) + hs, _word_size - hs);

