296 return ao;
297 }
298
299 int len = obj->size();
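// Overly large objects (roughly, anything G1 would treat as humongous) cannot be placed
// in an archive region; archiving such an object fails here and NULL is returned.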
300 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
301 log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
302 p2i(obj), (size_t)obj->size());
303 return NULL;
304 }
305
306 oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
307 if (archived_oop != NULL) {
308 count_allocation(len);
309 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
310 // Reinitialize markword to remove age/marking/locking/etc.
311 //
312 // We need to retain the identity_hash, because it may have been used by some hashtables
313 // in the shared heap. This also has the side effect of pre-initializing the
314 // identity_hash for all shared objects, so they are less likely to be written
315 // into during run time, increasing the potential of memory sharing.
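// (Note: calling identity_hash() below also forces the hash to be computed and installed
// on the original object if it was not already present; copy_set_hash() then carries the
// same value into the fresh prototype mark of the archived copy.)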
316 int hash_original = obj->identity_hash();
317 archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
318 assert(archived_oop->mark().is_unlocked(), "sanity");
319
320 DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
321 assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
322
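// Record the mapping: the cache maps each original object to information about its
// archived copy, and the optional _original_object_table maps the archived copy back
// to the original.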
323 ArchivedObjectCache* cache = archived_object_cache();
324 CachedOopInfo info = make_cached_oop_info(archived_oop);
325 cache->put(obj, info);
326 if (_original_object_table != NULL) {
327 _original_object_table->put(archived_oop, obj);
328 }
329 mark_native_pointers(obj, archived_oop);
330 if (log_is_enabled(Debug, cds, heap)) {
331 ResourceMark rm;
332 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
333 p2i(obj), p2i(archived_oop), obj->klass()->external_name());
334 }
335 } else {
336 log_error(cds, heap)(
337 "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
338 p2i(obj));
339 log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
340 SIZE_FORMAT "M", MaxHeapSize/M);
341 os::_exit(-1);
561 G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
562 os::vm_allocation_granularity());
563 }
564
565 // Copy _pending_roots into an objArray
566 void HeapShared::copy_roots() {
567 // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
568 // objects in this array are discovered during HeapShared::archive_objects(). For example,
569 // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
570 // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
571 // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
572 // Instead, we have to roll our own alloc/copy routine here.
573 int length = _pending_roots != NULL ? _pending_roots->length() : 0;
574 size_t size = objArrayOopDesc::object_size(length);
575 Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
576 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
577
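// Zero the entire allocation first, so the header words and all element slots start out
// as zeros/nulls before the header is installed and the roots are copied in below.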
578 memset(mem, 0, size * BytesPerWord);
579 {
580 // This is copied from MemAllocator::finish
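// It installs a minimal but valid object header (mark word first, then the klass
// pointer) so that the raw memory can be treated as an objArrayOop from here on.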
581 oopDesc::set_mark(mem, markWord::prototype());
582 oopDesc::release_set_klass(mem, k);
583 }
584 {
585 // This is copied from ObjArrayAllocator::initialize
586 arrayOopDesc::set_length(mem, length);
587 }
588
589 _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
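// roots() resolves the handle just stored above, so the obj_at_put() calls below write
// directly into the newly allocated archived array.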
590 for (int i = 0; i < length; i++) {
591 roots()->obj_at_put(i, _pending_roots->at(i));
592 }
593 log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
594 count_allocation(roots()->size());
595 }
596
597 //
598 // Subgraph archiving support
599 //
600 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
601 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
296 return ao;
297 }
298
299 int len = obj->size();
300 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
301 log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
302 p2i(obj), (size_t)obj->size());
303 return NULL;
304 }
305
306 oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
307 if (archived_oop != NULL) {
308 count_allocation(len);
309 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
310 // Reinitialize markword to remove age/marking/locking/etc.
311 //
312 // We need to retain the identity_hash, because it may have been used by some hashtables
313 // in the shared heap. This also has the side effect of pre-initializing the
314 // identity_hash for all shared objects, so they are less likely to be written
315 // into during run time, increasing the potential of memory sharing.
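// Valhalla note: inline-type (value) objects are excluded from the block below -- their
// copied mark word is left untouched and no identity hash is forced on them. For all
// other objects the mark is rebuilt from the klass's prototype_header(), which in the
// Valhalla code replaces the markWord::prototype() call seen in the baseline version
// above, while still preserving the identity hash.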
316 if (!(EnableValhalla && obj->mark().is_inline_type())) {
317 int hash_original = obj->identity_hash();
318 archived_oop->set_mark(archived_oop->klass()->prototype_header().copy_set_hash(hash_original));
319 assert(archived_oop->mark().is_unlocked(), "sanity");
320
321 DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
322 assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
323 }
324
325 ArchivedObjectCache* cache = archived_object_cache();
326 CachedOopInfo info = make_cached_oop_info(archived_oop);
327 cache->put(obj, info);
328 if (_original_object_table != NULL) {
329 _original_object_table->put(archived_oop, obj);
330 }
331 mark_native_pointers(obj, archived_oop);
332 if (log_is_enabled(Debug, cds, heap)) {
333 ResourceMark rm;
334 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
335 p2i(obj), p2i(archived_oop), obj->klass()->external_name());
336 }
337 } else {
338 log_error(cds, heap)(
339 "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
340 p2i(obj));
341 log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
342 SIZE_FORMAT "M", MaxHeapSize/M);
343 os::_exit(-1);
563 G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
564 os::vm_allocation_granularity());
565 }
566
567 // Copy _pending_roots into an objArray
568 void HeapShared::copy_roots() {
569 // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
570 // objects in this array are discovered during HeapShared::archive_objects(). For example,
571 // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
572 // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
573 // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
574 // Instead, we have to roll our own alloc/copy routine here.
575 int length = _pending_roots != NULL ? _pending_roots->length() : 0;
576 size_t size = objArrayOopDesc::object_size(length);
577 Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
578 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
579
580 memset(mem, 0, size * BytesPerWord);
581 {
582 // This is copied from MemAllocator::finish
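// Valhalla note: the mark comes from the klass's own prototype_header() rather than the
// global markWord::prototype(), mirroring the same change in the archiving code above.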
583 oopDesc::set_mark(mem, k->prototype_header());
584 oopDesc::release_set_klass(mem, k);
585 }
586 {
587 // This is copied from ObjArrayAllocator::initialize
588 arrayOopDesc::set_length(mem, length);
589 }
590
591 _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
592 for (int i = 0; i < length; i++) {
593 roots()->obj_at_put(i, _pending_roots->at(i));
594 }
595 log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
596 count_allocation(roots()->size());
597 }
598
599 //
600 // Subgraph archiving support
601 //
602 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
603 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;