src/hotspot/share/gc/shared/space.cpp

 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/vmClasses.hpp"
 27 #include "classfile/vmSymbols.hpp"
 28 #include "gc/shared/blockOffsetTable.inline.hpp"
 29 #include "gc/shared/collectedHeap.inline.hpp"
 30 #include "gc/shared/genCollectedHeap.hpp"
 31 #include "gc/shared/genOopClosures.inline.hpp"

 32 #include "gc/shared/space.hpp"
 33 #include "gc/shared/space.inline.hpp"
 34 #include "gc/shared/spaceDecorator.inline.hpp"
 35 #include "memory/iterator.inline.hpp"
 36 #include "memory/universe.hpp"
 37 #include "oops/oop.inline.hpp"
 38 #include "runtime/atomic.hpp"
 39 #include "runtime/java.hpp"
 40 #include "runtime/prefetch.inline.hpp"
 41 #include "runtime/safepoint.hpp"
 42 #include "utilities/align.hpp"
 43 #include "utilities/copy.hpp"
 44 #include "utilities/globalDefinitions.hpp"
 45 #include "utilities/macros.hpp"
 46 #if INCLUDE_SERIALGC
 47 #include "gc/serial/defNewGeneration.hpp"
 48 #endif
 49 
 50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
 51                                                 HeapWord* top_obj) {

330 }
331 void ContiguousSpace::mangle_unused_area_complete() {
332   mangler()->mangle_unused_area_complete();
333 }
334 #endif  // NOT_PRODUCT
335 
336 void CompactibleSpace::initialize(MemRegion mr,
337                                   bool clear_space,
338                                   bool mangle_space) {
339   Space::initialize(mr, clear_space, mangle_space);
340   set_compaction_top(bottom());
341   _next_compaction_space = NULL;
342 }
343 
344 void CompactibleSpace::clear(bool mangle_space) {
345   Space::clear(mangle_space);
346   _compaction_top = bottom();
347 }
348 
349 HeapWord* CompactibleSpace::forward(oop q, size_t size,
350                                     CompactPoint* cp, HeapWord* compact_top) {
351   // q is alive
352   // First check if we should switch compaction space
353   assert(this == cp->space, "'this' should be current compaction space.");
354   size_t compaction_max_size = pointer_delta(end(), compact_top);
355   while (size > compaction_max_size) {
356     // switch to next compaction space
357     cp->space->set_compaction_top(compact_top);
358     cp->space = cp->space->next_compaction_space();
359     if (cp->space == NULL) {
360       cp->gen = GenCollectedHeap::heap()->young_gen();
361       assert(cp->gen != NULL, "compaction must succeed");
362       cp->space = cp->gen->first_compaction_space();
363       assert(cp->space != NULL, "generation must have a first compaction space");
364     }
365     compact_top = cp->space->bottom();
366     cp->space->set_compaction_top(compact_top);
367     cp->space->initialize_threshold();
368     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
369   }
370 
371   // store the forwarding pointer into the mark word
372   if (cast_from_oop<HeapWord*>(q) != compact_top) {
373     q->forward_to(cast_to_oop(compact_top));
374     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
375   } else {
376     // if the object isn't moving we can just set the mark to the default
377     // mark and handle it specially later on.
378     q->init_mark();
379     assert(!q->is_forwarded(), "should not be forwarded");
380   }
381 
382   compact_top += size;
383 
384   // We need to update the offset table so that the beginnings of objects can be
385   // found during scavenge.  Note that we are updating the offset table based on
386   // where the object will be once the compaction phase finishes.
387   cp->space->alloc_block(compact_top - size, compact_top);
388   return compact_top;
389 }
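
In this pre-patch version, forward() records each object's destination directly in its own mark word via oopDesc::forward_to(), and the later phases read it back with forwardee(). Below is a minimal standalone sketch of that encoding idea in plain C++, not the HotSpot API; the struct layout and the tag value are illustrative only.

#include <cassert>
#include <cstdint>

// Toy model of mark-word forwarding: the destination address is stored in the
// object's mark word with the low bits set to a "forwarded" tag, mirroring the
// idea behind oopDesc::forward_to()/forwardee(). Names and the tag value are
// illustrative, not the real markWord encoding.
struct ToyObject {
  std::uintptr_t mark;
  static constexpr std::uintptr_t forwarded_tag = 0b11;

  void forward_to(ToyObject* dest) {
    mark = reinterpret_cast<std::uintptr_t>(dest) | forwarded_tag;
  }
  bool is_forwarded() const { return (mark & forwarded_tag) == forwarded_tag; }
  ToyObject* forwardee() const {
    assert(is_forwarded());
    return reinterpret_cast<ToyObject*>(mark & ~forwarded_tag);
  }
};

int main() {
  ToyObject from{0}, to{0};
  from.forward_to(&to);              // prepare phase: record the destination
  assert(from.is_forwarded());
  assert(from.forwardee() == &to);   // adjust/copy phases: look the destination up
  return 0;
}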
390 
391 #if INCLUDE_SERIALGC
392 
393 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {

401   if (cp->space == NULL) {
402     assert(cp->gen != NULL, "need a generation");
403     assert(cp->gen->first_compaction_space() == this, "just checking");
404     cp->space = cp->gen->first_compaction_space();
405     cp->space->initialize_threshold();
406     cp->space->set_compaction_top(cp->space->bottom());
407   }
408 
409   HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
410 
411   DeadSpacer dead_spacer(this);
412 
413   HeapWord*  end_of_live = bottom();  // One byte beyond the last byte of the last live object.
414   HeapWord*  first_dead = NULL; // The first dead object.
415 
416   const intx interval = PrefetchScanIntervalInBytes;
417 
418   HeapWord* cur_obj = bottom();
419   HeapWord* scan_limit = top();
420 

421   while (cur_obj < scan_limit) {
422     if (cast_to_oop(cur_obj)->is_gc_marked()) {
423       // prefetch beyond cur_obj
424       Prefetch::write(cur_obj, interval);
425       size_t size = cast_to_oop(cur_obj)->size();
426       compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
427       cur_obj += size;
428       end_of_live = cur_obj;
429     } else {
430       // run over all the contiguous dead objects
431       HeapWord* end = cur_obj;
432       do {
433         // prefetch beyond end
434         Prefetch::write(end, interval);
435         end += cast_to_oop(end)->size();
436       } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
437 
438       // see if we might want to pretend this object is alive so that
439       // we don't have to compact quite as often.
440       if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
441         oop obj = cast_to_oop(cur_obj);
442         compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
443         end_of_live = end;
444       } else {
445         // otherwise, it really is a free region.
446 
447         // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
448         *(HeapWord**)cur_obj = end;
449 
450         // see if this is the first dead region.
451         if (first_dead == NULL) {
452           first_dead = cur_obj;
453         }
454       }
455 
456       // move on to the next object
457       cur_obj = end;
458     }
459   }
460 
461   assert(cur_obj == scan_limit, "just checking");
462   _end_of_live = end_of_live;

465   } else {
466     _first_dead = end_of_live;
467   }
468 
469   // save the compaction_top of the compaction space.
470   cp->space->set_compaction_top(compact_top);
471 }
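
prepare_for_compaction() reuses the first word of every dead run to store the address of the next live object, so the later adjust and copy passes can hop over garbage without re-walking it object by object. A small standalone model of a traversal that consumes those skip pointers follows (plain C++; the word array and the is_live flags are stand-ins for the heap and the GC mark bit):

#include <cstddef>
#include <iostream>

// Toy heap: an array of words. For a live object, heap[i] holds its size in
// words; for a dead run, prepare_for_compaction() has overwritten heap[i] with
// the index of the next live object (the "skip pointer"), mirroring
// '*(HeapWord**)cur_obj = end' and 'cur_obj = *(HeapWord**)cur_obj'.
int main() {
  // Layout: live object of 2 words, a dead run of 3 words, live object of 1 word.
  std::size_t heap[6]    = {2, 0, /*skip pointer*/ 5, 0, 0, 1};
  bool        is_live[6] = {true, false, false, false, false, true};

  std::size_t cur = 0;
  const std::size_t end_of_live = 6;
  while (cur < end_of_live) {
    if (is_live[cur]) {
      std::cout << "live object of " << heap[cur] << " word(s) at index " << cur << '\n';
      cur += heap[cur];        // advance by the object's size
    } else {
      cur = heap[cur];         // jump straight to the next live object
    }
  }
  return 0;
}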
472 
473 void CompactibleSpace::adjust_pointers() {
474   // Check first if there is any work to do.
475   if (used() == 0) {
476     return;   // Nothing to do.
477   }
478 
479   // adjust all the interior pointers to point at the new locations of objects
480   // Used by MarkSweep::mark_sweep_phase3()
481 
482   HeapWord* cur_obj = bottom();
483   HeapWord* const end_of_live = _end_of_live;  // Established by prepare_for_compaction().
484   HeapWord* const first_dead = _first_dead;    // Established by prepare_for_compaction().

485 
486   assert(first_dead <= end_of_live, "Stands to reason, no?");
487 
488   const intx interval = PrefetchScanIntervalInBytes;
489 
490   debug_only(HeapWord* prev_obj = NULL);
491   while (cur_obj < end_of_live) {
492     Prefetch::write(cur_obj, interval);
493     if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
494       // cur_obj is alive
495       // point all the oops to the new location
496       size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
497       debug_only(prev_obj = cur_obj);
498       cur_obj += size;
499     } else {
500       debug_only(prev_obj = cur_obj);
501       // cur_obj is not a live object, instead it points at the next live object
502       cur_obj = *(HeapWord**)cur_obj;
503       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
504     }
505   }
506 
507   assert(cur_obj == end_of_live, "just checking");
508 }
509 
510 void CompactibleSpace::compact() {
511   // Copy all live objects to their new location
512   // Used by MarkSweep::mark_sweep_phase4()
513 
514   verify_up_to_first_dead(this);
515 
516   HeapWord* const start = bottom();
517   HeapWord* const end_of_live = _end_of_live;
518 
519   assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
520   if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
521     // Nothing to compact. The space is either empty or all live objects should be left in place.
522     clear_empty_region(this);
523     return;
524   }
525 
526   const intx scan_interval = PrefetchScanIntervalInBytes;
527   const intx copy_interval = PrefetchCopyIntervalInBytes;
528 
529   assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
530   HeapWord* cur_obj = start;
531   if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
532     // All objects before _first_dead can be skipped. They should not be moved.
533     // A pointer to the first live object is stored at the memory location for _first_dead.
534     cur_obj = *(HeapWord**)(_first_dead);
535   }
536 


537   debug_only(HeapWord* prev_obj = NULL);
538   while (cur_obj < end_of_live) {
539     if (!cast_to_oop(cur_obj)->is_forwarded()) {
540       debug_only(prev_obj = cur_obj);
541       // The first word of the dead object contains a pointer to the next live object or end of space.
542       cur_obj = *(HeapWord**)cur_obj;
543       assert(cur_obj > prev_obj, "we should be moving forward through memory");
544     } else {
545       // prefetch beyond q
546       Prefetch::read(cur_obj, scan_interval);
547 
548       // size and destination
549       size_t size = cast_to_oop(cur_obj)->size();
550       HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
551 
552       // prefetch beyond compaction_top
553       Prefetch::write(compaction_top, copy_interval);
554 
555       // copy object and reinit its mark
556       assert(cur_obj != compaction_top, "everything in this pass should be moving");
557       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
558       cast_to_oop(compaction_top)->init_mark();
559       assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class");
560 
561       debug_only(prev_obj = cur_obj);
562       cur_obj += size;
563     }
564   }
565 
566   clear_empty_region(this);
567 }
568 
569 #endif // INCLUDE_SERIALGC
570 

732 
733 void ContiguousSpace::allocate_temporary_filler(int factor) {
734   // allocate a temporary type array, decreasing the free size by a factor of 'factor'
735   assert(factor >= 0, "just checking");
736   size_t size = pointer_delta(end(), top());
737 
738   // if space is full, return
739   if (size == 0) return;
740 
741   if (factor > 0) {
742     size -= size/factor;
743   }
744   size = align_object_size(size);
745 
746   const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
747   if (size >= align_object_size(array_header_size)) {
748     size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
749     // allocate uninitialized int array
750     typeArrayOop t = (typeArrayOop) cast_to_oop(allocate(size));
751     assert(t != NULL, "allocation should succeed");
752     t->set_mark(markWord::prototype());
753     t->set_klass(Universe::intArrayKlassObj());
754     t->set_length((int)length);
755   } else {
756     assert(size == CollectedHeap::min_fill_size(),
757            "size for smallest fake object doesn't match");
758     instanceOop obj = (instanceOop) cast_to_oop(allocate(size));
759     obj->set_mark(markWord::prototype());
760     obj->set_klass_gap(0);
761     obj->set_klass(vmClasses::Object_klass());
762   }
763 }
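
The filler-array length computation converts the leftover space from heap words to jint elements. A quick worked example, assuming a 64-bit VM where HeapWordSize is 8 bytes (two jints per word); the header size of 4 words below is only an illustrative stand-in for typeArrayOopDesc::header_size(T_INT):

#include <cassert>
#include <cstddef>

// Worked example of the filler-array length computation, assuming a 64-bit VM:
// HeapWordSize = 8 and sizeof(jint) = 4, so each heap word holds two ints.
// The header size of 4 words is only an illustrative stand-in for
// typeArrayOopDesc::header_size(T_INT).
int main() {
  const std::size_t HeapWordSize      = 8;    // bytes per heap word (64-bit VM)
  const std::size_t jint_size         = 4;    // bytes per jint
  const std::size_t array_header_size = 4;    // header size in words (illustrative)
  const std::size_t size              = 100;  // free space to fill, in words

  std::size_t length = (size - array_header_size) * (HeapWordSize / jint_size);
  assert(length == 192);                      // 96 payload words -> 192 int elements
  return 0;
}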
764 
765 void OffsetTableContigSpace::initialize_threshold() {
766   _offsets.initialize_threshold();
767 }
768 
769 void OffsetTableContigSpace::alloc_block(HeapWord* start, HeapWord* end) {
770   _offsets.alloc_block(start, end);
771 }
772 
773 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
774                                                MemRegion mr) :
775   _offsets(sharedOffsetArray, mr),
776   _par_alloc_lock(Mutex::safepoint, "OffsetTableContigSpaceParAlloc_lock", true)
777 {
778   _offsets.set_contig_space(this);
779   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);

 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/vmClasses.hpp"
 27 #include "classfile/vmSymbols.hpp"
 28 #include "gc/shared/blockOffsetTable.inline.hpp"
 29 #include "gc/shared/collectedHeap.inline.hpp"
 30 #include "gc/shared/genCollectedHeap.hpp"
 31 #include "gc/shared/genOopClosures.inline.hpp"
 32 #include "gc/shared/slidingForwarding.inline.hpp"
 33 #include "gc/shared/space.hpp"
 34 #include "gc/shared/space.inline.hpp"
 35 #include "gc/shared/spaceDecorator.inline.hpp"
 36 #include "memory/iterator.inline.hpp"
 37 #include "memory/universe.hpp"
 38 #include "oops/oop.inline.hpp"
 39 #include "runtime/atomic.hpp"
 40 #include "runtime/java.hpp"
 41 #include "runtime/prefetch.inline.hpp"
 42 #include "runtime/safepoint.hpp"
 43 #include "utilities/align.hpp"
 44 #include "utilities/copy.hpp"
 45 #include "utilities/globalDefinitions.hpp"
 46 #include "utilities/macros.hpp"
 47 #if INCLUDE_SERIALGC
 48 #include "gc/serial/defNewGeneration.hpp"
 49 #endif
 50 
 51 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
 52                                                 HeapWord* top_obj) {

331 }
332 void ContiguousSpace::mangle_unused_area_complete() {
333   mangler()->mangle_unused_area_complete();
334 }
335 #endif  // NOT_PRODUCT
336 
337 void CompactibleSpace::initialize(MemRegion mr,
338                                   bool clear_space,
339                                   bool mangle_space) {
340   Space::initialize(mr, clear_space, mangle_space);
341   set_compaction_top(bottom());
342   _next_compaction_space = NULL;
343 }
344 
345 void CompactibleSpace::clear(bool mangle_space) {
346   Space::clear(mangle_space);
347   _compaction_top = bottom();
348 }
349 
350 HeapWord* CompactibleSpace::forward(oop q, size_t size,
351                                     CompactPoint* cp, HeapWord* compact_top, SlidingForwarding* const forwarding) {
352   // q is alive
353   // First check if we should switch compaction space
354   assert(this == cp->space, "'this' should be current compaction space.");
355   size_t compaction_max_size = pointer_delta(end(), compact_top);
356   while (size > compaction_max_size) {
357     // switch to next compaction space
358     cp->space->set_compaction_top(compact_top);
359     cp->space = cp->space->next_compaction_space();
360     if (cp->space == NULL) {
361       cp->gen = GenCollectedHeap::heap()->young_gen();
362       assert(cp->gen != NULL, "compaction must succeed");
363       cp->space = cp->gen->first_compaction_space();
364       assert(cp->space != NULL, "generation must have a first compaction space");
365     }
366     compact_top = cp->space->bottom();
367     cp->space->set_compaction_top(compact_top);
368     cp->space->initialize_threshold();
369     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
370   }
371 
372   // store the forwarding pointer into the mark word
373   if (cast_from_oop<HeapWord*>(q) != compact_top) {
374     forwarding->forward_to(q, cast_to_oop(compact_top));
375     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
376   } else {
377     // if the object isn't moving we can just set the mark to the default
378     // mark and handle it specially later on.
379     q->init_mark();
380     assert(!q->is_forwarded(), "should not be forwarded");
381   }
382 
383   compact_top += size;
384 
385   // We need to update the offset table so that the beginnings of objects can be
386   // found during scavenge.  Note that we are updating the offset table based on
387   // where the object will be once the compaction phase finishes.
388   cp->space->alloc_block(compact_top - size, compact_top);
389   return compact_top;
390 }
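
The patched forward() no longer writes the destination into the object's mark word; it records it through the SlidingForwarding object obtained from the heap (GenCollectedHeap::heap()->forwarding()), and the later phases read it back with forwarding->forwardee(obj). The sketch below models only that call shape with an ordinary hash map; the real class presumably uses a much more compact encoding, which this hunk does not show.

#include <cassert>
#include <unordered_map>

// Abstract stand-in for the forwarding table used on the patched path:
// forward_to() records an object's compaction destination, forwardee() reads
// it back. Only the call shape mirrors SlidingForwarding; storing the mapping
// in a hash map is purely illustrative.
class ToyForwarding {
  std::unordered_map<const void*, void*> _table;
public:
  void forward_to(const void* from, void* to) { _table[from] = to; }
  void* forwardee(const void* from) const {
    auto it = _table.find(from);
    return it == _table.end() ? nullptr : it->second;
  }
};

int main() {
  int old_slot = 0, new_slot = 0;
  ToyForwarding forwarding;                             // in the patch: GenCollectedHeap::heap()->forwarding()
  forwarding.forward_to(&old_slot, &new_slot);          // prepare phase: record the destination
  assert(forwarding.forwardee(&old_slot) == &new_slot); // later phases: look it up
  return 0;
}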
391 
392 #if INCLUDE_SERIALGC
393 
394 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {

402   if (cp->space == NULL) {
403     assert(cp->gen != NULL, "need a generation");
404     assert(cp->gen->first_compaction_space() == this, "just checking");
405     cp->space = cp->gen->first_compaction_space();
406     cp->space->initialize_threshold();
407     cp->space->set_compaction_top(cp->space->bottom());
408   }
409 
410   HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
411 
412   DeadSpacer dead_spacer(this);
413 
414   HeapWord*  end_of_live = bottom();  // One byte beyond the last byte of the last live object.
415   HeapWord*  first_dead = NULL; // The first dead object.
416 
417   const intx interval = PrefetchScanIntervalInBytes;
418 
419   HeapWord* cur_obj = bottom();
420   HeapWord* scan_limit = top();
421 
422   SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
423   while (cur_obj < scan_limit) {
424     if (cast_to_oop(cur_obj)->is_gc_marked()) {
425       // prefetch beyond cur_obj
426       Prefetch::write(cur_obj, interval);
427       size_t size = cast_to_oop(cur_obj)->size();
428       compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top, forwarding);
429       cur_obj += size;
430       end_of_live = cur_obj;
431     } else {
432       // run over all the contiguous dead objects
433       HeapWord* end = cur_obj;
434       do {
435         // prefetch beyond end
436         Prefetch::write(end, interval);
437         end += cast_to_oop(end)->size();
438       } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
439 
440       // see if we might want to pretend this object is alive so that
441       // we don't have to compact quite as often.
442       if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
443         oop obj = cast_to_oop(cur_obj);
444         compact_top = cp->space->forward(obj, obj->size(), cp, compact_top, forwarding);
445         end_of_live = end;
446       } else {
447         // otherwise, it really is a free region.
448 
449         // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
450         *(HeapWord**)cur_obj = end;
451 
452         // see if this is the first dead region.
453         if (first_dead == NULL) {
454           first_dead = cur_obj;
455         }
456       }
457 
458       // move on to the next object
459       cur_obj = end;
460     }
461   }
462 
463   assert(cur_obj == scan_limit, "just checking");
464   _end_of_live = end_of_live;

467   } else {
468     _first_dead = end_of_live;
469   }
470 
471   // save the compaction_top of the compaction space.
472   cp->space->set_compaction_top(compact_top);
473 }
474 
475 void CompactibleSpace::adjust_pointers() {
477   // Check first if there is any work to do.
477   if (used() == 0) {
478     return;   // Nothing to do.
479   }
480 
481   // adjust all the interior pointers to point at the new locations of objects
482   // Used by MarkSweep::mark_sweep_phase3()
483 
484   HeapWord* cur_obj = bottom();
485   HeapWord* const end_of_live = _end_of_live;  // Established by prepare_for_compaction().
486   HeapWord* const first_dead = _first_dead;    // Established by prepare_for_compaction().
487   const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
488 
489   assert(first_dead <= end_of_live, "Stands to reason, no?");
490 
491   const intx interval = PrefetchScanIntervalInBytes;
492 
493   debug_only(HeapWord* prev_obj = NULL);
494   while (cur_obj < end_of_live) {
495     Prefetch::write(cur_obj, interval);
496     if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
497       // cur_obj is alive
498       // point all the oops to the new location
499       size_t size = MarkSweep::adjust_pointers(forwarding, cast_to_oop(cur_obj));
500       debug_only(prev_obj = cur_obj);
501       cur_obj += size;
502     } else {
503       debug_only(prev_obj = cur_obj);
504       // cur_obj is not a live object, instead it points at the next live object
505       cur_obj = *(HeapWord**)cur_obj;
506       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
507     }
508   }
509 
510   assert(cur_obj == end_of_live, "just checking");
511 }
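
In the patched adjust phase, MarkSweep::adjust_pointers() is handed the forwarding table and rewrites every reference field of a live object to its referent's recorded destination. A toy model of that fixup follows; 'Obj' and the map are hypothetical stand-ins for the oop and the SlidingForwarding object.

#include <unordered_map>
#include <vector>

// Toy model of the pointer-adjustment step: each reference field of a live
// object is rewritten to the destination recorded for its referent. 'Obj' and
// the map are hypothetical; in the patch the lookup goes through the
// SlidingForwarding object passed into MarkSweep::adjust_pointers().
struct Obj {
  std::vector<Obj*> fields;   // outgoing references of this object
};

using Forwarding = std::unordered_map<const Obj*, Obj*>;

void adjust_pointers(Obj* obj, const Forwarding& forwarding) {
  for (Obj*& field : obj->fields) {
    if (field == nullptr) continue;            // nothing to adjust
    auto it = forwarding.find(field);
    if (it != forwarding.end()) {
      field = it->second;                      // point the field at the new location
    }
  }
}

int main() {
  Obj a, b, b_new;
  a.fields = {&b};
  Forwarding forwarding = {{&b, &b_new}};      // b will slide to b_new's location
  adjust_pointers(&a, forwarding);
  return a.fields[0] == &b_new ? 0 : 1;
}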
512 
513 void CompactibleSpace::compact() {
514   // Copy all live objects to their new location
515   // Used by MarkSweep::mark_sweep_phase4()
516 
517   verify_up_to_first_dead(this);
518 
519   HeapWord* const start = bottom();
520   HeapWord* const end_of_live = _end_of_live;
521 
522   assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
523   if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
524     // Nothing to compact. The space is either empty or all live objects should be left in place.
525     clear_empty_region(this);
526     return;
527   }
528 
529   const intx scan_interval = PrefetchScanIntervalInBytes;
530   const intx copy_interval = PrefetchCopyIntervalInBytes;
531 
532   assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
533   HeapWord* cur_obj = start;
534   if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
535     // All objects before _first_dead can be skipped. They should not be moved.
536     // A pointer to the first live object is stored at the memory location for _first_dead.
537     cur_obj = *(HeapWord**)(_first_dead);
538   }
539 
540   const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
541 
542   debug_only(HeapWord* prev_obj = NULL);
543   while (cur_obj < end_of_live) {
544     if (!cast_to_oop(cur_obj)->is_forwarded()) {
545       debug_only(prev_obj = cur_obj);
546       // The first word of the dead object contains a pointer to the next live object or end of space.
547       cur_obj = *(HeapWord**)cur_obj;
548       assert(cur_obj > prev_obj, "we should be moving forward through memory");
549     } else {
550       // prefetch beyond q
551       Prefetch::read(cur_obj, scan_interval);
552 
553       // size and destination
554       size_t size = cast_to_oop(cur_obj)->size();
555       HeapWord* compaction_top = cast_from_oop<HeapWord*>(forwarding->forwardee(cast_to_oop(cur_obj)));
556 
557       // prefetch beyond compaction_top
558       Prefetch::write(compaction_top, copy_interval);
559 
560       // copy object and reinit its mark
561       assert(cur_obj != compaction_top, "everything in this pass should be moving");
562       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
563       cast_to_oop(compaction_top)->init_mark();
564       assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class");
565 
566       debug_only(prev_obj = cur_obj);
567       cur_obj += size;
568     }
569   }
570 
571   clear_empty_region(this);
572 }
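
The copy step uses Copy::aligned_conjoint_words(), which, like memmove, tolerates overlapping source and destination ranges; a sliding compaction routinely moves an object toward bottom() by less than its own size, so the ranges do overlap. A minimal illustration, with std::memmove standing in for the HotSpot copy routine:

#include <cassert>
#include <cstring>

// Sliding an object toward bottom() by less than its own size makes the source
// and destination ranges overlap, so the copy must behave like memmove, not
// memcpy. std::memmove stands in for Copy::aligned_conjoint_words() here.
int main() {
  long heap[8] = {0, 0, 1, 2, 3, 4, 0, 0};     // a 4-word "object" at index 2
  // Slide the object from index 2 to index 1: ranges [2,6) and [1,5) overlap.
  std::memmove(&heap[1], &heap[2], 4 * sizeof(long));
  assert(heap[1] == 1 && heap[2] == 2 && heap[3] == 3 && heap[4] == 4);
  return 0;
}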
573 
574 #endif // INCLUDE_SERIALGC
575 

737 
738 void ContiguousSpace::allocate_temporary_filler(int factor) {
739   // allocate a temporary type array, decreasing the free size by a factor of 'factor'
740   assert(factor >= 0, "just checking");
741   size_t size = pointer_delta(end(), top());
742 
743   // if space is full, return
744   if (size == 0) return;
745 
746   if (factor > 0) {
747     size -= size/factor;
748   }
749   size = align_object_size(size);
750 
751   const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
752   if (size >= align_object_size(array_header_size)) {
753     size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
754     // allocate uninitialized int array
755     typeArrayOop t = (typeArrayOop) cast_to_oop(allocate(size));
756     assert(t != NULL, "allocation should succeed");
757     t->set_mark(Universe::intArrayKlassObj()->prototype_header());
758     t->set_klass(Universe::intArrayKlassObj());
759     t->set_length((int)length);
760   } else {
761     assert(size == CollectedHeap::min_fill_size(),
762            "size for smallest fake object doesn't match");
763     instanceOop obj = (instanceOop) cast_to_oop(allocate(size));
764     obj->set_mark(vmClasses::Object_klass()->prototype_header());
765     obj->set_klass_gap(0);
766     obj->set_klass(vmClasses::Object_klass());
767   }
768 }
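
The filler objects now take their initial mark from the klass's prototype_header() rather than the global markWord::prototype(), which suggests the initial mark value now depends on the class. A toy sketch of just the shape of that change (the actual mark encoding is not visible in this hunk, and the values below are illustrative):

#include <cassert>
#include <cstdint>

// Toy illustration of a per-class prototype mark word: each class supplies the
// initial mark for its instances instead of every object sharing one global
// prototype. This only models the shape of the change from markWord::prototype()
// to klass->prototype_header(); the real mark encoding is not shown in this hunk.
struct ToyKlass {
  std::uintptr_t prototype_header;   // initial mark for instances of this class
};

struct ToyObject {
  std::uintptr_t  mark;
  const ToyKlass* klass;
};

int main() {
  ToyKlass int_array_klass{/*prototype_header=*/0x5};  // value is illustrative
  ToyObject filler{};
  filler.mark  = int_array_klass.prototype_header;     // t->set_mark(klass->prototype_header())
  filler.klass = &int_array_klass;                     // t->set_klass(...)
  assert(filler.mark == 0x5);
  return 0;
}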
769 
770 void OffsetTableContigSpace::initialize_threshold() {
771   _offsets.initialize_threshold();
772 }
773 
774 void OffsetTableContigSpace::alloc_block(HeapWord* start, HeapWord* end) {
775   _offsets.alloc_block(start, end);
776 }
777 
778 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
779                                                MemRegion mr) :
780   _offsets(sharedOffsetArray, mr),
781   _par_alloc_lock(Mutex::safepoint, "OffsetTableContigSpaceParAlloc_lock", true)
782 {
783   _offsets.set_contig_space(this);
784   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);