
src/hotspot/share/gc/shared/space.cpp

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/vmClasses.hpp"
 27 #include "classfile/vmSymbols.hpp"
 28 #include "gc/shared/collectedHeap.inline.hpp"
 29 #include "gc/shared/genCollectedHeap.hpp"

 30 #include "gc/shared/space.hpp"
 31 #include "gc/shared/space.inline.hpp"
 32 #include "gc/shared/spaceDecorator.inline.hpp"
 33 #include "memory/iterator.inline.hpp"
 34 #include "memory/universe.hpp"
 35 #include "oops/oop.inline.hpp"
 36 #include "runtime/atomic.hpp"
 37 #include "runtime/java.hpp"
 38 #include "runtime/prefetch.inline.hpp"
 39 #include "runtime/safepoint.hpp"
 40 #include "utilities/align.hpp"
 41 #include "utilities/copy.hpp"
 42 #include "utilities/globalDefinitions.hpp"
 43 #include "utilities/macros.hpp"
 44 #if INCLUDE_SERIALGC
 45 #include "gc/serial/serialBlockOffsetTable.inline.hpp"
 46 #include "gc/serial/defNewGeneration.hpp"
 47 #endif
 48 
 49 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,

235 }
236 void ContiguousSpace::mangle_unused_area_complete() {
237   mangler()->mangle_unused_area_complete();
238 }
239 #endif  // NOT_PRODUCT
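
A side note on the manglers above: in debug builds, unused parts of a space are filled with a recognizable bit pattern so that any read from supposedly-unused memory stands out immediately. A minimal sketch of the idea; the pattern value below is illustrative only (HotSpot's debug builds use dedicated values such as badHeapWordVal for this):

    #include <cstdint>

    // Fill a word-aligned region with a recognizable "mangled" pattern so
    // that reads of stale memory are easy to spot in a debugger.
    inline void mangle_region_sketch(uintptr_t* from, uintptr_t* to) {
      const uintptr_t pattern = static_cast<uintptr_t>(0xABABABABABABABABULL);
      for (uintptr_t* p = from; p < to; ++p) {
        *p = pattern;
      }
    }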
240 
241 void CompactibleSpace::initialize(MemRegion mr,
242                                   bool clear_space,
243                                   bool mangle_space) {
244   Space::initialize(mr, clear_space, mangle_space);
245   set_compaction_top(bottom());
246   _next_compaction_space = nullptr;
247 }
248 
249 void CompactibleSpace::clear(bool mangle_space) {
250   Space::clear(mangle_space);
251   _compaction_top = bottom();
252 }
253 
254 HeapWord* CompactibleSpace::forward(oop q, size_t size,
255                                     CompactPoint* cp, HeapWord* compact_top) {
256   // q is alive
257   // First check if we should switch compaction space
258   assert(this == cp->space, "'this' should be current compaction space.");
259   size_t compaction_max_size = pointer_delta(end(), compact_top);
260   while (size > compaction_max_size) {
261     // switch to next compaction space
262     cp->space->set_compaction_top(compact_top);
263     cp->space = cp->space->next_compaction_space();
264     if (cp->space == nullptr) {
265       cp->gen = GenCollectedHeap::heap()->young_gen();
266       assert(cp->gen != nullptr, "compaction must succeed");
267       cp->space = cp->gen->first_compaction_space();
268       assert(cp->space != nullptr, "generation must have a first compaction space");
269     }
270     compact_top = cp->space->bottom();
271     cp->space->set_compaction_top(compact_top);
272     cp->space->initialize_threshold();
273     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
274   }
275 
276   // store the forwarding pointer into the mark word
277   if (cast_from_oop<HeapWord*>(q) != compact_top) {
278     q->forward_to(cast_to_oop(compact_top));
279     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
280   } else {
281     // if the object isn't moving we can just set the mark to the default
282     // mark and handle it specially later on.
283     q->init_mark();
284     assert(!q->is_forwarded(), "should not be forwarded");
285   }
286 
287   compact_top += size;
288 
289   // We need to update the offset table so that the beginnings of objects can be
290   // found during scavenge.  Note that we are updating the offset table based on
291   // where the object will be once the compaction phase finishes.
292   cp->space->alloc_block(compact_top - size, compact_top);
293   return compact_top;
294 }
295 
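In this original version, forward() records the destination directly in the object's mark word (q->forward_to(cast_to_oop(compact_top))), so the header itself doubles as forwarding storage during compaction. A simplified, standalone sketch of that style of encoding, assuming 64-bit, 8-byte-aligned addresses so the low bits are free for a tag; this is not HotSpot's actual markWord layout:

    #include <cassert>
    #include <cstdint>

    // Hypothetical tag value marking a header word as a forwarding pointer.
    constexpr uintptr_t FORWARDED_TAG = 0x3;

    // Encode a destination address into a header word.
    uintptr_t encode_forwarding(void* new_location) {
      uintptr_t bits = reinterpret_cast<uintptr_t>(new_location);
      assert((bits & 0x7) == 0 && "destination must be 8-byte aligned");
      return bits | FORWARDED_TAG;
    }

    // Decode the destination back out of a header word.
    void* decode_forwarding(uintptr_t header) {
      assert((header & 0x7) == FORWARDED_TAG && "not a forwarding pointer");
      return reinterpret_cast<void*>(header & ~uintptr_t(0x7));
    }
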
296 #if INCLUDE_SERIALGC
297 
298 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {

306   if (cp->space == nullptr) {
307     assert(cp->gen != nullptr, "need a generation");
308     assert(cp->gen->first_compaction_space() == this, "just checking");
309     cp->space = cp->gen->first_compaction_space();
310     cp->space->initialize_threshold();
311     cp->space->set_compaction_top(cp->space->bottom());
312   }
313 
314   HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
315 
316   DeadSpacer dead_spacer(this);
317 
318   HeapWord*  end_of_live = bottom();  // One byte beyond the last byte of the last live object.
319   HeapWord*  first_dead = nullptr; // The first dead object.
320 
321   const intx interval = PrefetchScanIntervalInBytes;
322 
323   HeapWord* cur_obj = bottom();
324   HeapWord* scan_limit = top();
325 

326   while (cur_obj < scan_limit) {
327     if (cast_to_oop(cur_obj)->is_gc_marked()) {
328       // prefetch beyond cur_obj
329       Prefetch::write(cur_obj, interval);
330       size_t size = cast_to_oop(cur_obj)->size();
331       compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
332       cur_obj += size;
333       end_of_live = cur_obj;
334     } else {
335       // run over all the contiguous dead objects
336       HeapWord* end = cur_obj;
337       do {
338         // prefetch beyond end
339         Prefetch::write(end, interval);
340         end += cast_to_oop(end)->size();
341       } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
342 
343       // see if we might want to pretend this object is alive so that
344       // we don't have to compact quite as often.
345       if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
346         oop obj = cast_to_oop(cur_obj);
347         compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
348         end_of_live = end;
349       } else {
350         // otherwise, it really is a free region.
351 
352         // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
353         *(HeapWord**)cur_obj = end;
354 
355         // see if this is the first dead region.
356         if (first_dead == nullptr) {
357           first_dead = cur_obj;
358         }
359       }
360 
361       // move on to the next object
362       cur_obj = end;
363     }
364   }
365 
366   assert(cur_obj == scan_limit, "just checking");
367   _end_of_live = end_of_live;

370   } else {
371     _first_dead = end_of_live;
372   }
373 
374   // save the compaction_top of the compaction space.
375   cp->space->set_compaction_top(compact_top);
376 }
377 
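The loop above threads a skip list through dead memory: the first word of each dead run is overwritten with the address of the next live object (*(HeapWord**)cur_obj = end), so the later phases can hop over an entire dead run in one step. A toy model of the trick, using an array of word-sized slots as a stand-in for the heap:

    #include <cstdio>

    int main() {
      void* heap[8] = {};  // eight one-word "objects"
      bool live[8] = {true, false, false, true, true, false, true, true};

      // For each dead run, store the address of the next live slot in the
      // run's first word, as prepare_for_compaction() does.
      for (int i = 0; i < 8; ) {
        if (live[i]) { i++; continue; }
        int end = i;
        while (end < 8 && !live[end]) end++;
        heap[i] = &heap[end];  // one skip pointer covers the whole dead run
        i = end;
      }

      // A later pass can now jump over each dead run in a single step.
      for (int i = 0; i < 8; ) {
        if (live[i]) { printf("live slot %d\n", i); i++; }
        else { i = (int)(static_cast<void**>(heap[i]) - heap); }
      }
      return 0;
    }
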
378 void CompactibleSpace::adjust_pointers() {
379   // Check first if there is any work to do.
380   if (used() == 0) {
381     return;   // Nothing to do.
382   }
383 
384   // adjust all the interior pointers to point at the new locations of objects
385   // Used by MarkSweep::mark_sweep_phase3()
386 
387   HeapWord* cur_obj = bottom();
388   HeapWord* const end_of_live = _end_of_live;  // Established by prepare_for_compaction().
389   HeapWord* const first_dead = _first_dead;    // Established by prepare_for_compaction().

390 
391   assert(first_dead <= end_of_live, "Stands to reason, no?");
392 
393   const intx interval = PrefetchScanIntervalInBytes;
394 
395   debug_only(HeapWord* prev_obj = nullptr);
396   while (cur_obj < end_of_live) {
397     Prefetch::write(cur_obj, interval);
398     if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
399       // cur_obj is alive
400       // point all the oops to the new location
401       size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
402       debug_only(prev_obj = cur_obj);
403       cur_obj += size;
404     } else {
405       debug_only(prev_obj = cur_obj);
406       // cur_obj is not a live object; instead it points at the next live object
407       cur_obj = *(HeapWord**)cur_obj;
408       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
409     }
410   }
411 
412   assert(cur_obj == end_of_live, "just checking");
413 }
414 
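Worth spelling out about the phase above: every interior reference is rewritten to the referent's future address while all objects still sit at their old locations; only the later compact() pass actually moves memory. A toy illustration, using a hash map as a stand-in for whatever forwarding scheme is in use:

    #include <cstdio>
    #include <initializer_list>
    #include <unordered_map>

    struct Obj { Obj* ref; };  // one reference field per "object"

    int main() {
      Obj a{nullptr}, b{&a};
      Obj new_a{nullptr}, new_b{nullptr};  // compaction destinations
      std::unordered_map<Obj*, Obj*> forwardee{{&a, &new_a}, {&b, &new_b}};

      // Adjust: rewrite each interior pointer to the referent's new home.
      for (Obj* o : {&a, &b}) {
        if (o->ref != nullptr) o->ref = forwardee[o->ref];
      }
      printf("b.ref now points at new_a: %d\n", b.ref == &new_a);
      return 0;
    }
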
415 void CompactibleSpace::compact() {
416   // Copy all live objects to their new location
417   // Used by MarkSweep::mark_sweep_phase4()
418 
419   verify_up_to_first_dead(this);
420 
421   HeapWord* const start = bottom();
422   HeapWord* const end_of_live = _end_of_live;
423 
424   assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
425   if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
426     // Nothing to compact. The space is either empty or all live objects should be left in place.
427     clear_empty_region(this);
428     return;
429   }
430 
431   const intx scan_interval = PrefetchScanIntervalInBytes;
432   const intx copy_interval = PrefetchCopyIntervalInBytes;
433 
434   assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
435   HeapWord* cur_obj = start;
436   if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
437     // All objects before _first_dead can be skipped. They should not be moved.
438     // A pointer to the first live object is stored at the memory location for _first_dead.
439     cur_obj = *(HeapWord**)(_first_dead);
440   }
441 


442   debug_only(HeapWord* prev_obj = nullptr);
443   while (cur_obj < end_of_live) {
444     if (!cast_to_oop(cur_obj)->is_forwarded()) {
445       debug_only(prev_obj = cur_obj);
446       // The first word of the dead object contains a pointer to the next live object or end of space.
447       cur_obj = *(HeapWord**)cur_obj;
448       assert(cur_obj > prev_obj, "we should be moving forward through memory");
449     } else {
450       // prefetch beyond cur_obj
451       Prefetch::read(cur_obj, scan_interval);
452 
453       // size and destination
454       size_t size = cast_to_oop(cur_obj)->size();
455       HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
456 
457       // prefetch beyond compaction_top
458       Prefetch::write(compaction_top, copy_interval);
459 
460       // copy object and reinit its mark
461       assert(cur_obj != compaction_top, "everything in this pass should be moving");
462       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
463       oop new_obj = cast_to_oop(compaction_top);
464 
465       ContinuationGCSupport::transform_stack_chunk(new_obj);
466 
467       new_obj->init_mark();
468       assert(new_obj->klass() != nullptr, "should have a class");
469 
470       debug_only(prev_obj = cur_obj);
471       cur_obj += size;
472     }
473   }
474 
475   clear_empty_region(this);
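
That is the end of the old version of the file. The second copy below shows the same functions after the change: the forwarding pointer no longer goes into the object's mark word via oopDesc::forward_to(); instead a SlidingForwarding object, obtained once per phase from GenCollectedHeap::heap()->forwarding(), is threaded through forward(), prepare_for_compaction(), adjust_pointers() and compact().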

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/vmClasses.hpp"
 27 #include "classfile/vmSymbols.hpp"
 28 #include "gc/shared/collectedHeap.inline.hpp"
 29 #include "gc/shared/genCollectedHeap.hpp"
 30 #include "gc/shared/slidingForwarding.inline.hpp"
 31 #include "gc/shared/space.hpp"
 32 #include "gc/shared/space.inline.hpp"
 33 #include "gc/shared/spaceDecorator.inline.hpp"
 34 #include "memory/iterator.inline.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/oop.inline.hpp"
 37 #include "runtime/atomic.hpp"
 38 #include "runtime/java.hpp"
 39 #include "runtime/prefetch.inline.hpp"
 40 #include "runtime/safepoint.hpp"
 41 #include "utilities/align.hpp"
 42 #include "utilities/copy.hpp"
 43 #include "utilities/globalDefinitions.hpp"
 44 #include "utilities/macros.hpp"
 45 #if INCLUDE_SERIALGC
 46 #include "gc/serial/serialBlockOffsetTable.inline.hpp"
 47 #include "gc/serial/defNewGeneration.hpp"
 48 #endif
 49 
 50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,

236 }
237 void ContiguousSpace::mangle_unused_area_complete() {
238   mangler()->mangle_unused_area_complete();
239 }
240 #endif  // NOT_PRODUCT
241 
242 void CompactibleSpace::initialize(MemRegion mr,
243                                   bool clear_space,
244                                   bool mangle_space) {
245   Space::initialize(mr, clear_space, mangle_space);
246   set_compaction_top(bottom());
247   _next_compaction_space = nullptr;
248 }
249 
250 void CompactibleSpace::clear(bool mangle_space) {
251   Space::clear(mangle_space);
252   _compaction_top = bottom();
253 }
254 
255 HeapWord* CompactibleSpace::forward(oop q, size_t size,
256                                     CompactPoint* cp, HeapWord* compact_top, SlidingForwarding* const forwarding) {
257   // q is alive
258   // First check if we should switch compaction space
259   assert(this == cp->space, "'this' should be current compaction space.");
260   size_t compaction_max_size = pointer_delta(end(), compact_top);
261   while (size > compaction_max_size) {
262     // switch to next compaction space
263     cp->space->set_compaction_top(compact_top);
264     cp->space = cp->space->next_compaction_space();
265     if (cp->space == nullptr) {
266       cp->gen = GenCollectedHeap::heap()->young_gen();
267       assert(cp->gen != nullptr, "compaction must succeed");
268       cp->space = cp->gen->first_compaction_space();
269       assert(cp->space != nullptr, "generation must have a first compaction space");
270     }
271     compact_top = cp->space->bottom();
272     cp->space->set_compaction_top(compact_top);
273     cp->space->initialize_threshold();
274     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
275   }
276 
277   // store the forwarding pointer into the mark word
278   if (cast_from_oop<HeapWord*>(q) != compact_top) {
279     forwarding->forward_to(q, cast_to_oop(compact_top));
280     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
281   } else {
282     // if the object isn't moving we can just set the mark to the default
283     // mark and handle it specially later on.
284     q->init_mark();
285     assert(!q->is_forwarded(), "should not be forwarded");
286   }
287 
288   compact_top += size;
289 
290   // We need to update the offset table so that the beginnings of objects can be
291   // found during scavenge.  Note that we are updating the offset table based on
292   // where the object will be once the compaction phase finishes.
293   cp->space->alloc_block(compact_top - size, compact_top);
294   return compact_top;
295 }
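
The signature change above is the heart of the diff: the destination is now recorded through a SlidingForwarding object rather than written straight into the mark word. The sketch below only mimics the call shape visible in this file (forward_to(obj, dest) and forwardee(obj)); the real class in gc/shared/slidingForwarding.hpp uses a far more compact encoding, so treat this purely as an assumed illustration of the interface, not its implementation:

    #include <unordered_map>

    // Stand-in for the two SlidingForwarding operations used in this file.
    // A hash map replaces the real compact encoding; interface shape only.
    template <typename Oop>
    class ForwardingTableSketch {
      std::unordered_map<Oop, Oop> _table;
    public:
      void forward_to(Oop from, Oop to) { _table[from] = to; }
      Oop forwardee(Oop from) const { return _table.at(from); }
    };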
296 
297 #if INCLUDE_SERIALGC
298 
299 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {

307   if (cp->space == nullptr) {
308     assert(cp->gen != nullptr, "need a generation");
309     assert(cp->gen->first_compaction_space() == this, "just checking");
310     cp->space = cp->gen->first_compaction_space();
311     cp->space->initialize_threshold();
312     cp->space->set_compaction_top(cp->space->bottom());
313   }
314 
315   HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
316 
317   DeadSpacer dead_spacer(this);
318 
319   HeapWord*  end_of_live = bottom();  // One byte beyond the last byte of the last live object.
320   HeapWord*  first_dead = nullptr; // The first dead object.
321 
322   const intx interval = PrefetchScanIntervalInBytes;
323 
324   HeapWord* cur_obj = bottom();
325   HeapWord* scan_limit = top();
326 
327   SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
328   while (cur_obj < scan_limit) {
329     if (cast_to_oop(cur_obj)->is_gc_marked()) {
330       // prefetch beyond cur_obj
331       Prefetch::write(cur_obj, interval);
332       size_t size = cast_to_oop(cur_obj)->size();
333       compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top, forwarding);
334       cur_obj += size;
335       end_of_live = cur_obj;
336     } else {
337       // run over all the contiguous dead objects
338       HeapWord* end = cur_obj;
339       do {
340         // prefetch beyond end
341         Prefetch::write(end, interval);
342         end += cast_to_oop(end)->size();
343       } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
344 
345       // see if we might want to pretend this object is alive so that
346       // we don't have to compact quite as often.
347       if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
348         oop obj = cast_to_oop(cur_obj);
349         compact_top = cp->space->forward(obj, obj->size(), cp, compact_top, forwarding);
350         end_of_live = end;
351       } else {
352         // otherwise, it really is a free region.
353 
354         // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
355         *(HeapWord**)cur_obj = end;
356 
357         // see if this is the first dead region.
358         if (first_dead == nullptr) {
359           first_dead = cur_obj;
360         }
361       }
362 
363       // move on to the next object
364       cur_obj = end;
365     }
366   }
367 
368   assert(cur_obj == scan_limit, "just checking");
369   _end_of_live = end_of_live;

372   } else {
373     _first_dead = end_of_live;
374   }
375 
376   // save the compaction_top of the compaction space.
377   cp->space->set_compaction_top(compact_top);
378 }
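
A reminder on the dead_spacer used above (unchanged by this diff): insert_deadspace() may decide a dead gap is worth keeping, in which case the gap is forwarded as if it were live and nothing has to slide into it; note the cur_obj == compact_top guard, so this only applies while no object has moved yet. A hypothetical sketch of a budget-based heuristic of this kind; the real DeadSpacer ties its budget to the MarkSweepDeadRatio flag and applies further conditions, so the shape here is an assumption:

    #include <cstddef>

    // Hypothetical dead-space heuristic: keep a dead gap in place while a
    // budget lasts, trading some wasted space for less copying.
    class DeadSpacerSketch {
      std::size_t _budget_words;  // remaining dead space we may leave in place
    public:
      explicit DeadSpacerSketch(std::size_t budget_words)
        : _budget_words(budget_words) {}

      bool insert_deadspace(std::size_t gap_words) {
        if (gap_words > _budget_words) return false;  // must compact instead
        _budget_words -= gap_words;                   // pretend the gap is live
        return true;
      }
    };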
379 
380 void CompactibleSpace::adjust_pointers() {
381   // Check first if there is any work to do.
382   if (used() == 0) {
383     return;   // Nothing to do.
384   }
385 
386   // adjust all the interior pointers to point at the new locations of objects
387   // Used by MarkSweep::mark_sweep_phase3()
388 
389   HeapWord* cur_obj = bottom();
390   HeapWord* const end_of_live = _end_of_live;  // Established by prepare_for_compaction().
391   HeapWord* const first_dead = _first_dead;    // Established by prepare_for_compaction().
392   const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
393 
394   assert(first_dead <= end_of_live, "Stands to reason, no?");
395 
396   const intx interval = PrefetchScanIntervalInBytes;
397 
398   debug_only(HeapWord* prev_obj = nullptr);
399   while (cur_obj < end_of_live) {
400     Prefetch::write(cur_obj, interval);
401     if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
402       // cur_obj is alive
403       // point all the oops to the new location
404       size_t size = MarkSweep::adjust_pointers(forwarding, cast_to_oop(cur_obj));
405       debug_only(prev_obj = cur_obj);
406       cur_obj += size;
407     } else {
408       debug_only(prev_obj = cur_obj);
409       // cur_obj is not a live object; instead it points at the next live object
410       cur_obj = *(HeapWord**)cur_obj;
411       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
412     }
413   }
414 
415   assert(cur_obj == end_of_live, "just checking");
416 }
417 
418 void CompactibleSpace::compact() {
419   // Copy all live objects to their new location
420   // Used by MarkSweep::mark_sweep_phase4()
421 
422   verify_up_to_first_dead(this);
423 
424   HeapWord* const start = bottom();
425   HeapWord* const end_of_live = _end_of_live;
426 
427   assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
428   if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
429     // Nothing to compact. The space is either empty or all live objects should be left in place.
430     clear_empty_region(this);
431     return;
432   }
433 
434   const intx scan_interval = PrefetchScanIntervalInBytes;
435   const intx copy_interval = PrefetchCopyIntervalInBytes;
436 
437   assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
438   HeapWord* cur_obj = start;
439   if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
440     // All objects before _first_dead can be skipped. They should not be moved.
441     // A pointer to the first live object is stored at the memory location for _first_dead.
442     cur_obj = *(HeapWord**)(_first_dead);
443   }
444 
445   const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
446 
447   debug_only(HeapWord* prev_obj = nullptr);
448   while (cur_obj < end_of_live) {
449     if (!cast_to_oop(cur_obj)->is_forwarded()) {
450       debug_only(prev_obj = cur_obj);
451       // The first word of the dead object contains a pointer to the next live object or end of space.
452       cur_obj = *(HeapWord**)cur_obj;
453       assert(cur_obj > prev_obj, "we should be moving forward through memory");
454     } else {
455       // prefetch beyond cur_obj
456       Prefetch::read(cur_obj, scan_interval);
457 
458       // size and destination
459       size_t size = cast_to_oop(cur_obj)->size();
460       HeapWord* compaction_top = cast_from_oop<HeapWord*>(forwarding->forwardee(cast_to_oop(cur_obj)));
461 
462       // prefetch beyond compaction_top
463       Prefetch::write(compaction_top, copy_interval);
464 
465       // copy object and reinit its mark
466       assert(cur_obj != compaction_top, "everything in this pass should be moving");
467       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
468       oop new_obj = cast_to_oop(compaction_top);
469 
470       ContinuationGCSupport::transform_stack_chunk(new_obj);
471 
472       new_obj->init_mark();
473       assert(new_obj->klass() != nullptr, "should have a class");
474 
475       debug_only(prev_obj = cur_obj);
476       cur_obj += size;
477     }
478   }
479 
480   clear_empty_region(this);
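
One detail of the copy loop above that is easy to miss: with sliding compaction the source and destination ranges can overlap, which is why Copy::aligned_conjoint_words (conjoint means possibly overlapping, memmove-style semantics) is used rather than a disjoint copy. A minimal equivalent, assuming word-aligned ranges and taking the word size to be sizeof(void*):

    #include <cstddef>
    #include <cstring>

    // memmove handles overlapping ranges; memcpy would not be safe here.
    inline void conjoint_words_sketch(void* from, void* to, std::size_t words) {
      std::memmove(to, from, words * sizeof(void*));
    }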