
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp


  1 /*
  2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 46 #include "oops/compressedOops.inline.hpp"
 47 #include "oops/oop.inline.hpp"
 48 #include "runtime/atomic.hpp"
 49 #include "runtime/javaThread.hpp"
 50 #include "runtime/prefetch.inline.hpp"
 51 #include "utilities/copy.hpp"
 52 #include "utilities/globalDefinitions.hpp"
 53 
 54 inline ShenandoahHeap* ShenandoahHeap::heap() {
 55   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 56 }
 57 
 58 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 59   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 60   // get_region() provides the bounds-check and returns null on OOB.
 61   return _heap->get_region(new_index - 1);
 62 }
 63 
 64 inline bool ShenandoahHeap::has_forwarded_objects() const {
 65   return _gc_state.is_set(HAS_FORWARDED);
 66 }
 67 
 68 inline WorkerThreads* ShenandoahHeap::workers() const {
 69   return _workers;
 70 }

247 }
248 
249 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
250   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
251   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
252 }
253 
254 inline bool ShenandoahHeap::cancelled_gc() const {
255   return _cancelled_gc.get() == CANCELLED;
256 }
257 
258 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
259   if (sts_active && !cancelled_gc()) {
260     if (SuspendibleThreadSet::should_yield()) {
261       SuspendibleThreadSet::yield();
262     }
263   }
264   return cancelled_gc();
265 }
266 
267 inline void ShenandoahHeap::clear_cancelled_gc() {
268   _cancelled_gc.set(CANCELLABLE);
269   _oom_evac_handler.clear();
270 }
271 
272 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
273   assert(UseTLAB, "TLABs should be enabled");
274 
275   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
276   if (gclab == nullptr) {
277     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
278            "Performance: thread should have GCLAB: %s", thread->name());
 279     // No GCLABs in this thread, fall back to shared allocation
280     return nullptr;
281   }
282   HeapWord* obj = gclab->allocate(size);
283   if (obj != nullptr) {
284     return obj;
285   }
286   // Otherwise...
287   return allocate_from_gclab_slow(thread, size);
288 }
289 
290 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
291   oop obj = cast_to_oop(entry);
292   return !_marking_context->is_marked_strong(obj);
293 }
294 
295 inline bool ShenandoahHeap::in_collection_set(oop p) const {
296   assert(collection_set() != nullptr, "Sanity");
297   return collection_set()->is_in(p);
298 }
299 
300 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
301   assert(collection_set() != nullptr, "Sanity");
302   return collection_set()->is_in_loc(p);
303 }
304 
305 inline bool ShenandoahHeap::is_stable() const {
306   return _gc_state.is_clear();
307 }
308 
309 inline bool ShenandoahHeap::is_idle() const {
310   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
311 }
312 
313 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
314   return _gc_state.is_set(MARKING);
315 }
316 
317 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
318   return _gc_state.is_set(EVACUATION);
319 }
320 
321 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
322   return _degenerated_gc_in_progress.is_set();
323 }
324 
325 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
326   return _full_gc_in_progress.is_set();
327 }
328 
329 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
330   return _full_gc_move_in_progress.is_set();
331 }
332 
333 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
334   return _gc_state.is_set(UPDATEREFS);
335 }
336 
337 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
338   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
339 }
340 
341 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
342   return _concurrent_strong_root_in_progress.is_set();
343 }
344 
345 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
346   return _gc_state.is_set(WEAK_ROOTS);
347 }
348 
349 template<class T>
350 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
351   marked_object_iterate(region, cl, region->top());
352 }
353 
354 template<class T>
355 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
356   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
357 
358   ShenandoahMarkingContext* const ctx = complete_marking_context();
359   assert(ctx->is_complete(), "sanity");
360 
361   HeapWord* tams = ctx->top_at_mark_start(region);
362 
363   size_t skip_bitmap_delta = 1;
364   HeapWord* start = region->bottom();
365   HeapWord* end = MIN2(tams, region->end());
366 
367   // Step 1. Scan below the TAMS based on bitmap data.
368   HeapWord* limit_bitmap = MIN2(limit, tams);
369 
370   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
371   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
372   HeapWord* cb = ctx->get_next_marked_addr(start, end);
373 
374   intx dist = ShenandoahMarkScanPrefetch;
375   if (dist > 0) {
376     // Batched scan that prefetches the oop data, anticipating the access to
 377     // either header, oop field, or forwarding pointer. Note that we cannot
 378     // touch anything in the oop while it is still being prefetched, to give
 379     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

470     HeapWord* bottom = region->bottom();
471     if (top > bottom) {
472       region = region->humongous_start_region();
473       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
474       marked_object_iterate(region, &objs);
475     }
476   } else {
477     ShenandoahObjectToOopClosure<T> objs(cl);
478     marked_object_iterate(region, &objs, top);
479   }
480 }
481 
482 inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
483   if (region_idx < _num_regions) {
484     return _regions[region_idx];
485   } else {
486     return nullptr;
487   }
488 }
489 
490 inline void ShenandoahHeap::mark_complete_marking_context() {
491   _marking_context->mark_complete();
492 }
493 
494 inline void ShenandoahHeap::mark_incomplete_marking_context() {
495   _marking_context->mark_incomplete();
496 }
497 
498 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 499   assert(_marking_context->is_complete(), "sanity");
500   return _marking_context;
501 }
502 
503 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
504   return _marking_context;
505 }
506 
507 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

  1 /*
  2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 28 
 29 #include "gc/shenandoah/shenandoahHeap.hpp"
 30 
 31 #include "classfile/javaClasses.inline.hpp"
 32 #include "gc/shared/markBitMap.inline.hpp"
 33 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 34 #include "gc/shared/continuationGCSupport.inline.hpp"
 35 #include "gc/shared/suspendibleThreadSet.hpp"
 36 #include "gc/shared/tlab_globals.hpp"
 37 #include "gc/shenandoah/shenandoahAsserts.hpp"
 38 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 40 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 41 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 44 #include "gc/shenandoah/shenandoahGeneration.hpp"
 45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 46 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 47 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 48 #include "oops/compressedOops.inline.hpp"
 49 #include "oops/oop.inline.hpp"
 50 #include "runtime/atomic.hpp"
 51 #include "runtime/javaThread.hpp"
 52 #include "runtime/prefetch.inline.hpp"
 53 #include "runtime/objectMonitor.inline.hpp"
 54 #include "utilities/copy.hpp"
 55 #include "utilities/globalDefinitions.hpp"
 56 
 57 inline ShenandoahHeap* ShenandoahHeap::heap() {
 58   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 59 }
 60 
 61 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 62   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 63   // get_region() provides the bounds-check and returns null on OOB.
 64   return _heap->get_region(new_index - 1);
 65 }
 66 
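// Usage sketch (illustrative, not part of shenandoahHeap.inline.hpp): GC workers can
// share one iterator and claim regions until next() returns null; the relaxed atomic
// add above makes the claims race-free. 'process_region' is a hypothetical callback.
ShenandoahRegionIterator iter;
for (ShenandoahHeapRegion* r = iter.next(); r != nullptr; r = iter.next()) {
  process_region(r);  // per-region work for this worker
}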
 67 inline bool ShenandoahHeap::has_forwarded_objects() const {
 68   return _gc_state.is_set(HAS_FORWARDED);
 69 }
 70 
 71 inline WorkerThreads* ShenandoahHeap::workers() const {
 72   return _workers;
 73 }

250 }
251 
252 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
253   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
254   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
255 }
256 
257 inline bool ShenandoahHeap::cancelled_gc() const {
258   return _cancelled_gc.get() == CANCELLED;
259 }
260 
261 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
262   if (sts_active && !cancelled_gc()) {
263     if (SuspendibleThreadSet::should_yield()) {
264       SuspendibleThreadSet::yield();
265     }
266   }
267   return cancelled_gc();
268 }
269 
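// Usage sketch (illustrative, not part of shenandoahHeap.inline.hpp): a concurrent
// phase can poll for cancellation and cooperate with safepoint requests in one call.
// 'has_more_work' and 'do_unit_of_work' are hypothetical helpers.
ShenandoahHeap* const heap = ShenandoahHeap::heap();
while (has_more_work()) {
  if (heap->check_cancelled_gc_and_yield(/* sts_active */ true)) {
    break;  // GC was cancelled; abandon the remainder of this phase
  }
  do_unit_of_work();
}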
270 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
271   _cancelled_gc.set(CANCELLABLE);
272   if (_cancel_requested_time > 0) {
273     double cancel_time = os::elapsedTime() - _cancel_requested_time;
274     log_info(gc)("GC cancellation took %.3fs", cancel_time);
275     _cancel_requested_time = 0;
276   }
277 
278   if (clear_oom_handler) {
279     _oom_evac_handler.clear();
280   }
281 }
282 
283 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
284   assert(UseTLAB, "TLABs should be enabled");
285 
286   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
287   if (gclab == nullptr) {
288     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
289            "Performance: thread should have GCLAB: %s", thread->name());
 290     // No GCLABs in this thread, fall back to shared allocation
291     return nullptr;
292   }
293   HeapWord* obj = gclab->allocate(size);
294   if (obj != nullptr) {
295     return obj;
296   }
297   return allocate_from_gclab_slow(thread, size);
298 }
299 
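// Usage sketch (illustrative, not part of shenandoahHeap.inline.hpp): evacuation code
// typically tries the GCLAB first and falls back to a shared allocation when both the
// fast and slow GCLAB paths fail. 'allocate_shared_for_evacuation' is hypothetical.
ShenandoahHeap* const heap = ShenandoahHeap::heap();
HeapWord* copy = heap->allocate_from_gclab(Thread::current(), size);
if (copy == nullptr) {
  copy = allocate_shared_for_evacuation(size);  // hypothetical shared-heap fallback
}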
300 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
301   // This operates on a new copy of an object. This means that the object's mark-word
302   // is thread-local and therefore safe to access. However, when the mark is
303   // displaced (i.e. stack-locked or monitor-locked), it must be considered
304   // a shared memory location that can be accessed by other threads.
305   // In particular, a competing evacuating thread can succeed in installing its copy
306   // as the forwardee and then unlock the object, at which point 'our'
307   // write to the foreign stack location would potentially overwrite random
308   // information on that stack. Writing to a monitor is less problematic,
309   // but still not safe: while the ObjectMonitor would not randomly disappear,
310   // the other thread would also write to the same displaced header location,
311   // possibly increasing the age twice.
312   // For all these reasons, we take the conservative approach and do not attempt
313   // to increase the age when the header is displaced.
314   markWord w = obj->mark();
315   // The mark-word has been copied from the original object. It cannot be
316   // inflating, because inflation cannot be interrupted by a safepoint,
317   // and after a safepoint, a Java thread would first have to successfully
318   // evacuate the object before it could inflate the monitor.
319   assert(!w.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT, "must not inflate monitor before evacuation of object succeeds");
320   // It is possible that we have copied the object after another thread has
321   // already successfully completed evacuation. While harmless (we would never
322   // publish our copy), don't even attempt to modify the age when that
323   // happens.
324   if (!w.has_displaced_mark_helper() && !w.is_marked()) {
325     w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
326     obj->set_mark(w);
327   }
328 }
329 
330 // Return the object's age, or a sentinel value when the age cannot be
331 // reliably determined because of concurrent locking by the
332 // mutator.
333 uint ShenandoahHeap::get_object_age(oop obj) {
334   // This is impossible to do unless we "freeze" ABA-type oscillations.
335   // With Lilliput, we can do this more easily.
336   markWord w = obj->mark();
337   assert(!w.is_marked(), "must not be forwarded");
338   if (w.has_monitor()) {
339     w = w.monitor()->header();
340   } else if (w.is_being_inflated() || w.has_displaced_mark_helper()) {
341     // Informs caller that we aren't able to determine the age
342     return markWord::max_age + 1; // sentinel
343   }
344   assert(w.age() <= markWord::max_age, "Impossible!");
345   return w.age();
346 }
347 
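// Usage sketch (illustrative, not part of shenandoahHeap.inline.hpp): callers must
// treat any value above markWord::max_age as "age unknown" and act conservatively.
// 'heap', 'obj' and 'tenuring_threshold' are assumed to be in scope here.
uint age = heap->get_object_age(obj);
if (age > markWord::max_age) {
  // Header was displaced or inflating; skip age-based decisions for this copy.
} else if (age >= tenuring_threshold) {
  // Old enough: a candidate for promotion to the old generation.
}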
348 inline bool ShenandoahHeap::is_in_active_generation(oop obj) const {
349   if (!mode()->is_generational()) {
350     // everything is the same single generation
351     assert(is_in_reserved(obj), "Otherwise shouldn't return true below");
352     return true;
353   }
354 
355   ShenandoahGeneration* const gen = active_generation();
356 
357   if (gen == nullptr) {
358     // No collection is happening; we only expect this to be called
359     // when concurrent processing is active, but that could change.
360     return false;
361   }
362 
363   assert(is_in_reserved(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj));
364   assert(gen->is_old() || gen->is_young() || gen->is_global(),
365          "Active generation must be old, young, or global");
366 
367   size_t index = heap_region_containing(obj)->index();
368 
369   // No flickering!
370   assert(gen == active_generation(), "Race?");
371 
372   switch (_affiliations[index]) {
373   case ShenandoahAffiliation::FREE:
374     // Free regions are in Old, Young, Global
375     return true;
376   case ShenandoahAffiliation::YOUNG_GENERATION:
377     // Young regions are in young_generation and global_generation, not in old_generation
378     return gen != (ShenandoahGeneration*)old_generation();
379   case ShenandoahAffiliation::OLD_GENERATION:
380     // Old regions are in old_generation and global_generation, not in young_generation
381     return gen != (ShenandoahGeneration*)young_generation();
382   default:
383     assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, _affiliations[index], index);
384     return false;
385   }
386 }
387 
388 inline bool ShenandoahHeap::is_in_young(const void* p) const {
389   return is_in_reserved(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::YOUNG_GENERATION);
390 }
391 
392 inline bool ShenandoahHeap::is_in_old(const void* p) const {
393   return is_in_reserved(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::OLD_GENERATION);
394 }
395 
396 inline bool ShenandoahHeap::is_old(oop obj) const {
397   return active_generation()->is_young() && is_in_old(obj);
398 }
399 
400 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) {
401   return (ShenandoahAffiliation) _affiliations[r->index()];
402 }
403 
404 inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
405                                                         ShenandoahAffiliation new_affiliation) {
406   // A lock is required when changing from FREE to NON-FREE.  Though it may be possible to elide the lock when
407   // transitioning from in-use to FREE, the current implementation uses a lock for this transition.  A lock is
408   // not required to change from YOUNG to OLD (i.e. when promoting a humongous region).
409   //
410   //         new_affiliation is:     FREE   YOUNG   OLD
411   //  orig_affiliation is:  FREE      X       L      L
412   //                       YOUNG      L       X
413   //                         OLD      L       X      X
414   //  X means state transition won't happen (so don't care)
415   //  L means lock should be held
416   //  Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
417   //
418   // Note: during Full GC, all transitions between states are possible, and we should be at a safepoint.
419 
420   if ((orig_affiliation == ShenandoahAffiliation::FREE) || (new_affiliation == ShenandoahAffiliation::FREE)) {
421     shenandoah_assert_heaplocked_or_safepoint();
422   }
423 }
424 
425 inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation) {
426 #ifdef ASSERT
427   assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
428 #endif
429   _affiliations[r->index()] = (uint8_t) new_affiliation;
430 }
431 
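// Usage sketch (illustrative, not part of shenandoahHeap.inline.hpp): transitions into
// or out of FREE are made while holding the heap lock, matching the matrix above.
// 'heap' and 'r' are assumed to be in scope; ShenandoahHeapLocker is the usual RAII
// holder for the Shenandoah heap lock.
{
  ShenandoahHeapLocker locker(heap->lock());
  heap->set_affiliation(r, ShenandoahAffiliation::YOUNG_GENERATION);
}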
432 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) {
433   return (ShenandoahAffiliation) _affiliations[index];
434 }
435 
436 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
437   oop obj = cast_to_oop(entry);
438   return !_marking_context->is_marked_strong(obj);
439 }
440 
441 inline bool ShenandoahHeap::in_collection_set(oop p) const {
442   assert(collection_set() != nullptr, "Sanity");
443   return collection_set()->is_in(p);
444 }
445 
446 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
447   assert(collection_set() != nullptr, "Sanity");
448   return collection_set()->is_in_loc(p);
449 }
450 
451 inline bool ShenandoahHeap::is_stable() const {
452   return _gc_state.is_clear();
453 }
454 
455 inline bool ShenandoahHeap::is_idle() const {
456   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
457 }
458 
459 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
460   return _gc_state.is_set(MARKING);
461 }
462 
463 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
464   return _gc_state.is_set(YOUNG_MARKING);
465 }
466 
467 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
468   return _gc_state.is_set(OLD_MARKING);
469 }
470 
471 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
472   return _gc_state.is_set(EVACUATION);
473 }
474 
475 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
476   return _degenerated_gc_in_progress.is_set();
477 }
478 
479 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
480   return _full_gc_in_progress.is_set();
481 }
482 
483 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
484   return _full_gc_move_in_progress.is_set();
485 }
486 
487 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
488   return _gc_state.is_set(UPDATEREFS);
489 }
490 
491 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
492   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
493 }
494 
495 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
496   return _concurrent_strong_root_in_progress.is_set();
497 }
498 
499 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
500   return _gc_state.is_set(WEAK_ROOTS);
501 }
502 
503 template<class T>
504 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
505   marked_object_iterate(region, cl, region->top());
506 }
507 
508 template<class T>
509 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
510   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
511 
512   ShenandoahMarkingContext* const ctx = marking_context();
513 
514   HeapWord* tams = ctx->top_at_mark_start(region);
515 
516   size_t skip_bitmap_delta = 1;
517   HeapWord* start = region->bottom();
518   HeapWord* end = MIN2(tams, region->end());
519 
520   // Step 1. Scan below the TAMS based on bitmap data.
521   HeapWord* limit_bitmap = MIN2(limit, tams);
522 
523   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
524   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
525   HeapWord* cb = ctx->get_next_marked_addr(start, end);
526 
527   intx dist = ShenandoahMarkScanPrefetch;
528   if (dist > 0) {
529     // Batched scan that prefetches the oop data, anticipating the access to
 530     // either header, oop field, or forwarding pointer. Note that we cannot
 531     // touch anything in the oop while it is still being prefetched, to give
 532     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

623     HeapWord* bottom = region->bottom();
624     if (top > bottom) {
625       region = region->humongous_start_region();
626       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
627       marked_object_iterate(region, &objs);
628     }
629   } else {
630     ShenandoahObjectToOopClosure<T> objs(cl);
631     marked_object_iterate(region, &objs, top);
632   }
633 }
634 
635 inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
636   if (region_idx < _num_regions) {
637     return _regions[region_idx];
638   } else {
639     return nullptr;
640   }
641 }
642 
643 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
644   assert(_marking_context->is_complete(), "sanity");
645   return _marking_context;
646 }
647 
648 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
649   return _marking_context;
650 }
651 
652 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP