/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP

#include "memory/iterator.hpp"
#include "oops/oop.hpp"
#include "oops/objArrayOop.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCardStats.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"

inline size_t
ShenandoahDirectCardMarkRememberedSet::last_valid_index() const {
  return _card_table->last_valid_index();
}

inline size_t
ShenandoahDirectCardMarkRememberedSet::total_cards() const {
  return _total_card_count;
}

inline size_t
ShenandoahDirectCardMarkRememberedSet::card_index_for_addr(HeapWord *p) const {
  return _card_table->index_for(p);
}

inline HeapWord*
ShenandoahDirectCardMarkRememberedSet::addr_for_card_index(size_t card_index) const {
  return _whole_heap_base + CardTable::card_size_in_words() * card_index;
}
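
// For illustration only: the concrete numbers below assume the default 512-byte cards
// (64 HeapWords on a 64-bit JVM), which is an assumption, since the card size is
// configurable. card_index_for_addr() and addr_for_card_index() are simple inverses
// over the whole-heap base:
//
//   HeapWord* base = _whole_heap_base;                 // card 0 covers [base, base + 64)
//   size_t idx     = card_index_for_addr(base + 100);  // 100 / 64  -> card 1
//   HeapWord* back = addr_for_card_index(idx);         // base + 64, the card's first word
//
// i.e. addr_for_card_index(i) <= p < addr_for_card_index(i + 1) whenever
// card_index_for_addr(p) == i.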

inline const CardValue*
ShenandoahDirectCardMarkRememberedSet::get_card_table_byte_map(bool use_write_table) const {
  return use_write_table ?
           _card_table->write_byte_map()
           : _card_table->read_byte_map();
}

inline bool
ShenandoahDirectCardMarkRememberedSet::is_write_card_dirty(size_t card_index) const {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  return (bp[0] == CardTable::dirty_card_val());
}

inline bool
ShenandoahDirectCardMarkRememberedSet::is_card_dirty(size_t card_index) const {
  CardValue* bp = &(_card_table->read_byte_map())[card_index];
  return (bp[0] == CardTable::dirty_card_val());
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(size_t card_index) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  bp[0] = CardTable::dirty_card_val();
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(size_t card_index, size_t num_cards) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  while (num_cards-- > 0) {
    *bp++ = CardTable::dirty_card_val();
  }
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(size_t card_index) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  bp[0] = CardTable::clean_card_val();
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(size_t card_index, size_t num_cards) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  while (num_cards-- > 0) {
    *bp++ = CardTable::clean_card_val();
  }
}

inline bool
ShenandoahDirectCardMarkRememberedSet::is_card_dirty(HeapWord *p) const {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->read_byte_map())[index];
  return (bp[0] == CardTable::dirty_card_val());
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(HeapWord *p) {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->write_byte_map())[index];
  bp[0] = CardTable::dirty_card_val();
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) {
  CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift];
  CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift];
  // If (p + num_heap_words) is not aligned on a card boundary, we also need to dirty the last card.
  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
    end_bp++;
  }
  while (bp < end_bp) {
    *bp++ = CardTable::dirty_card_val();
  }
}
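
// For illustration only (the numbers below assume the default 512-byte cards, i.e. 64
// HeapWords per card on a 64-bit JVM, and are not a claim about the configured size):
//
//   mark_range_as_dirty(p, 64)  with p on a card boundary dirties exactly one card;
//   mark_range_as_dirty(p, 65)  spills one word onto the next card, so the
//                               (CardTable::card_size() - 1) test above bumps end_bp
//                               and two cards are dirtied.
//
// The same rounding rule applies to mark_range_as_clean(HeapWord*, size_t) below.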

inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(HeapWord *p) {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->write_byte_map())[index];
  bp[0] = CardTable::clean_card_val();
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_read_card_as_clean(size_t index) {
  CardValue* bp = &(_card_table->read_byte_map())[index];
  bp[0] = CardTable::clean_card_val();
}

inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(HeapWord *p, size_t num_heap_words) {
  CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift];
  CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift];
  // If (p + num_heap_words) is not aligned on a card boundary, we also need to clean the last card.
  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
    end_bp++;
  }
  while (bp < end_bp) {
    *bp++ = CardTable::clean_card_val();
  }
}

inline size_t
ShenandoahDirectCardMarkRememberedSet::cluster_count() const {
  return _cluster_count;
}

// No lock required because arguments align with card boundaries.
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::reset_object_range(HeapWord* from, HeapWord* to) {
  assert(((((unsigned long long) from) & (CardTable::card_size() - 1)) == 0) &&
         ((((unsigned long long) to) & (CardTable::card_size() - 1)) == 0),
         "reset_object_range bounds must align with card boundaries");
  size_t card_at_start = _rs->card_index_for_addr(from);
  size_t num_cards = (to - from) / CardTable::card_size_in_words();

  for (size_t i = 0; i < num_cards; i++) {
    object_starts[card_at_start + i].short_word = 0;
  }
}

// Assume only one thread at a time registers objects pertaining to
// each card-table entry's range of memory.
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::register_object(HeapWord* address) {
  shenandoah_assert_heaplocked();

  register_object_without_lock(address);
}

template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::register_object_without_lock(HeapWord* address) {
  size_t card_at_start = _rs->card_index_for_addr(address);
  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
  uint8_t offset_in_card = address - card_start_address;

  if (!starts_object(card_at_start)) {
    set_starts_object_bit(card_at_start);
    set_first_start(card_at_start, offset_in_card);
    set_last_start(card_at_start, offset_in_card);
  } else {
    if (offset_in_card < get_first_start(card_at_start))
      set_first_start(card_at_start, offset_in_card);
    if (offset_in_card > get_last_start(card_at_start))
      set_last_start(card_at_start, offset_in_card);
  }
}
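
// For illustration only (the word offsets below are hypothetical): registering objects
// that start at offsets 40, 8 and 56 of the same card leaves that card's entry with
//
//   starts_object(card) == true, first_start == 8, last_start == 56
//
// regardless of registration order, since register_object_without_lock() only ever
// widens the [first_start, last_start] interval. An object that merely straddles into
// a card without starting in it leaves that card's entry untouched.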

template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::coalesce_objects(HeapWord* address, size_t length_in_words) {

  size_t card_at_start = _rs->card_index_for_addr(address);
  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
  size_t card_at_end = card_at_start + ((address + length_in_words) - card_start_address) / CardTable::card_size_in_words();

  if (card_at_start == card_at_end) {
    // There are no changes to the get_first_start array.  Either get_first_start(card_at_start) returns this coalesced object,
    // or it returns an object that precedes the coalesced object.
    if (card_start_address + get_last_start(card_at_start) < address + length_in_words) {
      uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
      // The object that used to be the last object starting within this card is being subsumed within the coalesced
      // object.  Since we always coalesce entire objects, this condition only occurs if the last object ends before or at
      // the end of the card's memory range and there is no object following this object.  In this case, adjust last_start
      // to represent the start of the coalesced range.
      set_last_start(card_at_start, coalesced_offset);
    }
    // Else, no changes to last_starts information.  Either get_last_start(card_at_start) returns the object that immediately
    // follows the coalesced object, or it returns an object that follows the object immediately following the coalesced object.
  } else {
    uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
    if (get_last_start(card_at_start) > coalesced_offset) {
      // The existing last start is being coalesced, so create a new last start.
      set_last_start(card_at_start, coalesced_offset);
    }
    // otherwise, get_last_start(card_at_start) must equal coalesced_offset

    // All the cards between first and last get cleared.
    for (size_t i = card_at_start + 1; i < card_at_end; i++) {
      clear_starts_object_bit(i);
    }

    uint8_t follow_offset = static_cast<uint8_t>((address + length_in_words) - _rs->addr_for_card_index(card_at_end));
    if (starts_object(card_at_end) && (get_first_start(card_at_end) < follow_offset)) {
      // It may be that after coalescing within this last card's memory range, the last card
      // no longer holds an object.
      if (get_last_start(card_at_end) >= follow_offset) {
        set_first_start(card_at_end, follow_offset);
      } else {
        // last_start is being coalesced so this card no longer has any objects.
        clear_starts_object_bit(card_at_end);
      }
    }
    // else
    //  card_at_end did not have an object, so it still does not have an object, or
    //  card_at_end had an object that starts after the coalesced object, so no changes required for card_at_end

  }
}
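
// For illustration only (card indices and word offsets below are hypothetical): coalescing
// a run of dead objects into one filler that starts at offset 48 of card k and ends at
// offset 16 of card k+3
//
//   - keeps first_start(k) unchanged (some earlier object, or the filler itself), and
//     lowers last_start(k) to 48 if a now-dead object used to start after offset 48;
//   - clears the starts-object bit on cards k+1 and k+2, which are now interior to the
//     filler;
//   - on card k+3, raises first_start to 16 (the contiguous object that follows the
//     filler) if some registered start at or beyond offset 16 survives, or clears the
//     starts-object bit if every registered start there was swallowed by the filler.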


template<typename RememberedSet>
inline size_t
ShenandoahCardCluster<RememberedSet>::get_first_start(size_t card_index) const {
  assert(starts_object(card_index), "Can't get first start because no object starts here");
  return object_starts[card_index].offsets.first & FirstStartBits;
}

template<typename RememberedSet>
inline size_t
ShenandoahCardCluster<RememberedSet>::get_last_start(size_t card_index) const {
  assert(starts_object(card_index), "Can't get last start because no object starts here");
  return object_starts[card_index].offsets.last;
}

// Given a card_index, return the starting address of the first block in the heap
// that straddles into this card. If this card is co-initial with an object, then
// this would return the first address of the range that this card covers, which is
// where the card's first object also begins.
// TODO: collect some stats for the size of walks backward over cards.
// For larger objects, a logarithmic BOT such as used by G1 might make the
// backwards walk potentially faster.
template<typename RememberedSet>
HeapWord*
ShenandoahCardCluster<RememberedSet>::block_start(const size_t card_index) const {

  HeapWord* left = _rs->addr_for_card_index(card_index);

#ifdef ASSERT
  assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
  ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left);
  assert(region->is_old(), "Do not use for young regions");
  // For humongous regions it's more efficient to jump directly to the
  // start region.
  assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif
  if (starts_object(card_index) && get_first_start(card_index) == 0) {
    // This card contains a co-initial object; a fortiori, it also covers
    // the case of a card being the first in a region.
    assert(oopDesc::is_oop(cast_to_oop(left)), "Should be an object");
    return left;
  }

  HeapWord* p = nullptr;
  oop obj = cast_to_oop(p);
  ssize_t cur_index = (ssize_t)card_index;
  assert(cur_index >= 0, "Overflow");
  assert(cur_index > 0, "Should have returned above");
  // Walk backwards over the cards...
  while (--cur_index > 0 && !starts_object(cur_index)) {
   // ... to the one that starts the object
  }
  // cur_index should start an object: we should not have walked
  // past the left end of the region.
  assert(cur_index >= 0 && (cur_index <= (ssize_t)card_index), "Error");
  assert(region->bottom() <= _rs->addr_for_card_index(cur_index),
         "Fell off the bottom of containing region");
  assert(starts_object(cur_index), "Error");
  size_t offset = get_last_start(cur_index);
  // We could avoid this call by using card-size arithmetic instead.
  p = _rs->addr_for_card_index(cur_index) + offset;
  // Recall that we already dealt with the co-initial object case above
  assert(p < left, "obj should start before left");
  // While it is safe to ask an object its size in the loop that
  // follows, the (disabled) loop should never be needed.
  // 1. we ask this question only for regions in the old generation
  // 2. there is no direct allocation ever by mutators in old generation
  //    regions. Only GC will ever allocate in old regions, and then
  //    too only during promotion/evacuation phases. Thus there is no danger
  //    of races between reading from and writing to the object start array,
  //    or of asking partially initialized objects their size (in the loop below).
  // 3. only GC asks this question during phases when it is not concurrently
  //    evacuating/promoting, viz. during concurrent root scanning (before
  //    the evacuation phase) and during concurrent update refs (after the
  //    evacuation phase) of young collections. This is never called
  //    during old or global collections.
  // 4. Every allocation under TAMS updates the object start array.
  NOT_PRODUCT(obj = cast_to_oop(p);)
  assert(oopDesc::is_oop(obj), "Should be an object");
#define WALK_FORWARD_IN_BLOCK_START false
  while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
    p += obj->size();
  }
#undef WALK_FORWARD_IN_BLOCK_START // false
  assert(p + obj->size() > left, "obj should end after left");
  return p;
}
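
// For illustration only (card indices are hypothetical): if a large object starts at
// offset 24 of card 100 and spans through card 107, then block_start(104) walks left
// from card 104 until it reaches card 100, the first card with its starts-object bit
// set, and returns addr_for_card_index(100) + last_start(100), i.e. the start of the
// straddling object. The walk is linear in the number of cards spanned, which is the
// cost the TODO above suggests a G1-style logarithmic BOT could reduce.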

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::last_valid_index() { return _rs->last_valid_index(); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::total_cards() { return _rs->total_cards(); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::card_index_for_addr(HeapWord *p) { return _rs->card_index_for_addr(p); }

template<typename RememberedSet>
inline HeapWord *
ShenandoahScanRemembered<RememberedSet>::addr_for_card_index(size_t card_index) { return _rs->addr_for_card_index(card_index); }

template<typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::is_card_dirty(size_t card_index) { return _rs->is_card_dirty(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(size_t card_index) { _rs->mark_card_as_dirty(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(size_t card_index, size_t num_cards) { _rs->mark_range_as_dirty(card_index, num_cards); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(size_t card_index) { _rs->mark_card_as_clean(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_clean(size_t card_index, size_t num_cards) { _rs->mark_range_as_clean(card_index, num_cards); }

template<typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::is_card_dirty(HeapWord *p) { return _rs->is_card_dirty(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(HeapWord *p) { _rs->mark_card_as_dirty(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_dirty(p, num_heap_words); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(HeapWord *p) { _rs->mark_card_as_clean(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_clean(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_clean(p, num_heap_words); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::cluster_count() { return _rs->cluster_count(); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::reset_object_range(HeapWord *from, HeapWord *to) {
  _scc->reset_object_range(from, to);
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::register_object(HeapWord *addr) {
  _scc->register_object(addr);
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::register_object_without_lock(HeapWord *addr) {
  _scc->register_object_without_lock(addr);
}

template <typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx) {

  size_t index = card_index_for_addr(address);
  if (!_scc->starts_object(index)) {
    return false;
  }
  HeapWord* base_addr = addr_for_card_index(index);
  size_t offset = _scc->get_first_start(index);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Verify that I can find this object within its enclosing card by scanning forward from first_start.
  while (base_addr + offset < address) {
    oop obj = cast_to_oop(base_addr + offset);
    if (!ctx || ctx->is_marked(obj)) {
      offset += obj->size();
    } else {
      // If this object is not live, don't trust its size(); all objects above tams are live.
      ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
      HeapWord* tams = ctx->top_at_mark_start(r);
      offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr;
    }
  }
  if (base_addr + offset != address) {
    return false;
  }

  // At this point, offset represents the object whose registration we are verifying.  We know that at least this
  // object resides within this card's memory.

  // Make sure that last_offset is properly set for the enclosing card, but we can't verify this for
  // candidate collection-set regions during mixed evacuations, so disable this check in general
  // during mixed evacuations.

  ShenandoahHeapRegion* r = heap->heap_region_containing(base_addr + offset);
  size_t max_offset = r->top() - base_addr;
  if (max_offset > CardTable::card_size_in_words()) {
    max_offset = CardTable::card_size_in_words();
  }
  size_t prev_offset;
  if (!ctx) {
    do {
      oop obj = cast_to_oop(base_addr + offset);
      prev_offset = offset;
      offset += obj->size();
    } while (offset < max_offset);
    if (_scc->get_last_start(index) != prev_offset) {
      return false;
    }

    // base + offset represents address of first object that starts on following card, if there is one.

    // Notes: base_addr is addr_for_card_index(index)
    //        base_addr + offset is end of the object we are verifying
    //        cannot use card_index_for_addr(base_addr + offset) because it asserts arg < end of whole heap
    size_t end_card_index = index + offset / CardTable::card_size_in_words();

    if (end_card_index > index && end_card_index <= _rs->last_valid_index()) {
      // If there is a following object registered on the next card, it should begin where this object ends.
      if (_scc->starts_object(end_card_index) &&
          ((addr_for_card_index(end_card_index) + _scc->get_first_start(end_card_index)) != (base_addr + offset))) {
        return false;
      }
    }

    // Ensure that no other objects are registered "inside" of this one.
    for (index++; index < end_card_index; index++) {
      if (_scc->starts_object(index)) {
        return false;
      }
    }
  } else {
    // This is a mixed evacuation or a global collection: rely on mark bits to identify which objects need to be properly registered.
    assert(!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Cannot rely on mark context here.");
    // If the object reaching or spanning the end of this card's memory is marked, then last_offset for this card
    // should represent this object.  Otherwise, last_offset is a don't care.
    ShenandoahHeapRegion* region = heap->heap_region_containing(base_addr + offset);
    HeapWord* tams = ctx->top_at_mark_start(region);
    oop last_obj = nullptr;
    do {
      oop obj = cast_to_oop(base_addr + offset);
      if (ctx->is_marked(obj)) {
        prev_offset = offset;
        offset += obj->size();
        last_obj = obj;
      } else {
        offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr;
        // If there are no marked objects remaining in this region, offset equals tams - base_addr.  If this offset is
        // greater than max_offset, we will immediately exit this loop.  Otherwise, the next iteration of the loop will
        // treat the object at offset as marked and live (because address >= tams) and we will continue iterating object
        // by object, consulting the size() fields of each.
      }
    } while (offset < max_offset);
    if (last_obj != nullptr && prev_offset + last_obj->size() >= max_offset) {
      // last marked object extends beyond end of card
      if (_scc->get_last_start(index) != prev_offset) {
        return false;
      }
      // otherwise, the value of _scc->get_last_start(index) is a don't care because it represents a dead object and we
      // cannot verify its context
    }
  }
  return true;
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::coalesce_objects(HeapWord *addr, size_t length_in_words) {
  _scc->coalesce_objects(addr, length_in_words);
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_empty(HeapWord *addr, size_t length_in_words) {
  _rs->mark_range_as_clean(addr, length_in_words);
  _scc->clear_objects_in_range(addr, length_in_words);
}
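
// mark_range_as_empty() resets both layers of remembered-set state for the range: the
// card marks themselves (via mark_range_as_clean) and the per-card object-start metadata
// (via clear_objects_in_range), so subsequent remembered-set scans neither visit the
// range nor try to resolve object starts within it.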

// Process all objects starting within count clusters beginning with first_cluster and for which the start address is
// less than end_of_range.  For any non-array object whose header lies on a dirty card, scan the entire object,
// even if its end reaches beyond end_of_range. Object arrays, on the other hand, are precisely dirtied and
// only the portions of the array on dirty cards need to be scanned.
//
// Do not CANCEL within process_clusters.  It is assumed that if a worker thread accepts responsibility for processing
// a chunk of work, it will finish the work it starts.  Otherwise, the chunk of work will be lost in the transition to
// degenerated execution, leading to dangling references.
template<typename RememberedSet>
template <typename ClosureType>
void ShenandoahScanRemembered<RememberedSet>::process_clusters(size_t first_cluster, size_t count, HeapWord* end_of_range,
                                                               ClosureType* cl, bool use_write_table, uint worker_id) {

  // If old-gen evacuation is active, then MarkingContext for old-gen heap regions is valid.  We use the MarkingContext
  // bits to determine which objects within a DIRTY card need to be scanned.  This is necessary because old-gen heap
  // regions that are in the candidate collection set have not been coalesced and filled.  Thus, these heap regions
  // may contain zombie objects.  Zombie objects are known to be dead, but have not yet been "collected".  Scanning
  // zombie objects is unsafe because the Klass pointer is not reliable, objects referenced from a zombie may have been
  // collected (if dead), or relocated (if live), or if dead but not yet collected, we don't want to "revive" them
  // by marking them (when marking) or evacuating them (when updating references).

  // start and end addresses of range of objects to be scanned, clipped to end_of_range
  const size_t start_card_index = first_cluster * ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  const HeapWord* start_addr = _rs->addr_for_card_index(start_card_index);
  // clip at end_of_range (exclusive)
  HeapWord* end_addr = MIN2(end_of_range, (HeapWord*)start_addr + (count * ShenandoahCardCluster<RememberedSet>::CardsPerCluster
                                                                   * CardTable::card_size_in_words()));
  assert(start_addr < end_addr, "Empty region?");

  const size_t whole_cards = (end_addr - start_addr + CardTable::card_size_in_words() - 1)/CardTable::card_size_in_words();
  const size_t end_card_index = start_card_index + whole_cards - 1;
  log_debug(gc, remset)("Worker %u: cluster = " SIZE_FORMAT " count = " SIZE_FORMAT " eor = " INTPTR_FORMAT
                        " start_addr = " INTPTR_FORMAT " end_addr = " INTPTR_FORMAT " cards = " SIZE_FORMAT,
                        worker_id, first_cluster, count, p2i(end_of_range), p2i(start_addr), p2i(end_addr), whole_cards);

  // use_write_table states whether we are using the card table that is being
  // marked by the mutators. If false, we are using a snapshot of the card table
  // that is not subject to modifications. Even when this arg is true, and
  // the card table is being actively marked, SATB marking ensures that we need not
  // worry about cards marked after the processing here has passed them.
  const CardValue* const ctbm = _rs->get_card_table_byte_map(use_write_table);

  // If old gen evacuation is active, ctx will hold the completed marking of
  // old generation objects. We'll only scan objects that are marked live by
  // the old generation marking. These include objects allocated since the
  // start of old generation marking (being those above TAMS).
  const ShenandoahHeap* heap = ShenandoahHeap::heap();
  const ShenandoahMarkingContext* ctx = heap->is_old_bitmap_stable() ?
                                        heap->marking_context() : nullptr;

  // The region we will scan is the half-open interval [start_addr, end_addr),
  // and lies entirely within a single region.
  const ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(start_addr);
  assert(region->contains(end_addr - 1), "Slice shouldn't cross regions");

  // This code may have implicit assumptions of examining only old gen regions.
  assert(region->is_old(), "We only expect to be processing old regions");
  assert(!region->is_humongous(), "Humongous regions can be processed more efficiently; "
                                  "see process_humongous_clusters()");
  // tams and ctx below are for old generation marking. As such, young gen roots must
  // consider everything above tams, since it doesn't represent a TAMS for young gen's
  // SATB marking.
  const HeapWord* tams = (ctx == nullptr ? region->bottom() : ctx->top_at_mark_start(region));

  NOT_PRODUCT(ShenandoahCardStats stats(whole_cards, card_stats(worker_id));)

  // In the case of imprecise marking, we remember the lowest address
  // scanned in a range of dirty cards, as we work our way left from the
  // highest end_addr. This serves as another upper bound on the address we will
  // scan as we move left over each contiguous range of dirty cards.
  HeapWord* upper_bound = nullptr;

  // Starting at the right end of the address range, walk backwards accumulating
  // a maximal dirty range of cards, then process those cards.
  ssize_t cur_index = (ssize_t) end_card_index;
  assert(cur_index >= 0, "Overflow");
  assert(((ssize_t)start_card_index) >= 0, "Overflow");
  while (cur_index >= (ssize_t)start_card_index) {

    // We'll continue the search starting with the card for the upper bound
    // address identified by the last dirty range that we processed, if any,
    // skipping any cards at higher addresses.
    if (upper_bound != nullptr) {
      ssize_t right_index = _rs->card_index_for_addr(upper_bound);
      assert(right_index >= 0, "Overflow");
      cur_index = MIN2(cur_index, right_index);
      assert(upper_bound < end_addr, "Program logic");
      end_addr  = upper_bound;   // lower end_addr
      upper_bound = nullptr;     // and clear upper_bound
      if (end_addr <= start_addr) {
        assert(right_index <= (ssize_t)start_card_index, "Program logic");
        // We are done with our cluster
        return;
      }
    }

    if (ctbm[cur_index] == CardTable::dirty_card_val()) {
      // ==== BEGIN DIRTY card range processing ====

      const size_t dirty_r = cur_index;  // record right end of dirty range (inclusive)
      while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::dirty_card_val()) {
        // walk back over contiguous dirty cards to find left end of dirty range (inclusive)
      }
      // [dirty_l, dirty_r] is a "maximal" closed interval range of dirty card indices:
      // it may not be maximal if we are using the write_table, because of concurrent
      // mutations dirtying the card-table. It may also not be maximal if an upper bound
      // was established by the scan of the previous chunk.
      const size_t dirty_l = cur_index + 1;   // record left end of dirty range (inclusive)
      // Check that we identified a boundary on our left
      assert(ctbm[dirty_l] == CardTable::dirty_card_val(), "First card in range should be dirty");
      assert(dirty_l == start_card_index || use_write_table
             || ctbm[dirty_l - 1] == CardTable::clean_card_val(),
             "Interval isn't maximal on the left");
      assert(dirty_r >= dirty_l, "Error");
      assert(ctbm[dirty_r] == CardTable::dirty_card_val(), "Last card in range should be dirty");
      // Record alternations, dirty run length, and dirty card count
      NOT_PRODUCT(stats.record_dirty_run(dirty_r - dirty_l + 1);)

      // Find first object that starts this range:
      // [left, right) is a maximal right-open interval of dirty cards
      HeapWord* left = _rs->addr_for_card_index(dirty_l);        // inclusive
      HeapWord* right = _rs->addr_for_card_index(dirty_r + 1);   // exclusive
      // Clip right to end_addr established above (still exclusive)
      right = MIN2(right, end_addr);
      assert(right <= region->top() && end_addr <= region->top(), "Busted bounds");
      const MemRegion mr(left, right);

      // NOTE: We'll not call block_start() repeatedly
      // on a very large object if its head card is dirty. If not,
      // (i.e. the head card is clean) we'll call it each time we
      // process a new dirty range on the object. This is always
      // the case for large object arrays, which are typically more
      // common.
      // TODO: It is worthwhile to memoize this, so as to avoid that
      // overhead, and it is easy to do, but deferred to a follow-up.
      HeapWord* p = _scc->block_start(dirty_l);
      oop obj = cast_to_oop(p);

      // PREFIX: The object that straddles into this range of dirty cards
      // from the left may be subject to special treatment unless
      // it is an object array.
      if (p < left && !obj->is_objArray()) {
        // The mutator (both compiler and interpreter, but not JNI?)
        // typically dirties imprecisely (i.e. only the head of an object),
        // but GC closures typically dirty the object precisely. (It would
        // be nice to have everything be precise for maximum efficiency.)
        //
        // To handle this, we check the head card of the object here and,
        // if dirty, (arrange to) scan the object in its entirety. If we
        // find the head card clean, we'll scan only the portion of the
        // object lying in the dirty card range below, assuming this was
        // the result of precise marking by GC closures.

        // index of the "head card" for p
        const size_t hc_index = _rs->card_index_for_addr(p);
        if (ctbm[hc_index] == CardTable::dirty_card_val()) {
          // Scan or skip the object, depending on location of its
          // head card, and remember that we'll have processed all
          // the objects back up to p, which is thus an upper bound
          // for the next iteration of a dirty card loop.
          upper_bound = p;   // remember upper bound for next chunk
          if (p < start_addr) {
            // if object starts in a previous slice, it'll be handled
            // in its entirety by the thread processing that slice; we can
            // skip over it and avoid an unnecessary extra scan.
            assert(obj == cast_to_oop(p), "Inconsistency detected");
            p += obj->size();
          } else {
            // the object starts in our slice, we scan it in its entirety
            assert(obj == cast_to_oop(p), "Inconsistency detected");
            if (ctx == nullptr || ctx->is_marked(obj)) {
              // Scan the object in its entirety
              p += obj->oop_iterate_size(cl);
            } else {
              assert(p < tams, "Error 1 in ctx/marking/tams logic");
              // Skip over any intermediate dead objects
              p = ctx->get_next_marked_addr(p, tams);
              assert(p <= tams, "Error 2 in ctx/marking/tams logic");
            }
          }
          assert(p > left, "Should have processed into interior of dirty range");
        }
      }

      size_t i = 0;
      HeapWord* last_p = nullptr;

      // BODY: Deal with (other) objects in this dirty card range
      while (p < right) {
        obj = cast_to_oop(p);
        // walk right scanning eligible objects
        if (ctx == nullptr || ctx->is_marked(obj)) {
          // we need to remember the last object ptr we scanned, in case we need to
          // complete a partial suffix scan after mr, see below
          last_p = p;
          // apply the closure to the oops in the portion of
          // the object within mr.
          p += obj->oop_iterate_size(cl, mr);
          NOT_PRODUCT(i++);
        } else {
          // forget the last object pointer we remembered
          last_p = nullptr;
          assert(p < tams, "Tams and above are implicitly marked in ctx");
          // object under tams isn't marked: skip to next live object
          p = ctx->get_next_marked_addr(p, tams);
          assert(p <= tams, "Error 3 in ctx/marking/tams logic");
        }
      }

      // TODO: if an objArray then only use mr, else just iterate over entire object;
      // that would avoid the special treatment of suffix below.

      // SUFFIX: Fix up a possible incomplete scan at right end of window
      // by scanning the portion of a non-objArray that wasn't done.
      if (p > right && last_p != nullptr) {
        assert(last_p < right, "Error");
        // check if last_p suffix needs scanning
        const oop last_obj = cast_to_oop(last_p);
        if (!last_obj->is_objArray()) {
          // scan the remaining suffix of the object
          const MemRegion last_mr(right, p);
          assert(p == last_p + last_obj->size(), "Would miss portion of last_obj");
          last_obj->oop_iterate(cl, last_mr);
          log_debug(gc, remset)("Fixed up non-objArray suffix scan in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                                p2i(last_mr.start()), p2i(last_mr.end()));
        } else {
          log_debug(gc, remset)("Skipped suffix scan of objArray in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                                p2i(right), p2i(p));
        }
      }
      NOT_PRODUCT(stats.record_scan_obj_cnt(i);)

      // ==== END   DIRTY card range processing ====
    } else {
      // ==== BEGIN CLEAN card range processing ====

      assert(ctbm[cur_index] == CardTable::clean_card_val(), "Error");
      // walk back over contiguous clean cards
      size_t i = 0;
      while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::clean_card_val()) {
        NOT_PRODUCT(i++);
      }
      // Record alternations, clean run length, and clean card count
      NOT_PRODUCT(stats.record_clean_run(i);)

      // ==== END CLEAN card range processing ====
    }
  }
}
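
// For illustration only (addresses and card states below are hypothetical): suppose the
// backward walk above finds a dirty run covering [left, right) and block_start() returns
// p < left, i.e. some object straddles into the run from the left.
//
//   - PREFIX: if that object's head card is also dirty and the object is not an objArray,
//     it is scanned in its entirety (or skipped if it starts in a previous slice), and p
//     becomes the upper_bound for the next, lower dirty run so it is not rescanned;
//   - BODY:   objects starting in [left, right) are scanned restricted to mr = [left, right),
//     skipping unmarked objects below TAMS whenever a marking context is available;
//   - SUFFIX: if the last scanned object extends past right and is not an objArray, the
//     remainder [right, p) is scanned immediately, since no later dirty run is guaranteed
//     to cover it.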

// Given that this range of clusters is known to lie within a humongous object spanned by region r, scan the
// portion of the humongous object that corresponds to the specified range.
template<typename RememberedSet>
template <typename ClosureType>
inline void
ShenandoahScanRemembered<RememberedSet>::process_humongous_clusters(ShenandoahHeapRegion* r, size_t first_cluster, size_t count,
                                                                    HeapWord *end_of_range, ClosureType *cl, bool use_write_table) {
  ShenandoahHeapRegion* start_region = r->humongous_start_region();
  HeapWord* p = start_region->bottom();
  oop obj = cast_to_oop(p);
  assert(r->is_humongous(), "Only process humongous regions here");
  assert(start_region->is_humongous_start(), "Should be start of humongous region");
  assert(p + obj->size() >= end_of_range, "Humongous object ends before range ends");

  size_t first_card_index = first_cluster * ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  HeapWord* first_cluster_addr = _rs->addr_for_card_index(first_card_index);
  size_t spanned_words = count * ShenandoahCardCluster<RememberedSet>::CardsPerCluster * CardTable::card_size_in_words();
  start_region->oop_iterate_humongous_slice(cl, true, first_cluster_addr, spanned_words, use_write_table);
}


// This method takes a region and determines the end of the region that the worker can scan.
template<typename RememberedSet>
template <typename ClosureType>
inline void
ShenandoahScanRemembered<RememberedSet>::process_region_slice(ShenandoahHeapRegion *region, size_t start_offset, size_t clusters,
                                                              HeapWord *end_of_range, ClosureType *cl, bool use_write_table,
                                                              uint worker_id) {

  // This is called only for young gen collection, when we scan old gen regions
  assert(region->is_old(), "Expecting an old region");
  HeapWord *start_of_range = region->bottom() + start_offset;
  size_t start_cluster_no = cluster_for_addr(start_of_range);
  assert(addr_for_cluster(start_cluster_no) == start_of_range, "process_region_slice range must align on cluster boundary");

  // region->end() represents the end of memory spanned by this region, but not all of this
  //   memory is eligible to be scanned because some of this memory has not yet been allocated.
  //
  // region->top() represents the end of allocated memory within this region.  Any addresses
  //   beyond region->top() should not be scanned as that memory does not hold valid objects.

  if (use_write_table) {
    // This is update-refs servicing.
    if (end_of_range > region->get_update_watermark()) {
      end_of_range = region->get_update_watermark();
    }
  } else {
    // This is concurrent mark servicing.  Note that TAMS for this region is TAMS at start of old-gen
    // collection.  Here, we need to scan up to TAMS for most recently initiated young-gen collection.
    // Since all LABs are retired at init mark, and since replacement LABs are allocated lazily, and since no
    // promotions occur until evacuation phase, TAMS for most recent young-gen is same as top().
    if (end_of_range > region->top()) {
      end_of_range = region->top();
    }
  }

  log_debug(gc)("Remembered set scan processing Region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT ", using %s table",
                region->index(), p2i(start_of_range), p2i(end_of_range),
                use_write_table ? "read/write (updating)" : "read (marking)");

  // Note that end_of_range may point to the middle of a cluster because we limit scanning to
  // region->top() or region->get_update_watermark(). We avoid processing past end_of_range.
  // Objects that start between start_of_range and end_of_range, including humongous objects, will
  // be fully processed by process_clusters. In no case should we need to scan past end_of_range.
  if (start_of_range < end_of_range) {
    if (region->is_humongous()) {
      ShenandoahHeapRegion* start_region = region->humongous_start_region();
      // TODO: ysr : This will be called multiple times with same start_region, but different start_cluster_no.
      // Check that it does the right thing here, and doesn't do redundant work. Also see if the call API/interface
      // can be simplified.
      process_humongous_clusters(start_region, start_cluster_no, clusters, end_of_range, cl, use_write_table);
    } else {
      // TODO: ysr The start_of_range calculated above is discarded and may be calculated again in process_clusters().
      // See if the redundant and wasted calculations can be avoided, and if the call parameters can be cleaned up.
      // It almost sounds like this set of methods needs a working class to stash away some useful info that can be
      // efficiently passed around amongst these methods, as well as related state. Note that we can't use
      // ShenandoahScanRemembered as there seems to be only one instance of that object for the heap which is shared
      // by all workers. Note that there are also task methods which call these which may have per worker storage.
      // We need to be careful however that if the number of workers changes dynamically that state isn't sequestered
      // and doesn't become obsolete.
      process_clusters(start_cluster_no, clusters, end_of_range, cl, use_write_table, worker_id);
    }
  }
}
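
// For illustration only: a typical caller is a parallel remembered-set scanning task in
// which each worker repeatedly claims a ShenandoahRegionChunk (see the iterator near the
// end of this file) and processes it roughly as follows. This is a hypothetical sketch,
// not the actual task code; in particular the clipping of the end address and the cluster
// count computation are assumptions.
//
//   ShenandoahRegionChunk chunk;
//   while (iterator.next(&chunk)) {
//     ShenandoahHeapRegion* r = chunk._r;
//     if (r->is_old() && r->is_active()) {
//       HeapWord* end   = r->bottom() + chunk._chunk_offset + chunk._chunk_size;
//       size_t clusters = chunk._chunk_size /
//                         (CardTable::card_size_in_words() *
//                          ShenandoahCardCluster<RememberedSet>::CardsPerCluster);
//       scanner->process_region_slice(r, chunk._chunk_offset, clusters, end, cl,
//                                     use_write_table, worker_id);
//     }
//   }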

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::cluster_for_addr(HeapWordImpl **addr) {
  size_t card_index = _rs->card_index_for_addr(addr);
  size_t result = card_index / ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  return result;
}

template<typename RememberedSet>
inline HeapWord*
ShenandoahScanRemembered<RememberedSet>::addr_for_cluster(size_t cluster_no) {
  size_t card_index = cluster_no * ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  return addr_for_card_index(card_index);
}
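
// For illustration only (the concrete figures assume 512-byte cards and 64 cards per
// cluster, both of which are assumptions rather than claims about the configured sizes):
// a cluster spans CardsPerCluster consecutive cards, so one cluster would cover
// 64 * 64 = 4096 HeapWords (32 KB). cluster_for_addr() and addr_for_cluster() are the
// card-index computations above scaled by CardsPerCluster, so
// addr_for_cluster(cluster_for_addr(p)) rounds p down to its cluster's first address.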

// This is used only for debug verification so don't worry about making the scan parallel.
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::roots_do(OopIterateClosure* cl) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (size_t i = 0, n = heap->num_regions(); i < n; ++i) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (region->is_old() && region->is_active() && !region->is_cset()) {
      HeapWord* start_of_range = region->bottom();
      HeapWord* end_of_range = region->top();
      size_t start_cluster_no = cluster_for_addr(start_of_range);
      size_t num_heapwords = end_of_range - start_of_range;
      unsigned int cluster_size = CardTable::card_size_in_words() *
                                  ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
      size_t num_clusters = (size_t) ((num_heapwords - 1 + cluster_size) / cluster_size);

      // Remembered set scanner
      if (region->is_humongous()) {
        process_humongous_clusters(region->humongous_start_region(), start_cluster_no, num_clusters, end_of_range, cl,
                                   false /* use_write_table */);
      } else {
        process_clusters(start_cluster_no, num_clusters, end_of_range, cl,
                         false /* use_write_table */, 0 /* fake worker id */);
      }
    }
  }
}

#ifndef PRODUCT
// Log given card stats
template<typename RememberedSet>
inline void ShenandoahScanRemembered<RememberedSet>::log_card_stats(HdrSeq* stats) {
  for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) {
    log_info(gc, remset)("%18s: [ %8.2f %8.2f %8.2f %8.2f %8.2f ]",
      _card_stats_name[i],
      stats[i].percentile(0), stats[i].percentile(25),
      stats[i].percentile(50), stats[i].percentile(75),
      stats[i].maximum());
  }
}

// Log card stats for all nworkers for a specific phase t
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::log_card_stats(uint nworkers, CardStatLogType t) {
  assert(ShenandoahEnableCardStats, "Do not call");
  HdrSeq* sum_stats = card_stats_for_phase(t);
  log_info(gc, remset)("%s", _card_stat_log_type[t]);
  for (uint i = 0; i < nworkers; i++) {
    log_worker_card_stats(i, sum_stats);
  }

  // Every so often, log the cumulative global stats
  if (++_card_stats_log_counter[t] >= ShenandoahCardStatsLogInterval) {
    _card_stats_log_counter[t] = 0;
    log_info(gc, remset)("Cumulative stats");
    log_card_stats(sum_stats);
  }
}

// Log card stats for the given worker_id, and clear them after merging into the given cumulative stats
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::log_worker_card_stats(uint worker_id, HdrSeq* sum_stats) {
  assert(ShenandoahEnableCardStats, "Do not call");

  HdrSeq* worker_card_stats = card_stats(worker_id);
  log_info(gc, remset)("Worker %u Card Stats: ", worker_id);
  log_card_stats(worker_card_stats);
  // Merge worker stats into the cumulative stats and clear worker stats
  merge_worker_card_stats_cumulative(worker_card_stats, sum_stats);
}

template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::merge_worker_card_stats_cumulative(
  HdrSeq* worker_stats, HdrSeq* sum_stats) {
  for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) {
    sum_stats[i].add(worker_stats[i]);
    worker_stats[i].clear();
  }
}
#endif

inline bool ShenandoahRegionChunkIterator::has_next() const {
  return _index < _total_chunks;
}

inline bool ShenandoahRegionChunkIterator::next(struct ShenandoahRegionChunk *assignment) {
  if (_index >= _total_chunks) {
    return false;
  }
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  if (new_index > _total_chunks) {
    // First worker that hits new_index == _total_chunks continues, other
    // contending workers return false.
    return false;
  }
  // convert to zero-based indexing
  new_index--;
  assert(new_index < _total_chunks, "Error");

  // Find the group number for the assigned chunk index
  size_t group_no;
  for (group_no = 0; new_index >= _group_entries[group_no]; group_no++)
    ;
  assert(group_no < _num_groups, "Cannot have group no greater than or equal to _num_groups");

  // All size computations measured in HeapWord
  size_t region_size_words = ShenandoahHeapRegion::region_size_words();
  size_t group_region_index = _region_index[group_no];
  size_t group_region_offset = _group_offset[group_no];

  size_t index_within_group = (group_no == 0) ? new_index : new_index - _group_entries[group_no - 1];
  size_t group_chunk_size = _group_chunk_size[group_no];
  size_t offset_of_this_chunk = group_region_offset + index_within_group * group_chunk_size;
  size_t regions_spanned_by_chunk_offset = offset_of_this_chunk / region_size_words;
  size_t offset_within_region = offset_of_this_chunk % region_size_words;

  size_t region_index = group_region_index + regions_spanned_by_chunk_offset;

  assignment->_r = _heap->get_region(region_index);
  assignment->_chunk_offset = offset_within_region;
  assignment->_chunk_size = group_chunk_size;
  return true;
}
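
// For illustration only (the group layout below is hypothetical; the real group sizes
// are computed in the iterator's constructor): suppose group 0 holds 8 chunks of one
// full region each and group 1 holds 16 half-region chunks, so _group_entries = {8, 24}.
// A worker that atomically claims new_index == 10 falls in group 1 (10 >= 8) with
// index_within_group == 2; its linear offset is _group_offset[1] + 2 * (half a region),
// measured from the group's first region (_region_index[1]), and next() folds that into
// a (region_index, offset_within_region, chunk_size) assignment.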

template<class T>
inline void ShenandoahVerifyNoYoungRefsClosure::work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    assert(!_heap->is_in_young(obj), "Found a young ref");
  }
}

#endif   // SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP