1 /* 2 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP

#include "memory/iterator.hpp"
#include "oops/oop.hpp"
#include "oops/objArrayOop.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCardStats.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "logging/log.hpp"

// Largest card index that maps to a valid heap address (delegates to the card table).
inline size_t
ShenandoahDirectCardMarkRememberedSet::last_valid_index() const {
  return _card_table->last_valid_index();
}

// Total number of cards covered by this remembered set (cached member).
inline size_t
ShenandoahDirectCardMarkRememberedSet::total_cards() const {
  return _total_card_count;
}

// Index of the card covering heap address p.
inline size_t
ShenandoahDirectCardMarkRememberedSet::card_index_for_addr(HeapWord *p) const {
  return _card_table->index_for(p);
}

// First heap address covered by the card at card_index.
inline HeapWord*
ShenandoahDirectCardMarkRememberedSet::addr_for_card_index(size_t card_index) const {
  return _whole_heap_base + CardTable::card_size_in_words() * card_index;
}

// Raw byte map of either the write table (the one concurrently dirtied by
// mutators) or the read table (the stable copy used while scanning).
inline const CardValue*
ShenandoahDirectCardMarkRememberedSet::get_card_table_byte_map(bool use_write_table) const {
  return use_write_table ?
           _card_table->write_byte_map()
           : _card_table->read_byte_map();
}

// Is the card at card_index dirty in the write table?
inline bool
ShenandoahDirectCardMarkRememberedSet::is_write_card_dirty(size_t card_index) const {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  return (bp[0] == CardTable::dirty_card_val());
}

// Is the card at card_index dirty in the read table?
inline bool
ShenandoahDirectCardMarkRememberedSet::is_card_dirty(size_t card_index) const {
  CardValue* bp = &(_card_table->read_byte_map())[card_index];
  return (bp[0] == CardTable::dirty_card_val());
}

// Dirty the card at card_index in the write table.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(size_t card_index) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  bp[0] = CardTable::dirty_card_val();
}

// Dirty num_cards consecutive write-table cards starting at card_index.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(size_t card_index, size_t num_cards) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  while (num_cards-- > 0) {
    *bp++ = CardTable::dirty_card_val();
  }
}

// Clean the card at card_index in the write table.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(size_t card_index) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  bp[0] = CardTable::clean_card_val();
}

// Clean num_cards consecutive write-table cards starting at card_index.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(size_t card_index, size_t num_cards) {
  CardValue* bp = &(_card_table->write_byte_map())[card_index];
  while (num_cards-- > 0) {
    *bp++ = CardTable::clean_card_val();
  }
}

// Is the (read-table) card covering address p dirty?
inline bool
ShenandoahDirectCardMarkRememberedSet::is_card_dirty(HeapWord *p) const {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->read_byte_map())[index];
  return (bp[0] == CardTable::dirty_card_val());
}

// Dirty the write-table card covering address p.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(HeapWord *p) {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->write_byte_map())[index];
  bp[0] = CardTable::dirty_card_val();
}

// Dirty every write-table card overlapping [p, p + num_heap_words).
// Card pointers are computed directly off write_byte_map_base via _card_shift;
// the alignment test below relies on CardTable::card_size() being a power of two.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) {
  CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift];
  CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift];
  // If (p + num_heap_words) is not aligned on card boundary, we also need to dirty last card.
  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
    end_bp++;
  }
  while (bp < end_bp) {
    *bp++ = CardTable::dirty_card_val();
  }
}

// Clean the write-table card covering address p.
inline void
ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(HeapWord *p) {
  size_t index = card_index_for_addr(p);
  CardValue* bp = &(_card_table->write_byte_map())[index];
  bp[0] = CardTable::clean_card_val();
}

// Clean every write-table card overlapping [p, p + num_heap_words).
inline void
ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(HeapWord *p, size_t num_heap_words) {
  CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift];
  CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift];
  // If (p + num_heap_words) is not aligned on card boundary, we also need to clean last card.
  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
    end_bp++;
  }
  while (bp < end_bp) {
    *bp++ = CardTable::clean_card_val();
  }
}

// Number of card clusters covered by this remembered set (cached member).
inline size_t
ShenandoahDirectCardMarkRememberedSet::cluster_count() const {
  return _cluster_count;
}

// No lock required because arguments align with card boundaries.
// Zero the object_starts entry (starts-object bit plus first/last offsets)
// for every card in the card-aligned range [from, to).
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::reset_object_range(HeapWord* from, HeapWord* to) {
  assert(((((unsigned long long) from) & (CardTable::card_size() - 1)) == 0) &&
         ((((unsigned long long) to) & (CardTable::card_size() - 1)) == 0),
         "reset_object_range bounds must align with card boundaries");
  size_t card_at_start = _rs->card_index_for_addr(from);
  size_t num_cards = (to - from) / CardTable::card_size_in_words();

  // Clearing the whole short_word wipes both offsets and the starts-object bit at once.
  for (size_t i = 0; i < num_cards; i++) {
    object_starts[card_at_start + i].short_word = 0;
  }
}

// Assume only one thread at a time registers objects pertaining to
// each card-table entry's range of memory.
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::register_object(HeapWord* address) {
  shenandoah_assert_heaplocked();

  register_object_without_lock(address);
}

// Record that an object starts at address: set the starts-object bit for the
// enclosing card and widen its [first_start, last_start] offset range as needed.
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::register_object_without_lock(HeapWord* address) {
  size_t card_at_start = _rs->card_index_for_addr(address);
  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
  // Word offset within the card; the uint8_t type presumes the offset fits in
  // a byte (i.e. card_size_in_words() <= 256) — consistent with the
  // object_starts encoding used by get_first_start/get_last_start.
  uint8_t offset_in_card = address - card_start_address;

  if (!starts_object(card_at_start)) {
    // First object registered on this card.
    set_starts_object_bit(card_at_start);
    set_first_start(card_at_start, offset_in_card);
    set_last_start(card_at_start, offset_in_card);
  } else {
    // Card already holds object starts: widen the tracked range if necessary.
    if (offset_in_card < get_first_start(card_at_start))
      set_first_start(card_at_start, offset_in_card);
    if (offset_in_card > get_last_start(card_at_start))
      set_last_start(card_at_start, offset_in_card);
  }
}

// A single (filler) object now spans [address, address + length_in_words),
// subsuming the objects previously registered there.  Fix up the object-start
// metadata for every card that the coalesced range touches.
template<typename RememberedSet>
inline void
ShenandoahCardCluster<RememberedSet>::coalesce_objects(HeapWord* address, size_t length_in_words) {

  size_t card_at_start = _rs->card_index_for_addr(address);
  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
  size_t card_at_end = card_at_start + ((address + length_in_words) - card_start_address) / CardTable::card_size_in_words();

  if (card_at_start == card_at_end) {
    // There are no changes to the get_first_start array.  Either get_first_start(card_at_start) returns this coalesced object,
    // or it returns an object that precedes the coalesced object.
    if (card_start_address + get_last_start(card_at_start) < address + length_in_words) {
      uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
      // The object that used to be the last object starting within this card is being subsumed within the coalesced
      // object.  Since we always coalesce entire objects, this condition only occurs if the last object ends before or at
      // the end of the card's memory range and there is no object following this object.  In this case, adjust last_start
      // to represent the start of the coalesced range.
      set_last_start(card_at_start, coalesced_offset);
    }
    // Else, no changes to last_starts information.  Either get_last_start(card_at_start) returns the object that immediately
    // follows the coalesced object, or it returns an object that follows the object immediately following the coalesced object.
  } else {
    uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
    if (get_last_start(card_at_start) > coalesced_offset) {
      // Existing last start is being coalesced, create new last start
      set_last_start(card_at_start, coalesced_offset);
    }
    // otherwise, get_last_start(card_at_start) must equal coalesced_offset

    // All the cards between first and last get cleared.
    for (size_t i = card_at_start + 1; i < card_at_end; i++) {
      clear_starts_object_bit(i);
    }

    // follow_offset is where the coalesced range ends within the final card.
    uint8_t follow_offset = static_cast<uint8_t>((address + length_in_words) - _rs->addr_for_card_index(card_at_end));
    if (starts_object(card_at_end) && (get_first_start(card_at_end) < follow_offset)) {
      // It may be that after coalescing within this last card's memory range, the last card
      // no longer holds an object.
      if (get_last_start(card_at_end) >= follow_offset) {
        set_first_start(card_at_end, follow_offset);
      } else {
        // last_start is being coalesced so this card no longer has any objects.
        clear_starts_object_bit(card_at_end);
      }
    }
    // else
    //   card_at_end did not have an object, so it still does not have an object, or
    //   card_at_end had an object that starts after the coalesced object, so no changes required for card_at_end

  }
}

// Offset (in heap words) of the first object start within the card at card_index.
// Only the low FirstStartBits bits of the stored byte are the offset.
template<typename RememberedSet>
inline size_t
ShenandoahCardCluster<RememberedSet>::get_first_start(size_t card_index) const {
  assert(starts_object(card_index), "Can't get first start because no object starts here");
  return object_starts[card_index].offsets.first & FirstStartBits;
}

// Offset (in heap words) of the last object start within the card at card_index.
template<typename RememberedSet>
inline size_t
ShenandoahCardCluster<RememberedSet>::get_last_start(size_t card_index) const {
  assert(starts_object(card_index), "Can't get last start because no object starts here");
  return object_starts[card_index].offsets.last;
}

// Given a card_index, return the starting address of the first block in the heap
// that straddles into this card.  If this card is co-initial with an object, then
// this would return the first address of the range that this card covers, which is
// where the card's first object also begins.
// TODO: collect some stats for the size of walks backward over cards.
// For larger objects, a logarithmic BOT such as used by G1 might make the
// backwards walk potentially faster.
template<typename RememberedSet>
HeapWord*
ShenandoahCardCluster<RememberedSet>::block_start(const size_t card_index) const {

  HeapWord* left = _rs->addr_for_card_index(card_index);

#ifdef ASSERT
  assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
  ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left);
  assert(region->is_old(), "Do not use for young regions");
  // For HumongousRegion:s it's more efficient to jump directly to the
  // start region.
  assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif
  if (starts_object(card_index) && get_first_start(card_index) == 0) {
    // This card contains a co-initial object; a fortiori, it covers
    // also the case of a card being the first in a region.
    assert(oopDesc::is_oop(cast_to_oop(left)), "Should be an object");
    return left;
  }

  HeapWord* p = nullptr;
  oop obj = cast_to_oop(p);
  ssize_t cur_index = (ssize_t)card_index;
  assert(cur_index >= 0, "Overflow");
  assert(cur_index > 0, "Should have returned above");
  // Walk backwards over the cards...
  while (--cur_index > 0 && !starts_object(cur_index)) {
    // ... to the one that starts the object
  }
  // cur_index should start an object: we should not have walked
  // past the left end of the region.
  assert(cur_index >= 0 && (cur_index <= (ssize_t)card_index), "Error");
  assert(region->bottom() <= _rs->addr_for_card_index(cur_index),
         "Fell off the bottom of containing region");
  assert(starts_object(cur_index), "Error");
  // The straddling object is the LAST object starting in cur_index's card,
  // since any later-starting object there would itself start in a later card.
  size_t offset = get_last_start(cur_index);
  // can avoid call via card size arithmetic below instead
  p = _rs->addr_for_card_index(cur_index) + offset;
  // Recall that we already dealt with the co-initial object case above
  assert(p < left, "obj should start before left");
  // While it is safe to ask an object its size in the loop that
  // follows, the (ifdef'd out) loop should never be needed.
  // 1. we ask this question only for regions in the old generation
  // 2. there is no direct allocation ever by mutators in old generation
  //    regions. Only GC will ever allocate in old regions, and then
  //    too only during promotion/evacuation phases. Thus there is no danger
  //    of races between reading from and writing to the object start array,
  //    or of asking partially initialized objects their size (in the loop below).
  // 3. only GC asks this question during phases when it is not concurrently
  //    evacuating/promoting, viz. during concurrent root scanning (before
  //    the evacuation phase) and during concurrent update refs (after the
  //    evacuation phase) of young collections. This is never called
  //    during old or global collections.
  // 4. Every allocation under TAMS updates the object start array.
  NOT_PRODUCT(obj = cast_to_oop(p);)
  assert(oopDesc::is_oop(obj), "Should be an object");
#define WALK_FORWARD_IN_BLOCK_START false
  while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
    p += obj->size();
  }
#undef WALK_FORWARD_IN_BLOCK_START // false
  assert(p + obj->size() > left, "obj should end after left");
  return p;
}

// The ShenandoahScanRemembered accessors below simply forward to the
// underlying remembered set implementation (_rs).

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::last_valid_index() { return _rs->last_valid_index(); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::total_cards() { return _rs->total_cards(); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::card_index_for_addr(HeapWord *p) { return _rs->card_index_for_addr(p); }

template<typename RememberedSet>
inline HeapWord *
ShenandoahScanRemembered<RememberedSet>::addr_for_card_index(size_t card_index) { return _rs->addr_for_card_index(card_index); }

template<typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::is_card_dirty(size_t card_index) { return _rs->is_card_dirty(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(size_t card_index) { _rs->mark_card_as_dirty(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(size_t card_index, size_t num_cards) { _rs->mark_range_as_dirty(card_index, num_cards); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(size_t card_index) { _rs->mark_card_as_clean(card_index); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_clean(size_t card_index, size_t num_cards) {
_rs->mark_range_as_clean(card_index, num_cards); }

// More forwarders to the underlying remembered set (_rs) and card cluster (_scc).

template<typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::is_card_dirty(HeapWord *p) { return _rs->is_card_dirty(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(HeapWord *p) { _rs->mark_card_as_dirty(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_dirty(p, num_heap_words); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(HeapWord *p) { _rs->mark_card_as_clean(p); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>:: mark_range_as_clean(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_clean(p, num_heap_words); }

template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::cluster_count() { return _rs->cluster_count(); }

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::reset_object_range(HeapWord *from, HeapWord *to) {
  _scc->reset_object_range(from, to);
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::register_object(HeapWord *addr) {
  _scc->register_object(addr);
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::register_object_without_lock(HeapWord *addr) {
  _scc->register_object_without_lock(addr);
}

// Verify that the object at address is consistently recorded in the card
// cluster's object-start metadata; returns false at the first inconsistency.
// ctx, when non-null, is the completed old-generation marking context: only
// objects marked in it (or above TAMS) can be trusted to report their size().
template <typename RememberedSet>
inline bool
ShenandoahScanRemembered<RememberedSet>::verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx) {

  size_t index = card_index_for_addr(address);
  if (!_scc->starts_object(index)) {
    return false;
  }
  HeapWord* base_addr = addr_for_card_index(index);
  size_t offset = _scc->get_first_start(index);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Verify that I can find this object within its enclosing card by scanning forward from first_start.
  while (base_addr + offset < address) {
    oop obj = cast_to_oop(base_addr + offset);
    if (!ctx || ctx->is_marked(obj)) {
      offset += obj->size();
    } else {
      // If this object is not live, don't trust its size(); all objects above tams are live.
      ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
      HeapWord* tams = ctx->top_at_mark_start(r);
      offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr;
    }
  }
  if (base_addr + offset != address){
    return false;
  }

  // At this point, offset represents object whose registration we are verifying.  We know that at least this object resides
  // within this card's memory.

  // Make sure that last_offset is properly set for the enclosing card, but we can't verify this for
  // candidate collection-set regions during mixed evacuations, so disable this check in general
  // during mixed evacuations.

  ShenandoahHeapRegion* r = heap->heap_region_containing(base_addr + offset);
  // Clip the scan to the end of the card (or the region's top, whichever is lower).
  size_t max_offset = r->top() - base_addr;
  if (max_offset > CardTable::card_size_in_words()) {
    max_offset = CardTable::card_size_in_words();
  }
  size_t prev_offset;
  if (!ctx) {
    // No marking context: every object's size() can be trusted, so walk the
    // card linearly and check last_start against the last start observed.
    do {
      oop obj = cast_to_oop(base_addr + offset);
      prev_offset = offset;
      offset += obj->size();
    } while (offset < max_offset);
    if (_scc->get_last_start(index) != prev_offset) {
      return false;
    }

    // base + offset represents address of first object that starts on following card, if there is one.

    // Notes: base_addr is addr_for_card_index(index)
    //        base_addr + offset is end of the object we are verifying
    //        cannot use card_index_for_addr(base_addr + offset) because it asserts arg < end of whole heap
    size_t end_card_index = index + offset / CardTable::card_size_in_words();

    if (end_card_index > index && end_card_index <= _rs->last_valid_index()) {
      // If there is a following object registered on the next card, it should begin where this object ends.
      if (_scc->starts_object(end_card_index) &&
          ((addr_for_card_index(end_card_index) + _scc->get_first_start(end_card_index)) != (base_addr + offset))) {
        return false;
      }
    }

    // Assure that no other objects are registered "inside" of this one.
    for (index++; index < end_card_index; index++) {
      if (_scc->starts_object(index)) {
        return false;
      }
    }
  } else {
    // This is a mixed evacuation or a global collect: rely on mark bits to identify which objects need to be properly registered
    assert(!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Cannot rely on mark context here.");
    // If the object reaching or spanning the end of this card's memory is marked, then last_offset for this card
    // should represent this object.  Otherwise, last_offset is a don't care.
    ShenandoahHeapRegion* region = heap->heap_region_containing(base_addr + offset);
    HeapWord* tams = ctx->top_at_mark_start(region);
    oop last_obj = nullptr;
    do {
      oop obj = cast_to_oop(base_addr + offset);
      if (ctx->is_marked(obj)) {
        prev_offset = offset;
        offset += obj->size();
        last_obj = obj;
      } else {
        // Skip over a run of unmarked (dead) objects to the next marked address.
        offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr;
        // If there are no marked objects remaining in this region, offset equals tams - base_addr.  If this offset is
        // greater than max_offset, we will immediately exit this loop.  Otherwise, the next iteration of the loop will
        // treat the object at offset as marked and live (because address >= tams) and we will continue iterating object
        // by consulting the size() fields of each.
      }
    } while (offset < max_offset);
    if (last_obj != nullptr && prev_offset + last_obj->size() >= max_offset) {
      // last marked object extends beyond end of card
      if (_scc->get_last_start(index) != prev_offset) {
        return false;
      }
      // otherwise, the value of _scc->get_last_start(index) is a don't care because it represents a dead object and we
      // cannot verify its context
    }
  }
  return true;
}

template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::coalesce_objects(HeapWord *addr, size_t length_in_words) {
  _scc->coalesce_objects(addr, length_in_words);
}

// Clean the cards and forget the object starts for [addr, addr + length_in_words).
template<typename RememberedSet>
inline void
ShenandoahScanRemembered<RememberedSet>::mark_range_as_empty(HeapWord *addr, size_t length_in_words) {
  _rs->mark_range_as_clean(addr, length_in_words);
  _scc->clear_objects_in_range(addr, length_in_words);
}

// Process all objects starting within count clusters beginning with first_cluster and for which the start address is
// less than end_of_range.  For any non-array object whose header lies on a dirty card, scan the entire object,
// even if its end reaches beyond end_of_range.  Object arrays, on the other hand, are precisely dirtied and
// only the portions of the array on dirty cards need to be scanned.
//
// Do not CANCEL within process_clusters.  It is assumed that if a worker thread accepts responsibility for processing
// a chunk of work, it will finish the work it starts.  Otherwise, the chunk of work will be lost in the transition to
// degenerated execution, leading to dangling references.
549 template<typename RememberedSet> 550 template <typename ClosureType> 551 void ShenandoahScanRemembered<RememberedSet>::process_clusters(size_t first_cluster, size_t count, HeapWord* end_of_range, 552 ClosureType* cl, bool use_write_table, uint worker_id) { 553 554 assert(ShenandoahHeap::heap()->old_generation()->is_parseable(), "Old generation regions must be parseable for remembered set scan"); 555 // If old-gen evacuation is active, then MarkingContext for old-gen heap regions is valid. We use the MarkingContext 556 // bits to determine which objects within a DIRTY card need to be scanned. This is necessary because old-gen heap 557 // regions that are in the candidate collection set have not been coalesced and filled. Thus, these heap regions 558 // may contain zombie objects. Zombie objects are known to be dead, but have not yet been "collected". Scanning 559 // zombie objects is unsafe because the Klass pointer is not reliable, objects referenced from a zombie may have been 560 // collected (if dead), or relocated (if live), or if dead but not yet collected, we don't want to "revive" them 561 // by marking them (when marking) or evacuating them (when updating references). 
562 563 // start and end addresses of range of objects to be scanned, clipped to end_of_range 564 const size_t start_card_index = first_cluster * ShenandoahCardCluster<RememberedSet>::CardsPerCluster; 565 const HeapWord* start_addr = _rs->addr_for_card_index(start_card_index); 566 // clip at end_of_range (exclusive) 567 HeapWord* end_addr = MIN2(end_of_range, (HeapWord*)start_addr + (count * ShenandoahCardCluster<RememberedSet>::CardsPerCluster 568 * CardTable::card_size_in_words())); 569 assert(start_addr < end_addr, "Empty region?"); 570 571 const size_t whole_cards = (end_addr - start_addr + CardTable::card_size_in_words() - 1)/CardTable::card_size_in_words(); 572 const size_t end_card_index = start_card_index + whole_cards - 1; 573 log_debug(gc, remset)("Worker %u: cluster = " SIZE_FORMAT " count = " SIZE_FORMAT " eor = " INTPTR_FORMAT 574 " start_addr = " INTPTR_FORMAT " end_addr = " INTPTR_FORMAT " cards = " SIZE_FORMAT, 575 worker_id, first_cluster, count, p2i(end_of_range), p2i(start_addr), p2i(end_addr), whole_cards); 576 577 // use_write_table states whether we are using the card table that is being 578 // marked by the mutators. If false, we are using a snapshot of the card table 579 // that is not subject to modifications. Even when this arg is true, and 580 // the card table is being actively marked, SATB marking ensures that we need not 581 // worry about cards marked after the processing here has passed them. 582 const CardValue* const ctbm = _rs->get_card_table_byte_map(use_write_table); 583 584 // If old gen evacuation is active, ctx will hold the completed marking of 585 // old generation objects. We'll only scan objects that are marked live by 586 // the old generation marking. These include objects allocated since the 587 // start of old generation marking (being those above TAMS). 588 const ShenandoahHeap* heap = ShenandoahHeap::heap(); 589 const ShenandoahMarkingContext* ctx = heap->old_generation()->is_mark_complete() ? 
590 heap->marking_context() : nullptr; 591 592 // The region we will scan is the half-open interval [start_addr, end_addr), 593 // and lies entirely within a single region. 594 const ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(start_addr); 595 assert(region->contains(end_addr - 1), "Slice shouldn't cross regions"); 596 597 // This code may have implicit assumptions of examining only old gen regions. 598 assert(region->is_old(), "We only expect to be processing old regions"); 599 assert(!region->is_humongous(), "Humongous regions can be processed more efficiently;" 600 "see process_humongous_clusters()"); 601 // tams and ctx below are for old generation marking. As such, young gen roots must 602 // consider everything above tams, since it doesn't represent a TAMS for young gen's 603 // SATB marking. 604 const HeapWord* tams = (ctx == nullptr ? region->bottom() : ctx->top_at_mark_start(region)); 605 606 NOT_PRODUCT(ShenandoahCardStats stats(whole_cards, card_stats(worker_id));) 607 608 // In the case of imprecise marking, we remember the lowest address 609 // scanned in a range of dirty cards, as we work our way left from the 610 // highest end_addr. This serves as another upper bound on the address we will 611 // scan as we move left over each contiguous range of dirty cards. 612 HeapWord* upper_bound = nullptr; 613 614 // Starting at the right end of the address range, walk backwards accumulating 615 // a maximal dirty range of cards, then process those cards. 616 ssize_t cur_index = (ssize_t) end_card_index; 617 assert(cur_index >= 0, "Overflow"); 618 assert(((ssize_t)start_card_index) >= 0, "Overflow"); 619 while (cur_index >= (ssize_t)start_card_index) { 620 621 // We'll continue the search starting with the card for the upper bound 622 // address identified by the last dirty range that we processed, if any, 623 // skipping any cards at higher addresses. 
624 if (upper_bound != nullptr) { 625 ssize_t right_index = _rs->card_index_for_addr(upper_bound); 626 assert(right_index >= 0, "Overflow"); 627 cur_index = MIN2(cur_index, right_index); 628 assert(upper_bound < end_addr, "Program logic"); 629 end_addr = upper_bound; // lower end_addr 630 upper_bound = nullptr; // and clear upper_bound 631 if (end_addr <= start_addr) { 632 assert(right_index <= (ssize_t)start_card_index, "Program logic"); 633 // We are done with our cluster 634 return; 635 } 636 } 637 638 if (ctbm[cur_index] == CardTable::dirty_card_val()) { 639 // ==== BEGIN DIRTY card range processing ==== 640 641 const size_t dirty_r = cur_index; // record right end of dirty range (inclusive) 642 while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::dirty_card_val()) { 643 // walk back over contiguous dirty cards to find left end of dirty range (inclusive) 644 } 645 // [dirty_l, dirty_r] is a "maximal" closed interval range of dirty card indices: 646 // it may not be maximal if we are using the write_table, because of concurrent 647 // mutations dirtying the card-table. It may also not be maximal if an upper bound 648 // was established by the scan of the previous chunk. 
649 const size_t dirty_l = cur_index + 1; // record left end of dirty range (inclusive) 650 // Check that we identified a boundary on our left 651 assert(ctbm[dirty_l] == CardTable::dirty_card_val(), "First card in range should be dirty"); 652 assert(dirty_l == start_card_index || use_write_table 653 || ctbm[dirty_l - 1] == CardTable::clean_card_val(), 654 "Interval isn't maximal on the left"); 655 assert(dirty_r >= dirty_l, "Error"); 656 assert(ctbm[dirty_r] == CardTable::dirty_card_val(), "Last card in range should be dirty"); 657 // Record alternations, dirty run length, and dirty card count 658 NOT_PRODUCT(stats.record_dirty_run(dirty_r - dirty_l + 1);) 659 660 // Find first object that starts this range: 661 // [left, right) is a maximal right-open interval of dirty cards 662 HeapWord* left = _rs->addr_for_card_index(dirty_l); // inclusive 663 HeapWord* right = _rs->addr_for_card_index(dirty_r + 1); // exclusive 664 // Clip right to end_addr established above (still exclusive) 665 right = MIN2(right, end_addr); 666 assert(right <= region->top() && end_addr <= region->top(), "Busted bounds"); 667 const MemRegion mr(left, right); 668 669 // NOTE: We'll not call block_start() repeatedly 670 // on a very large object if its head card is dirty. If not, 671 // (i.e. the head card is clean) we'll call it each time we 672 // process a new dirty range on the object. This is always 673 // the case for large object arrays, which are typically more 674 // common. 675 // TODO: It is worthwhile to memoize this, so as to avoid that 676 // overhead, and it is easy to do, but deferred to a follow-up. 677 HeapWord* p = _scc->block_start(dirty_l); 678 oop obj = cast_to_oop(p); 679 680 // PREFIX: The object that straddles into this range of dirty cards 681 // from the left may be subject to special treatment unless 682 // it is an object array. 683 if (p < left && !obj->is_objArray()) { 684 // The mutator (both compiler and interpreter, but not JNI?) 
685 // typically dirty imprecisely (i.e. only the head of an object), 686 // but GC closures typically dirty the object precisely. (It would 687 // be nice to have everything be precise for maximum efficiency.) 688 // 689 // To handle this, we check the head card of the object here and, 690 // if dirty, (arrange to) scan the object in its entirety. If we 691 // find the head card clean, we'll scan only the portion of the 692 // object lying in the dirty card range below, assuming this was 693 // the result of precise marking by GC closures. 694 695 // index of the "head card" for p 696 const size_t hc_index = _rs->card_index_for_addr(p); 697 if (ctbm[hc_index] == CardTable::dirty_card_val()) { 698 // Scan or skip the object, depending on location of its 699 // head card, and remember that we'll have processed all 700 // the objects back up to p, which is thus an upper bound 701 // for the next iteration of a dirty card loop. 702 upper_bound = p; // remember upper bound for next chunk 703 if (p < start_addr) { 704 // if object starts in a previous slice, it'll be handled 705 // in its entirety by the thread processing that slice; we can 706 // skip over it and avoid an unnecessary extra scan. 
707 assert(obj == cast_to_oop(p), "Inconsistency detected"); 708 p += obj->size(); 709 } else { 710 // the object starts in our slice, we scan it in its entirety 711 assert(obj == cast_to_oop(p), "Inconsistency detected"); 712 if (ctx == nullptr || ctx->is_marked(obj)) { 713 // Scan the object in its entirety 714 p += obj->oop_iterate_size(cl); 715 } else { 716 assert(p < tams, "Error 1 in ctx/marking/tams logic"); 717 // Skip over any intermediate dead objects 718 p = ctx->get_next_marked_addr(p, tams); 719 assert(p <= tams, "Error 2 in ctx/marking/tams logic"); 720 } 721 } 722 assert(p > left, "Should have processed into interior of dirty range"); 723 } 724 } 725 726 size_t i = 0; 727 HeapWord* last_p = nullptr; 728 729 // BODY: Deal with (other) objects in this dirty card range 730 while (p < right) { 731 obj = cast_to_oop(p); 732 // walk right scanning eligible objects 733 if (ctx == nullptr || ctx->is_marked(obj)) { 734 // we need to remember the last object ptr we scanned, in case we need to 735 // complete a partial suffix scan after mr, see below 736 last_p = p; 737 // apply the closure to the oops in the portion of 738 // the object within mr. 739 p += obj->oop_iterate_size(cl, mr); 740 NOT_PRODUCT(i++); 741 } else { 742 // forget the last object pointer we remembered 743 last_p = nullptr; 744 assert(p < tams, "Tams and above are implicitly marked in ctx"); 745 // object under tams isn't marked: skip to next live object 746 p = ctx->get_next_marked_addr(p, tams); 747 assert(p <= tams, "Error 3 in ctx/marking/tams logic"); 748 } 749 } 750 751 // TODO: if an objArray then only use mr, else just iterate over entire object; 752 // that would avoid the special treatment of suffix below. 753 754 // SUFFIX: Fix up a possible incomplete scan at right end of window 755 // by scanning the portion of a non-objArray that wasn't done. 
756 if (p > right && last_p != nullptr) { 757 assert(last_p < right, "Error"); 758 // check if last_p suffix needs scanning 759 const oop last_obj = cast_to_oop(last_p); 760 if (!last_obj->is_objArray()) { 761 // scan the remaining suffix of the object 762 const MemRegion last_mr(right, p); 763 assert(p == last_p + last_obj->size(), "Would miss portion of last_obj"); 764 last_obj->oop_iterate(cl, last_mr); 765 log_debug(gc, remset)("Fixed up non-objArray suffix scan in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", 766 p2i(last_mr.start()), p2i(last_mr.end())); 767 } else { 768 log_debug(gc, remset)("Skipped suffix scan of objArray in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", 769 p2i(right), p2i(p)); 770 } 771 } 772 NOT_PRODUCT(stats.record_scan_obj_cnt(i);) 773 774 // ==== END DIRTY card range processing ==== 775 } else { 776 // ==== BEGIN CLEAN card range processing ==== 777 778 // If we are using the write table (during update refs, e.g.), a mutator may dirty 779 // a card at any time. This is fine for the algorithm below because it is only 780 // counting contiguous runs of clean cards (and only for non-product builds). 781 assert(use_write_table || ctbm[cur_index] == CardTable::clean_card_val(), "Error"); 782 783 // walk back over contiguous clean cards 784 size_t i = 0; 785 while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::clean_card_val()) { 786 NOT_PRODUCT(i++); 787 } 788 // Record alternations, clean run length, and clean card count 789 NOT_PRODUCT(stats.record_clean_run(i);) 790 791 // ==== END CLEAN card range processing ==== 792 } 793 } 794 } 795 796 // Given that this range of clusters is known to span a humongous object spanned by region r, scan the 797 // portion of the humongous object that corresponds to the specified range. 
798 template<typename RememberedSet> 799 template <typename ClosureType> 800 inline void 801 ShenandoahScanRemembered<RememberedSet>::process_humongous_clusters(ShenandoahHeapRegion* r, size_t first_cluster, size_t count, 802 HeapWord *end_of_range, ClosureType *cl, bool use_write_table) { 803 ShenandoahHeapRegion* start_region = r->humongous_start_region(); 804 HeapWord* p = start_region->bottom(); 805 oop obj = cast_to_oop(p); 806 assert(r->is_humongous(), "Only process humongous regions here"); 807 assert(start_region->is_humongous_start(), "Should be start of humongous region"); 808 assert(p + obj->size() >= end_of_range, "Humongous object ends before range ends"); 809 810 size_t first_card_index = first_cluster * ShenandoahCardCluster<RememberedSet>::CardsPerCluster; 811 HeapWord* first_cluster_addr = _rs->addr_for_card_index(first_card_index); 812 size_t spanned_words = count * ShenandoahCardCluster<RememberedSet>::CardsPerCluster * CardTable::card_size_in_words(); 813 start_region->oop_iterate_humongous_slice(cl, true, first_cluster_addr, spanned_words, use_write_table); 814 } 815 816 817 // This method takes a region & determines the end of the region that the worker can scan. 
template<typename RememberedSet>
template <typename ClosureType>
inline void
ShenandoahScanRemembered<RememberedSet>::process_region_slice(ShenandoahHeapRegion *region, size_t start_offset, size_t clusters,
                                                              HeapWord *end_of_range, ClosureType *cl, bool use_write_table,
                                                              uint worker_id) {

  // This is called only for young gen collection, when we scan old gen regions
  assert(region->is_old(), "Expecting an old region");
  // The slice begins start_offset words past the bottom of the region and must
  // align exactly on a cluster boundary.
  HeapWord *start_of_range = region->bottom() + start_offset;
  size_t start_cluster_no = cluster_for_addr(start_of_range);
  assert(addr_for_cluster(start_cluster_no) == start_of_range, "process_region_slice range must align on cluster boundary");

  // region->end() represents the end of memory spanned by this region, but not all of this
  // memory is eligible to be scanned because some of this memory has not yet been allocated.
  //
  // region->top() represents the end of allocated memory within this region. Any addresses
  // beyond region->top() should not be scanned as that memory does not hold valid objects.

  if (use_write_table) {
    // This is update-refs servicing.  Clip the scan at the update watermark: references
    // above it do not need updating here.
    if (end_of_range > region->get_update_watermark()) {
      end_of_range = region->get_update_watermark();
    }
  } else {
    // This is concurrent mark servicing. Note that TAMS for this region is TAMS at start of old-gen
    // collection. Here, we need to scan up to TAMS for most recently initiated young-gen collection.
    // Since all LABs are retired at init mark, and since replacement LABs are allocated lazily, and since no
    // promotions occur until evacuation phase, TAMS for most recent young-gen is same as top().
    if (end_of_range > region->top()) {
      end_of_range = region->top();
    }
  }

  log_debug(gc)("Remembered set scan processing Region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT ", using %s table",
                region->index(), p2i(start_of_range), p2i(end_of_range),
                use_write_table? "read/write (updating)": "read (marking)");

  // Note that end_of_range may point to the middle of a cluster because we limit scanning to
  // region->top() or region->get_update_watermark(). We avoid processing past end_of_range.
  // Objects that start between start_of_range and end_of_range, including humongous objects, will
  // be fully processed by process_clusters. In no case should we need to scan past end_of_range.
  if (start_of_range < end_of_range) {
    if (region->is_humongous()) {
      ShenandoahHeapRegion* start_region = region->humongous_start_region();
      // TODO: ysr : This will be called multiple times with same start_region, but different start_cluster_no.
      // Check that it does the right thing here, and doesn't do redundant work. Also see if the call API/interface
      // can be simplified.
      process_humongous_clusters(start_region, start_cluster_no, clusters, end_of_range, cl, use_write_table);
    } else {
      // TODO: ysr The start_of_range calculated above is discarded and may be calculated again in process_clusters().
      // See if the redundant and wasted calculations can be avoided, and if the call parameters can be cleaned up.
      // It almost sounds like this set of methods needs a working class to stash away some useful info that can be
      // efficiently passed around amongst these methods, as well as related state. Note that we can't use
      // ShenandoahScanRemembered as there seems to be only one instance of that object for the heap which is shared
      // by all workers. Note that there are also task methods which call these which may have per worker storage.
      // We need to be careful however that if the number of workers changes dynamically that state isn't sequestered
      // and become obsolete.
      process_clusters(start_cluster_no, clusters, end_of_range, cl, use_write_table, worker_id);
    }
  }
}

// Map a heap address to the number of the cluster (a fixed-size run of
// CardsPerCluster cards) that covers it.
template<typename RememberedSet>
inline size_t
ShenandoahScanRemembered<RememberedSet>::cluster_for_addr(HeapWordImpl **addr) {
  size_t card_index = _rs->card_index_for_addr(addr);
  size_t result = card_index / ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  return result;
}

// Inverse of cluster_for_addr for cluster-aligned addresses: the heap address
// of the first card in the given cluster.
template<typename RememberedSet>
inline HeapWord*
ShenandoahScanRemembered<RememberedSet>::addr_for_cluster(size_t cluster_no) {
  size_t card_index = cluster_no * ShenandoahCardCluster<RememberedSet>::CardsPerCluster;
  return addr_for_card_index(card_index);
}

// This is used only for debug verification so don't worry about making the scan parallel.
// Scans the remembered set of every active, non-collection-set old region using the
// read table, applying cl to the recorded old-to-young references.
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::roots_do(OopIterateClosure* cl) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool old_bitmap_stable = heap->old_generation()->is_mark_complete();
  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  for (size_t i = 0, n = heap->num_regions(); i < n; ++i) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (region->is_old() && region->is_active() && !region->is_cset()) {
      HeapWord* start_of_range = region->bottom();
      HeapWord* end_of_range = region->top();
      size_t start_cluster_no = cluster_for_addr(start_of_range);
      size_t num_heapwords = end_of_range - start_of_range;
      unsigned int cluster_size = CardTable::card_size_in_words() *
                                  ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
      // Round up so a partial trailing cluster is still scanned.  (For an empty
      // region, num_heapwords == 0 and unsigned wraparound yields 0 clusters.)
      size_t num_clusters = (size_t) ((num_heapwords - 1 + cluster_size) / cluster_size);

      // Remembered set scanner
      if (region->is_humongous()) {
        process_humongous_clusters(region->humongous_start_region(), start_cluster_no, num_clusters, end_of_range, cl,
                                   false /* use_write_table */);
      } else {
        process_clusters(start_cluster_no, num_clusters, end_of_range, cl,
                         false /* use_write_table */, 0 /* fake worker id */);
      }
    }
  }
}

#ifndef PRODUCT
// Log given card stats: one line per stat type, showing the 0/25/50/75th
// percentiles and the maximum of the recorded distribution.
template<typename RememberedSet>
inline void ShenandoahScanRemembered<RememberedSet>::log_card_stats(HdrSeq* stats) {
  for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) {
    log_info(gc, remset)("%18s: [ %8.2f %8.2f %8.2f %8.2f %8.2f ]",
      _card_stats_name[i],
      stats[i].percentile(0), stats[i].percentile(25),
      stats[i].percentile(50), stats[i].percentile(75),
      stats[i].maximum());
  }
}

// Log card stats for all nworkers for a specific phase t
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::log_card_stats(uint nworkers, CardStatLogType t) {
  assert(ShenandoahEnableCardStats, "Do not call");
  HdrSeq* sum_stats = card_stats_for_phase(t);
  log_info(gc, remset)("%s", _card_stat_log_type[t]);
  for (uint i = 0; i < nworkers; i++) {
    // Logs the worker's stats and folds them into sum_stats (clearing the worker's).
    log_worker_card_stats(i, sum_stats);
  }

  // Every so often, log the cumulative global stats
  if (++_card_stats_log_counter[t] >= ShenandoahCardStatsLogInterval) {
    _card_stats_log_counter[t] = 0;
    log_info(gc, remset)("Cumulative stats");
    log_card_stats(sum_stats);
  }
}

// Log card stats for given worker_id, & clear them after merging into given cumulative stats
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::log_worker_card_stats(uint worker_id, HdrSeq* sum_stats) {
  assert(ShenandoahEnableCardStats, "Do not call");

  HdrSeq* worker_card_stats = card_stats(worker_id);
  log_info(gc, remset)("Worker %u Card Stats: ", worker_id);
  log_card_stats(worker_card_stats);
  // Merge worker stats into the cumulative stats & clear worker stats
  merge_worker_card_stats_cumulative(worker_card_stats, sum_stats);
}

// Add each per-worker HdrSeq into the corresponding cumulative sequence, then
// reset the worker's sequence so it starts fresh for the next phase.
template<typename RememberedSet>
void ShenandoahScanRemembered<RememberedSet>::merge_worker_card_stats_cumulative(
  HdrSeq* worker_stats, HdrSeq* sum_stats) {
  for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) {
    sum_stats[i].add(worker_stats[i]);
    worker_stats[i].clear();
  }
}
#endif

// Racy, advisory check; next() below performs the authoritative atomic claim.
inline bool ShenandoahRegionChunkIterator::has_next() const {
  return _index < _total_chunks;
}

// Atomically claim the next chunk of scanning work.  On success, fills in
// *assignment with the region, the chunk's starting offset within that region,
// and the chunk size (offsets/sizes in HeapWords) and returns true; returns
// false once all chunks have been handed out.  Safe for concurrent use by
// multiple workers.
inline bool ShenandoahRegionChunkIterator::next(struct ShenandoahRegionChunk *assignment) {
  if (_index >= _total_chunks) {
    return false;
  }
  // Atomic::add returns the incremented value, so new_index is one past the
  // zero-based chunk index this worker has claimed.
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  if (new_index > _total_chunks) {
    // First worker that hits new_index == _total_chunks continues, other
    // contending workers return false.
    return false;
  }
  // convert to zero-based indexing
  new_index--;
  assert(new_index < _total_chunks, "Error");

  // Find the group number for the assigned chunk index.  _group_entries appears
  // to hold cumulative chunk counts, so the first group whose entry exceeds
  // new_index is the one containing the chunk.
  size_t group_no;
  for (group_no = 0; new_index >= _group_entries[group_no]; group_no++)
    ;
  assert(group_no < _num_groups, "Cannot have group no greater or equal to _num_groups");

  // All size computations measured in HeapWord
  size_t region_size_words = ShenandoahHeapRegion::region_size_words();
  size_t group_region_index = _region_index[group_no];
  size_t group_region_offset = _group_offset[group_no];

  // Chunk's ordinal position within its group (chunks in a group share one size).
  size_t index_within_group = (group_no == 0)? new_index: new_index - _group_entries[group_no - 1];
  size_t group_chunk_size = _group_chunk_size[group_no];
  // Word offset of this chunk from the group's starting region/offset; split it
  // into whole regions spanned plus the residual offset within the final region.
  size_t offset_of_this_chunk = group_region_offset + index_within_group * group_chunk_size;
  size_t regions_spanned_by_chunk_offset = offset_of_this_chunk / region_size_words;
  size_t offset_within_region = offset_of_this_chunk % region_size_words;

  size_t region_index = group_region_index + regions_spanned_by_chunk_offset;

  assignment->_r = _heap->get_region(region_index);
  assignment->_chunk_offset = offset_within_region;
  assignment->_chunk_size = group_chunk_size;
  return true;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP