1 /* 2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmClasses.hpp" 27 #include "classfile/vmSymbols.hpp" 28 #include "gc/shared/blockOffsetTable.inline.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/genCollectedHeap.hpp" 31 #include "gc/shared/genOopClosures.inline.hpp" 32 #include "gc/shared/space.hpp" 33 #include "gc/shared/space.inline.hpp" 34 #include "gc/shared/spaceDecorator.inline.hpp" 35 #include "memory/iterator.inline.hpp" 36 #include "memory/universe.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "runtime/atomic.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/prefetch.inline.hpp" 41 #include "runtime/safepoint.hpp" 42 #include "utilities/align.hpp" 43 #include "utilities/copy.hpp" 44 #include "utilities/globalDefinitions.hpp" 45 #include "utilities/macros.hpp" 46 #if INCLUDE_SERIALGC 47 #include "gc/serial/defNewGeneration.hpp" 48 #endif 49 50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, 51 HeapWord* top_obj) { 52 if (top_obj != NULL) { 53 if (_sp->block_is_obj(top_obj)) { 54 if (_precision == CardTable::ObjHeadPreciseArray) { 55 if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) { 56 // An arrayOop is starting on the dirty card - since we do exact 57 // store checks for objArrays we are done. 58 } else { 59 // Otherwise, it is possible that the object starting on the dirty 60 // card spans the entire card, and that the store happened on a 61 // later card. Figure out where the object ends. 62 // Use the block_size() method of the space over which 63 // the iteration is being done. That space (e.g. CMS) may have 64 // specific requirements on object sizes which will 65 // be reflected in the block_size() method. 66 top = top_obj + cast_to_oop(top_obj)->size(); 67 } 68 } 69 } else { 70 top = top_obj; 71 } 72 } else { 73 assert(top == _sp->end(), "only case where top_obj == NULL"); 74 } 75 return top; 76 } 77 78 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, 79 HeapWord* bottom, 80 HeapWord* top) { 81 // 1. Blocks may or may not be objects. 82 // 2. Even when a block_is_obj(), it may not entirely 83 // occupy the block if the block quantum is larger than 84 // the object size. 85 // We can and should try to optimize by calling the non-MemRegion 86 // version of oop_iterate() for all but the extremal objects 87 // (for which we need to call the MemRegion version of 88 // oop_iterate()) To be done post-beta XXX 89 for (; bottom < top; bottom += _sp->block_size(bottom)) { 90 // As in the case of contiguous space above, we'd like to 91 // just use the value returned by oop_iterate to increment the 92 // current pointer; unfortunately, that won't work in CMS because 93 // we'd need an interface change (it seems) to have the space 94 // "adjust the object size" (for instance pad it up to its 95 // block alignment or minimum block size restrictions. XXX 96 if (_sp->block_is_obj(bottom) && 97 !_sp->obj_allocated_since_save_marks(cast_to_oop(bottom))) { 98 cast_to_oop(bottom)->oop_iterate(_cl, mr); 99 } 100 } 101 } 102 103 // We get called with "mr" representing the dirty region 104 // that we want to process. Because of imprecise marking, 105 // we may need to extend the incoming "mr" to the right, 106 // and scan more. However, because we may already have 107 // scanned some of that extended region, we may need to 108 // trim its right-end back some so we do not scan what 109 // we (or another worker thread) may already have scanned 110 // or planning to scan. 111 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { 112 HeapWord* bottom = mr.start(); 113 HeapWord* last = mr.last(); 114 HeapWord* top = mr.end(); 115 HeapWord* bottom_obj; 116 HeapWord* top_obj; 117 118 assert(_precision == CardTable::ObjHeadPreciseArray || 119 _precision == CardTable::Precise, 120 "Only ones we deal with for now."); 121 122 assert(_precision != CardTable::ObjHeadPreciseArray || 123 _last_bottom == NULL || top <= _last_bottom, 124 "Not decreasing"); 125 NOT_PRODUCT(_last_bottom = mr.start()); 126 127 bottom_obj = _sp->block_start(bottom); 128 top_obj = _sp->block_start(last); 129 130 assert(bottom_obj <= bottom, "just checking"); 131 assert(top_obj <= top, "just checking"); 132 133 // Given what we think is the top of the memory region and 134 // the start of the object at the top, get the actual 135 // value of the top. 136 top = get_actual_top(top, top_obj); 137 138 // If the previous call did some part of this region, don't redo. 139 if (_precision == CardTable::ObjHeadPreciseArray && 140 _min_done != NULL && 141 _min_done < top) { 142 top = _min_done; 143 } 144 145 // Top may have been reset, and in fact may be below bottom, 146 // e.g. the dirty card region is entirely in a now free object 147 // -- something that could happen with a concurrent sweeper. 148 bottom = MIN2(bottom, top); 149 MemRegion extended_mr = MemRegion(bottom, top); 150 assert(bottom <= top && 151 (_precision != CardTable::ObjHeadPreciseArray || 152 _min_done == NULL || 153 top <= _min_done), 154 "overlap!"); 155 156 // Walk the region if it is not empty; otherwise there is nothing to do. 157 if (!extended_mr.is_empty()) { 158 walk_mem_region(extended_mr, bottom_obj, top); 159 } 160 161 _min_done = bottom; 162 } 163 164 DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl, 165 CardTable::PrecisionStyle precision, 166 HeapWord* boundary) { 167 return new DirtyCardToOopClosure(this, cl, precision, boundary); 168 } 169 170 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, 171 HeapWord* top_obj) { 172 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { 173 if (_precision == CardTable::ObjHeadPreciseArray) { 174 if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) { 175 // An arrayOop is starting on the dirty card - since we do exact 176 // store checks for objArrays we are done. 177 } else { 178 // Otherwise, it is possible that the object starting on the dirty 179 // card spans the entire card, and that the store happened on a 180 // later card. Figure out where the object ends. 181 assert(_sp->block_size(top_obj) == cast_to_oop(top_obj)->size(), 182 "Block size and object size mismatch"); 183 top = top_obj + cast_to_oop(top_obj)->size(); 184 } 185 } 186 } else { 187 top = (_sp->toContiguousSpace())->top(); 188 } 189 return top; 190 } 191 192 void FilteringDCTOC::walk_mem_region(MemRegion mr, 193 HeapWord* bottom, 194 HeapWord* top) { 195 // Note that this assumption won't hold if we have a concurrent 196 // collector in this space, which may have freed up objects after 197 // they were dirtied and before the stop-the-world GC that is 198 // examining cards here. 199 assert(bottom < top, "ought to be at least one obj on a dirty card."); 200 201 if (_boundary != NULL) { 202 // We have a boundary outside of which we don't want to look 203 // at objects, so create a filtering closure around the 204 // oop closure before walking the region. 205 FilteringClosure filter(_boundary, _cl); 206 walk_mem_region_with_cl(mr, bottom, top, &filter); 207 } else { 208 // No boundary, simply walk the heap with the oop closure. 209 walk_mem_region_with_cl(mr, bottom, top, _cl); 210 } 211 212 } 213 214 // We must replicate this so that the static type of "FilteringClosure" 215 // (see above) is apparent at the oop_iterate calls. 216 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ 217 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ 218 HeapWord* bottom, \ 219 HeapWord* top, \ 220 ClosureType* cl) { \ 221 bottom += cast_to_oop(bottom)->oop_iterate_size(cl, mr); \ 222 if (bottom < top) { \ 223 HeapWord* next_obj = bottom + cast_to_oop(bottom)->size(); \ 224 while (next_obj < top) { \ 225 /* Bottom lies entirely below top, so we can call the */ \ 226 /* non-memRegion version of oop_iterate below. */ \ 227 cast_to_oop(bottom)->oop_iterate(cl); \ 228 bottom = next_obj; \ 229 next_obj = bottom + cast_to_oop(bottom)->size(); \ 230 } \ 231 /* Last object. */ \ 232 cast_to_oop(bottom)->oop_iterate(cl, mr); \ 233 } \ 234 } 235 236 // (There are only two of these, rather than N, because the split is due 237 // only to the introduction of the FilteringClosure, a local part of the 238 // impl of this abstraction.) 239 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure) 240 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) 241 242 DirtyCardToOopClosure* 243 ContiguousSpace::new_dcto_cl(OopIterateClosure* cl, 244 CardTable::PrecisionStyle precision, 245 HeapWord* boundary) { 246 return new ContiguousSpaceDCTOC(this, cl, precision, boundary); 247 } 248 249 void Space::initialize(MemRegion mr, 250 bool clear_space, 251 bool mangle_space) { 252 HeapWord* bottom = mr.start(); 253 HeapWord* end = mr.end(); 254 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), 255 "invalid space boundaries"); 256 set_bottom(bottom); 257 set_end(end); 258 if (clear_space) clear(mangle_space); 259 } 260 261 void Space::clear(bool mangle_space) { 262 if (ZapUnusedHeapArea && mangle_space) { 263 mangle_unused_area(); 264 } 265 } 266 267 ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) { 268 _mangler = new GenSpaceMangler(this); 269 } 270 271 ContiguousSpace::~ContiguousSpace() { 272 delete _mangler; 273 } 274 275 void ContiguousSpace::initialize(MemRegion mr, 276 bool clear_space, 277 bool mangle_space) 278 { 279 CompactibleSpace::initialize(mr, clear_space, mangle_space); 280 } 281 282 void ContiguousSpace::clear(bool mangle_space) { 283 set_top(bottom()); 284 set_saved_mark(); 285 CompactibleSpace::clear(mangle_space); 286 } 287 288 bool ContiguousSpace::is_free_block(const HeapWord* p) const { 289 return p >= _top; 290 } 291 292 void OffsetTableContigSpace::clear(bool mangle_space) { 293 ContiguousSpace::clear(mangle_space); 294 _offsets.initialize_threshold(); 295 } 296 297 void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { 298 Space::set_bottom(new_bottom); 299 _offsets.set_bottom(new_bottom); 300 } 301 302 void OffsetTableContigSpace::set_end(HeapWord* new_end) { 303 // Space should not advertise an increase in size 304 // until after the underlying offset table has been enlarged. 305 _offsets.resize(pointer_delta(new_end, bottom())); 306 Space::set_end(new_end); 307 } 308 309 #ifndef PRODUCT 310 311 void ContiguousSpace::set_top_for_allocations(HeapWord* v) { 312 mangler()->set_top_for_allocations(v); 313 } 314 void ContiguousSpace::set_top_for_allocations() { 315 mangler()->set_top_for_allocations(top()); 316 } 317 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) { 318 mangler()->check_mangled_unused_area(limit); 319 } 320 321 void ContiguousSpace::check_mangled_unused_area_complete() { 322 mangler()->check_mangled_unused_area_complete(); 323 } 324 325 // Mangled only the unused space that has not previously 326 // been mangled and that has not been allocated since being 327 // mangled. 328 void ContiguousSpace::mangle_unused_area() { 329 mangler()->mangle_unused_area(); 330 } 331 void ContiguousSpace::mangle_unused_area_complete() { 332 mangler()->mangle_unused_area_complete(); 333 } 334 #endif // NOT_PRODUCT 335 336 void CompactibleSpace::initialize(MemRegion mr, 337 bool clear_space, 338 bool mangle_space) { 339 Space::initialize(mr, clear_space, mangle_space); 340 set_compaction_top(bottom()); 341 _next_compaction_space = NULL; 342 } 343 344 void CompactibleSpace::clear(bool mangle_space) { 345 Space::clear(mangle_space); 346 _compaction_top = bottom(); 347 } 348 349 HeapWord* CompactibleSpace::forward(oop q, size_t size, 350 CompactPoint* cp, HeapWord* compact_top) { 351 // q is alive 352 // First check if we should switch compaction space 353 assert(this == cp->space, "'this' should be current compaction space."); 354 size_t compaction_max_size = pointer_delta(end(), compact_top); 355 while (size > compaction_max_size) { 356 // switch to next compaction space 357 cp->space->set_compaction_top(compact_top); 358 cp->space = cp->space->next_compaction_space(); 359 if (cp->space == NULL) { 360 cp->gen = GenCollectedHeap::heap()->young_gen(); 361 assert(cp->gen != NULL, "compaction must succeed"); 362 cp->space = cp->gen->first_compaction_space(); 363 assert(cp->space != NULL, "generation must have a first compaction space"); 364 } 365 compact_top = cp->space->bottom(); 366 cp->space->set_compaction_top(compact_top); 367 cp->space->initialize_threshold(); 368 compaction_max_size = pointer_delta(cp->space->end(), compact_top); 369 } 370 371 // store the forwarding pointer into the mark word 372 if (cast_from_oop<HeapWord*>(q) != compact_top) { 373 q->forward_to(cast_to_oop(compact_top)); 374 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); 375 } else { 376 // if the object isn't moving we can just set the mark to the default 377 // mark and handle it specially later on. 378 q->init_mark(); 379 assert(!q->is_forwarded(), "should not be forwarded"); 380 } 381 382 compact_top += size; 383 384 // We need to update the offset table so that the beginnings of objects can be 385 // found during scavenge. Note that we are updating the offset table based on 386 // where the object will be once the compaction phase finishes. 387 cp->space->alloc_block(compact_top - size, compact_top); 388 return compact_top; 389 } 390 391 #if INCLUDE_SERIALGC 392 393 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { 394 // Compute the new addresses for the live objects and store it in the mark 395 // Used by universe::mark_sweep_phase2() 396 397 // We're sure to be here before any objects are compacted into this 398 // space, so this is a good time to initialize this: 399 set_compaction_top(bottom()); 400 401 if (cp->space == NULL) { 402 assert(cp->gen != NULL, "need a generation"); 403 assert(cp->gen->first_compaction_space() == this, "just checking"); 404 cp->space = cp->gen->first_compaction_space(); 405 cp->space->initialize_threshold(); 406 cp->space->set_compaction_top(cp->space->bottom()); 407 } 408 409 HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to. 410 411 DeadSpacer dead_spacer(this); 412 413 HeapWord* end_of_live = bottom(); // One byte beyond the last byte of the last live object. 414 HeapWord* first_dead = NULL; // The first dead object. 415 416 const intx interval = PrefetchScanIntervalInBytes; 417 418 HeapWord* cur_obj = bottom(); 419 HeapWord* scan_limit = top(); 420 421 while (cur_obj < scan_limit) { 422 if (cast_to_oop(cur_obj)->is_gc_marked()) { 423 // prefetch beyond cur_obj 424 Prefetch::write(cur_obj, interval); 425 size_t size = cast_to_oop(cur_obj)->size(); 426 compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top); 427 cur_obj += size; 428 end_of_live = cur_obj; 429 } else { 430 // run over all the contiguous dead objects 431 HeapWord* end = cur_obj; 432 do { 433 // prefetch beyond end 434 Prefetch::write(end, interval); 435 end += cast_to_oop(end)->size(); 436 } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked()); 437 438 // see if we might want to pretend this object is alive so that 439 // we don't have to compact quite as often. 440 if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) { 441 oop obj = cast_to_oop(cur_obj); 442 compact_top = cp->space->forward(obj, obj->size(), cp, compact_top); 443 end_of_live = end; 444 } else { 445 // otherwise, it really is a free region. 446 447 // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object. 448 *(HeapWord**)cur_obj = end; 449 450 // see if this is the first dead region. 451 if (first_dead == NULL) { 452 first_dead = cur_obj; 453 } 454 } 455 456 // move on to the next object 457 cur_obj = end; 458 } 459 } 460 461 assert(cur_obj == scan_limit, "just checking"); 462 _end_of_live = end_of_live; 463 if (first_dead != NULL) { 464 _first_dead = first_dead; 465 } else { 466 _first_dead = end_of_live; 467 } 468 469 // save the compaction_top of the compaction space. 470 cp->space->set_compaction_top(compact_top); 471 } 472 473 void CompactibleSpace::adjust_pointers() { 474 // Check first is there is any work to do. 475 if (used() == 0) { 476 return; // Nothing to do. 477 } 478 479 // adjust all the interior pointers to point at the new locations of objects 480 // Used by MarkSweep::mark_sweep_phase3() 481 482 HeapWord* cur_obj = bottom(); 483 HeapWord* const end_of_live = _end_of_live; // Established by prepare_for_compaction(). 484 HeapWord* const first_dead = _first_dead; // Established by prepare_for_compaction(). 485 486 assert(first_dead <= end_of_live, "Stands to reason, no?"); 487 488 const intx interval = PrefetchScanIntervalInBytes; 489 490 debug_only(HeapWord* prev_obj = NULL); 491 while (cur_obj < end_of_live) { 492 Prefetch::write(cur_obj, interval); 493 if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) { 494 // cur_obj is alive 495 // point all the oops to the new location 496 size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj)); 497 debug_only(prev_obj = cur_obj); 498 cur_obj += size; 499 } else { 500 debug_only(prev_obj = cur_obj); 501 // cur_obj is not a live object, instead it points at the next live object 502 cur_obj = *(HeapWord**)cur_obj; 503 assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj)); 504 } 505 } 506 507 assert(cur_obj == end_of_live, "just checking"); 508 } 509 510 void CompactibleSpace::compact() { 511 // Copy all live objects to their new location 512 // Used by MarkSweep::mark_sweep_phase4() 513 514 verify_up_to_first_dead(this); 515 516 HeapWord* const start = bottom(); 517 HeapWord* const end_of_live = _end_of_live; 518 519 assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live)); 520 if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) { 521 // Nothing to compact. The space is either empty or all live object should be left in place. 522 clear_empty_region(this); 523 return; 524 } 525 526 const intx scan_interval = PrefetchScanIntervalInBytes; 527 const intx copy_interval = PrefetchCopyIntervalInBytes; 528 529 assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live)); 530 HeapWord* cur_obj = start; 531 if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) { 532 // All object before _first_dead can be skipped. They should not be moved. 533 // A pointer to the first live object is stored at the memory location for _first_dead. 534 cur_obj = *(HeapWord**)(_first_dead); 535 } 536 537 debug_only(HeapWord* prev_obj = NULL); 538 while (cur_obj < end_of_live) { 539 if (!cast_to_oop(cur_obj)->is_forwarded()) { 540 debug_only(prev_obj = cur_obj); 541 // The first word of the dead object contains a pointer to the next live object or end of space. 542 cur_obj = *(HeapWord**)cur_obj; 543 assert(cur_obj > prev_obj, "we should be moving forward through memory"); 544 } else { 545 // prefetch beyond q 546 Prefetch::read(cur_obj, scan_interval); 547 548 // size and destination 549 size_t size = cast_to_oop(cur_obj)->size(); 550 HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee()); 551 552 // prefetch beyond compaction_top 553 Prefetch::write(compaction_top, copy_interval); 554 555 // copy object and reinit its mark 556 assert(cur_obj != compaction_top, "everything in this pass should be moving"); 557 Copy::aligned_conjoint_words(cur_obj, compaction_top, size); 558 cast_to_oop(compaction_top)->init_mark(); 559 assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class"); 560 561 debug_only(prev_obj = cur_obj); 562 cur_obj += size; 563 } 564 } 565 566 clear_empty_region(this); 567 } 568 569 #endif // INCLUDE_SERIALGC 570 571 void Space::print_short() const { print_short_on(tty); } 572 573 void Space::print_short_on(outputStream* st) const { 574 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K, 575 (int) ((double) used() * 100 / capacity())); 576 } 577 578 void Space::print() const { print_on(tty); } 579 580 void Space::print_on(outputStream* st) const { 581 print_short_on(st); 582 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", 583 p2i(bottom()), p2i(end())); 584 } 585 586 void ContiguousSpace::print_on(outputStream* st) const { 587 print_short_on(st); 588 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 589 p2i(bottom()), p2i(top()), p2i(end())); 590 } 591 592 void OffsetTableContigSpace::print_on(outputStream* st) const { 593 print_short_on(st); 594 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " 595 INTPTR_FORMAT ", " INTPTR_FORMAT ")", 596 p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end())); 597 } 598 599 void ContiguousSpace::verify() const { 600 HeapWord* p = bottom(); 601 HeapWord* t = top(); 602 HeapWord* prev_p = NULL; 603 while (p < t) { 604 oopDesc::verify(cast_to_oop(p)); 605 prev_p = p; 606 p += cast_to_oop(p)->size(); 607 } 608 guarantee(p == top(), "end of last object must match end of space"); 609 if (top() != end()) { 610 guarantee(top() == block_start_const(end()-1) && 611 top() == block_start_const(top()), 612 "top should be start of unallocated block, if it exists"); 613 } 614 } 615 616 void Space::oop_iterate(OopIterateClosure* blk) { 617 ObjectToOopClosure blk2(blk); 618 object_iterate(&blk2); 619 } 620 621 bool Space::obj_is_alive(const HeapWord* p) const { 622 assert (block_is_obj(p), "The address should point to an object"); 623 return true; 624 } 625 626 void ContiguousSpace::oop_iterate(OopIterateClosure* blk) { 627 if (is_empty()) return; 628 HeapWord* obj_addr = bottom(); 629 HeapWord* t = top(); 630 // Could call objects iterate, but this is easier. 631 while (obj_addr < t) { 632 obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(blk); 633 } 634 } 635 636 void ContiguousSpace::object_iterate(ObjectClosure* blk) { 637 if (is_empty()) return; 638 object_iterate_from(bottom(), blk); 639 } 640 641 void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) { 642 while (mark < top()) { 643 blk->do_object(cast_to_oop(mark)); 644 mark += cast_to_oop(mark)->size(); 645 } 646 } 647 648 // Very general, slow implementation. 649 HeapWord* ContiguousSpace::block_start_const(const void* p) const { 650 assert(MemRegion(bottom(), end()).contains(p), 651 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")", 652 p2i(p), p2i(bottom()), p2i(end())); 653 if (p >= top()) { 654 return top(); 655 } else { 656 HeapWord* last = bottom(); 657 HeapWord* cur = last; 658 while (cur <= p) { 659 last = cur; 660 cur += cast_to_oop(cur)->size(); 661 } 662 assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last)); 663 return last; 664 } 665 } 666 667 size_t ContiguousSpace::block_size(const HeapWord* p) const { 668 assert(MemRegion(bottom(), end()).contains(p), 669 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")", 670 p2i(p), p2i(bottom()), p2i(end())); 671 HeapWord* current_top = top(); 672 assert(p <= current_top, 673 "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT, 674 p2i(p), p2i(current_top)); 675 assert(p == current_top || oopDesc::is_oop(cast_to_oop(p)), 676 "p (" PTR_FORMAT ") is not a block start - " 677 "current_top: " PTR_FORMAT ", is_oop: %s", 678 p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(cast_to_oop(p)))); 679 if (p < current_top) { 680 return cast_to_oop(p)->size(); 681 } else { 682 assert(p == current_top, "just checking"); 683 return pointer_delta(end(), (HeapWord*) p); 684 } 685 } 686 687 // This version requires locking. 688 inline HeapWord* ContiguousSpace::allocate_impl(size_t size) { 689 assert(Heap_lock->owned_by_self() || 690 (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), 691 "not locked"); 692 HeapWord* obj = top(); 693 if (pointer_delta(end(), obj) >= size) { 694 HeapWord* new_top = obj + size; 695 set_top(new_top); 696 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 697 return obj; 698 } else { 699 return NULL; 700 } 701 } 702 703 // This version is lock-free. 704 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) { 705 do { 706 HeapWord* obj = top(); 707 if (pointer_delta(end(), obj) >= size) { 708 HeapWord* new_top = obj + size; 709 HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top); 710 // result can be one of two: 711 // the old top value: the exchange succeeded 712 // otherwise: the new value of the top is returned. 713 if (result == obj) { 714 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 715 return obj; 716 } 717 } else { 718 return NULL; 719 } 720 } while (true); 721 } 722 723 // Requires locking. 724 HeapWord* ContiguousSpace::allocate(size_t size) { 725 return allocate_impl(size); 726 } 727 728 // Lock-free. 729 HeapWord* ContiguousSpace::par_allocate(size_t size) { 730 return par_allocate_impl(size); 731 } 732 733 void OffsetTableContigSpace::initialize_threshold() { 734 _offsets.initialize_threshold(); 735 } 736 737 void OffsetTableContigSpace::alloc_block(HeapWord* start, HeapWord* end) { 738 _offsets.alloc_block(start, end); 739 } 740 741 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, 742 MemRegion mr) : 743 _offsets(sharedOffsetArray, mr), 744 _par_alloc_lock(Mutex::safepoint, "OffsetTableContigSpaceParAlloc_lock", true) 745 { 746 _offsets.set_contig_space(this); 747 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); 748 } 749 750 #define OBJ_SAMPLE_INTERVAL 0 751 #define BLOCK_SAMPLE_INTERVAL 100 752 753 void OffsetTableContigSpace::verify() const { 754 HeapWord* p = bottom(); 755 HeapWord* prev_p = NULL; 756 int objs = 0; 757 int blocks = 0; 758 759 if (VerifyObjectStartArray) { 760 _offsets.verify(); 761 } 762 763 while (p < top()) { 764 size_t size = cast_to_oop(p)->size(); 765 // For a sampling of objects in the space, find it using the 766 // block offset table. 767 if (blocks == BLOCK_SAMPLE_INTERVAL) { 768 guarantee(p == block_start_const(p + (size/2)), 769 "check offset computation"); 770 blocks = 0; 771 } else { 772 blocks++; 773 } 774 775 if (objs == OBJ_SAMPLE_INTERVAL) { 776 oopDesc::verify(cast_to_oop(p)); 777 objs = 0; 778 } else { 779 objs++; 780 } 781 prev_p = p; 782 p += size; 783 } 784 guarantee(p == top(), "end of last object must match end of space"); 785 } 786 787 788 size_t TenuredSpace::allowed_dead_ratio() const { 789 return MarkSweepDeadRatio; 790 }