1 /*
2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmClasses.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc/shared/collectedHeap.inline.hpp"
29 #include "gc/shared/genCollectedHeap.hpp"
30 #include "gc/shared/space.hpp"
31 #include "gc/shared/space.inline.hpp"
32 #include "gc/shared/spaceDecorator.inline.hpp"
33 #include "memory/iterator.inline.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomic.hpp"
37 #include "runtime/java.hpp"
38 #include "runtime/prefetch.inline.hpp"
39 #include "runtime/safepoint.hpp"
40 #include "utilities/align.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/globalDefinitions.hpp"
43 #include "utilities/macros.hpp"
44 #if INCLUDE_SERIALGC
45 #include "gc/serial/serialBlockOffsetTable.inline.hpp"
46 #include "gc/serial/defNewGeneration.hpp"
47 #endif
48
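// Compute the real top of the region to scan. If the object starting at
// top_obj is an array, exact store checks already bound the scan to this
// card, so "top" is left as passed in; otherwise the object may span
// multiple cards, so "top" is extended to the end of that object. If there
// is no such object, clamp to the space's top.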
49 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
50 HeapWord* top_obj) {
51 if (top_obj != nullptr && top_obj < (_sp->toContiguousSpace())->top()) {
52 if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) {
      // An arrayOop starts on the dirty card: we do exact store
      // checks for objArrays, and typeArrays contain no oops, so we are done.
55 } else {
56 // Otherwise, it is possible that the object starting on the dirty
57 // card spans the entire card, and that the store happened on a
58 // later card. Figure out where the object ends.
59 assert(_sp->block_size(top_obj) == cast_to_oop(top_obj)->size(),
60 "Block size and object size mismatch");
61 top = top_obj + cast_to_oop(top_obj)->size();
62 }
63 } else {
64 top = (_sp->toContiguousSpace())->top();
65 }
66 return top;
67 }
68
69 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
70 HeapWord* bottom,
71 HeapWord* top) {
72 // Note that this assumption won't hold if we have a concurrent
73 // collector in this space, which may have freed up objects after
74 // they were dirtied and before the stop-the-world GC that is
75 // examining cards here.
76 assert(bottom < top, "ought to be at least one obj on a dirty card.");
77
78 walk_mem_region_with_cl(mr, bottom, top, _cl);
79 }
80
// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not rescan what
// we (or another worker thread) have already scanned
// or are planning to scan.
89 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
90 HeapWord* bottom = mr.start();
91 HeapWord* last = mr.last();
92 HeapWord* top = mr.end();
93 HeapWord* bottom_obj;
94 HeapWord* top_obj;
95
96 assert(_last_bottom == nullptr || top <= _last_bottom,
97 "Not decreasing");
98 NOT_PRODUCT(_last_bottom = mr.start());
99
100 bottom_obj = _sp->block_start(bottom);
101 top_obj = _sp->block_start(last);
102
103 assert(bottom_obj <= bottom, "just checking");
104 assert(top_obj <= top, "just checking");
105
106 // Given what we think is the top of the memory region and
107 // the start of the object at the top, get the actual
108 // value of the top.
109 top = get_actual_top(top, top_obj);
110
111 // If the previous call did some part of this region, don't redo.
112 if (_min_done != nullptr && _min_done < top) {
113 top = _min_done;
114 }
115
116 // Top may have been reset, and in fact may be below bottom,
117 // e.g. the dirty card region is entirely in a now free object
118 // -- something that could happen with a concurrent sweeper.
119 bottom = MIN2(bottom, top);
120 MemRegion extended_mr = MemRegion(bottom, top);
121 assert(bottom <= top &&
122 (_min_done == nullptr || top <= _min_done),
123 "overlap!");
124
125 // Walk the region if it is not empty; otherwise there is nothing to do.
126 if (!extended_mr.is_empty()) {
127 walk_mem_region(extended_mr, bottom_obj, top);
128 }
129
130 _min_done = bottom;
131 }
132
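// Apply "cl" to the reference fields of every object that intersects "mr".
// The first and the last object may extend beyond "mr", so those two are
// iterated with the MemRegion-bounded variant; objects known to lie
// entirely inside the region use the cheaper unbounded iteration.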
133 void DirtyCardToOopClosure::walk_mem_region_with_cl(MemRegion mr,
134 HeapWord* bottom,
135 HeapWord* top,
136 OopIterateClosure* cl) {
137 bottom += cast_to_oop(bottom)->oop_iterate_size(cl, mr);
138 if (bottom < top) {
139 HeapWord* next_obj = bottom + cast_to_oop(bottom)->size();
140 while (next_obj < top) {
141 /* Bottom lies entirely below top, so we can call the */
142 /* non-memRegion version of oop_iterate below. */
143 cast_to_oop(bottom)->oop_iterate(cl);
144 bottom = next_obj;
145 next_obj = bottom + cast_to_oop(bottom)->size();
146 }
147 /* Last object. */
148 cast_to_oop(bottom)->oop_iterate(cl, mr);
149 }
150 }
151
152 void Space::initialize(MemRegion mr,
153 bool clear_space,
154 bool mangle_space) {
155 HeapWord* bottom = mr.start();
156 HeapWord* end = mr.end();
157 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
158 "invalid space boundaries");
159 set_bottom(bottom);
160 set_end(end);
161 if (clear_space) clear(mangle_space);
162 }
163
164 void Space::clear(bool mangle_space) {
165 if (ZapUnusedHeapArea && mangle_space) {
166 mangle_unused_area();
167 }
168 }
169
170 ContiguousSpace::ContiguousSpace(): Space(),
171 _compaction_top(nullptr),
172 _next_compaction_space(nullptr),
173 _top(nullptr) {
174 _mangler = new GenSpaceMangler(this);
175 }
176
177 ContiguousSpace::~ContiguousSpace() {
178 delete _mangler;
179 }
180
181 void ContiguousSpace::initialize(MemRegion mr,
182 bool clear_space,
183 bool mangle_space)
184 {
185 Space::initialize(mr, clear_space, mangle_space);
186 set_compaction_top(bottom());
187 _next_compaction_space = nullptr;
188 }
189
190 void ContiguousSpace::clear(bool mangle_space) {
191 set_top(bottom());
192 set_saved_mark();
193 Space::clear(mangle_space);
194 _compaction_top = bottom();
195 }
196
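// Everything at or above _top is unallocated and therefore free.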
197 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
198 return p >= _top;
199 }
200
201 #if INCLUDE_SERIALGC
202 void TenuredSpace::clear(bool mangle_space) {
203 ContiguousSpace::clear(mangle_space);
204 _offsets.initialize_threshold();
205 }
206
207 void TenuredSpace::set_bottom(HeapWord* new_bottom) {
208 Space::set_bottom(new_bottom);
209 _offsets.set_bottom(new_bottom);
210 }
211
212 void TenuredSpace::set_end(HeapWord* new_end) {
213 // Space should not advertise an increase in size
214 // until after the underlying offset table has been enlarged.
215 _offsets.resize(pointer_delta(new_end, bottom()));
216 Space::set_end(new_end);
217 }
218 #endif // INCLUDE_SERIALGC
219
220 #ifndef PRODUCT
221
222 void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
223 mangler()->set_top_for_allocations(v);
224 }
225 void ContiguousSpace::set_top_for_allocations() {
226 mangler()->set_top_for_allocations(top());
227 }
228 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
229 mangler()->check_mangled_unused_area(limit);
230 }
231
232 void ContiguousSpace::check_mangled_unused_area_complete() {
233 mangler()->check_mangled_unused_area_complete();
234 }
235
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
239 void ContiguousSpace::mangle_unused_area() {
240 mangler()->mangle_unused_area();
241 }
242 void ContiguousSpace::mangle_unused_area_complete() {
243 mangler()->mangle_unused_area_complete();
244 }
245 #endif // NOT_PRODUCT
246
247
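// Compute the post-compaction address for the live object "q" of "size"
// words and install it as a forwarding pointer in q's mark word. If the
// current compaction space is too full, advance through the chain of
// compaction spaces (falling back to the young generation) until the
// object fits.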
248 HeapWord* ContiguousSpace::forward(oop q, size_t size,
249 CompactPoint* cp, HeapWord* compact_top) {
250 // q is alive
251 // First check if we should switch compaction space
252 assert(this == cp->space, "'this' should be current compaction space.");
253 size_t compaction_max_size = pointer_delta(end(), compact_top);
254 while (size > compaction_max_size) {
255 // switch to next compaction space
256 cp->space->set_compaction_top(compact_top);
257 cp->space = cp->space->next_compaction_space();
258 if (cp->space == nullptr) {
259 cp->gen = GenCollectedHeap::heap()->young_gen();
260 assert(cp->gen != nullptr, "compaction must succeed");
261 cp->space = cp->gen->first_compaction_space();
262 assert(cp->space != nullptr, "generation must have a first compaction space");
263 }
264 compact_top = cp->space->bottom();
265 cp->space->set_compaction_top(compact_top);
266 cp->space->initialize_threshold();
267 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
268 }
269
270 // store the forwarding pointer into the mark word
271 if (cast_from_oop<HeapWord*>(q) != compact_top) {
272 q->forward_to(cast_to_oop(compact_top));
273 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
274 } else {
275 // if the object isn't moving we can just set the mark to the default
276 // mark and handle it specially later on.
277 q->init_mark();
278 assert(!q->is_forwarded(), "should not be forwarded");
279 }
280
281 compact_top += size;
282
283 // We need to update the offset table so that the beginnings of objects can be
284 // found during scavenge. Note that we are updating the offset table based on
285 // where the object will be once the compaction phase finishes.
286 cp->space->alloc_block(compact_top - size, compact_top);
287 return compact_top;
288 }
289
290 #if INCLUDE_SERIALGC
291
292 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the mark
  // word of each object. Used by MarkSweep::mark_sweep_phase2().
295
296 // We're sure to be here before any objects are compacted into this
297 // space, so this is a good time to initialize this:
298 set_compaction_top(bottom());
299
300 if (cp->space == nullptr) {
301 assert(cp->gen != nullptr, "need a generation");
302 assert(cp->gen->first_compaction_space() == this, "just checking");
303 cp->space = cp->gen->first_compaction_space();
304 cp->space->initialize_threshold();
305 cp->space->set_compaction_top(cp->space->bottom());
306 }
307
308 HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
309
310 DeadSpacer dead_spacer(this);
311
312 HeapWord* end_of_live = bottom(); // One byte beyond the last byte of the last live object.
313 HeapWord* first_dead = nullptr; // The first dead object.
314
315 const intx interval = PrefetchScanIntervalInBytes;
316
317 HeapWord* cur_obj = bottom();
318 HeapWord* scan_limit = top();
319
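  // Walk all objects in [bottom, top). Live objects are forwarded to their
  // post-compaction addresses. A run of dead objects is either kept as
  // filler (when the DeadSpacer accepts it, to reduce the amount of
  // copying) or skipped, in which case its first word is overwritten with
  // a pointer to the next live object.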
320 while (cur_obj < scan_limit) {
321 if (cast_to_oop(cur_obj)->is_gc_marked()) {
322 // prefetch beyond cur_obj
323 Prefetch::write(cur_obj, interval);
324 size_t size = cast_to_oop(cur_obj)->size();
325 compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
326 cur_obj += size;
327 end_of_live = cur_obj;
328 } else {
329 // run over all the contiguous dead objects
330 HeapWord* end = cur_obj;
331 do {
332 // prefetch beyond end
333 Prefetch::write(end, interval);
334 end += cast_to_oop(end)->size();
335 } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
336
337 // see if we might want to pretend this object is alive so that
338 // we don't have to compact quite as often.
339 if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
340 oop obj = cast_to_oop(cur_obj);
341 compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
342 end_of_live = end;
343 } else {
344 // otherwise, it really is a free region.
345
346 // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
347 *(HeapWord**)cur_obj = end;
348
349 // see if this is the first dead region.
350 if (first_dead == nullptr) {
351 first_dead = cur_obj;
352 }
353 }
354
355 // move on to the next object
356 cur_obj = end;
357 }
358 }
359
360 assert(cur_obj == scan_limit, "just checking");
361 _end_of_live = end_of_live;
362 if (first_dead != nullptr) {
363 _first_dead = first_dead;
364 } else {
365 _first_dead = end_of_live;
366 }
367
368 // save the compaction_top of the compaction space.
369 cp->space->set_compaction_top(compact_top);
370 }
371
372 void ContiguousSpace::adjust_pointers() {
  // Check first if there is any work to do.
374 if (used() == 0) {
375 return; // Nothing to do.
376 }
377
378 // adjust all the interior pointers to point at the new locations of objects
379 // Used by MarkSweep::mark_sweep_phase3()
380
381 HeapWord* cur_obj = bottom();
382 HeapWord* const end_of_live = _end_of_live; // Established by prepare_for_compaction().
383 HeapWord* const first_dead = _first_dead; // Established by prepare_for_compaction().
384
385 assert(first_dead <= end_of_live, "Stands to reason, no?");
386
387 const intx interval = PrefetchScanIntervalInBytes;
388
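  // Objects below first_dead are all live. From first_dead on, the first
  // word of every dead run holds a pointer to the next live object
  // (installed by prepare_for_compaction()), so dead space is skipped in
  // one step rather than object by object.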
389 debug_only(HeapWord* prev_obj = nullptr);
390 while (cur_obj < end_of_live) {
391 Prefetch::write(cur_obj, interval);
392 if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
393 // cur_obj is alive
394 // point all the oops to the new location
395 size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
396 debug_only(prev_obj = cur_obj);
397 cur_obj += size;
398 } else {
399 debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; instead, its first word points at the next live object.
401 cur_obj = *(HeapWord**)cur_obj;
402 assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
403 }
404 }
405
406 assert(cur_obj == end_of_live, "just checking");
407 }
408
409 void ContiguousSpace::compact() {
410 // Copy all live objects to their new location
411 // Used by MarkSweep::mark_sweep_phase4()
412
413 verify_up_to_first_dead(this);
414
415 HeapWord* const start = bottom();
416 HeapWord* const end_of_live = _end_of_live;
417
418 assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
419 if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
421 clear_empty_region(this);
422 return;
423 }
424
425 const intx scan_interval = PrefetchScanIntervalInBytes;
426 const intx copy_interval = PrefetchCopyIntervalInBytes;
427
428 assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
429 HeapWord* cur_obj = start;
430 if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
432 // A pointer to the first live object is stored at the memory location for _first_dead.
433 cur_obj = *(HeapWord**)(_first_dead);
434 }
435
436 debug_only(HeapWord* prev_obj = nullptr);
437 while (cur_obj < end_of_live) {
438 if (!cast_to_oop(cur_obj)->is_forwarded()) {
439 debug_only(prev_obj = cur_obj);
440 // The first word of the dead object contains a pointer to the next live object or end of space.
441 cur_obj = *(HeapWord**)cur_obj;
442 assert(cur_obj > prev_obj, "we should be moving forward through memory");
443 } else {
      // prefetch beyond cur_obj
445 Prefetch::read(cur_obj, scan_interval);
446
447 // size and destination
448 size_t size = cast_to_oop(cur_obj)->size();
449 HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
450
451 // prefetch beyond compaction_top
452 Prefetch::write(compaction_top, copy_interval);
453
454 // copy object and reinit its mark
455 assert(cur_obj != compaction_top, "everything in this pass should be moving");
456 Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
457 oop new_obj = cast_to_oop(compaction_top);
458
459 ContinuationGCSupport::transform_stack_chunk(new_obj);
460
461 new_obj->init_mark();
462 assert(new_obj->klass() != nullptr, "should have a class");
463
464 debug_only(prev_obj = cur_obj);
465 cur_obj += size;
466 }
467 }
468
469 clear_empty_region(this);
470 }
471
472 #endif // INCLUDE_SERIALGC
473
474 void Space::print_short() const { print_short_on(tty); }
475
476 void Space::print_short_on(outputStream* st) const {
477 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
478 (int) ((double) used() * 100 / capacity()));
479 }
480
481 void Space::print() const { print_on(tty); }
482
483 void Space::print_on(outputStream* st) const {
484 print_short_on(st);
485 st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ")",
486 p2i(bottom()), p2i(end()));
487 }
488
489 void ContiguousSpace::print_on(outputStream* st) const {
490 print_short_on(st);
491 st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
492 p2i(bottom()), p2i(top()), p2i(end()));
493 }
494
495 #if INCLUDE_SERIALGC
496 void TenuredSpace::print_on(outputStream* st) const {
497 print_short_on(st);
498 st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", "
499 PTR_FORMAT ", " PTR_FORMAT ")",
500 p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
501 }
502 #endif
503
504 void ContiguousSpace::verify() const {
505 HeapWord* p = bottom();
506 HeapWord* t = top();
507 HeapWord* prev_p = nullptr;
508 while (p < t) {
509 oopDesc::verify(cast_to_oop(p));
510 prev_p = p;
511 p += cast_to_oop(p)->size();
512 }
513 guarantee(p == top(), "end of last object must match end of space");
514 if (top() != end()) {
515 guarantee(top() == block_start_const(end()-1) &&
516 top() == block_start_const(top()),
517 "top should be start of unallocated block, if it exists");
518 }
519 }
520
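// Generic implementation: adapt the oop closure to an object closure and
// visit every object in the space. Subclasses that know their layout can
// provide a more direct walk, as ContiguousSpace does below.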
521 void Space::oop_iterate(OopIterateClosure* blk) {
522 ObjectToOopClosure blk2(blk);
523 object_iterate(&blk2);
524 }
525
526 bool Space::obj_is_alive(const HeapWord* p) const {
527 assert (block_is_obj(p), "The address should point to an object");
528 return true;
529 }
530
531 void ContiguousSpace::oop_iterate(OopIterateClosure* blk) {
532 if (is_empty()) return;
533 HeapWord* obj_addr = bottom();
534 HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
536 while (obj_addr < t) {
537 obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(blk);
538 }
539 }
540
541 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
542 if (is_empty()) return;
543 object_iterate_from(bottom(), blk);
544 }
545
546 void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
547 while (mark < top()) {
548 blk->do_object(cast_to_oop(mark));
549 mark += cast_to_oop(mark)->size();
550 }
551 }
552
553 // Very general, slow implementation.
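// Linear scan from the bottom of the space; the cost is proportional to
// the number of objects below "p".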
554 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
555 assert(MemRegion(bottom(), end()).contains(p),
556 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
557 p2i(p), p2i(bottom()), p2i(end()));
558 if (p >= top()) {
559 return top();
560 } else {
561 HeapWord* last = bottom();
562 HeapWord* cur = last;
563 while (cur <= p) {
564 last = cur;
565 cur += cast_to_oop(cur)->size();
566 }
567 assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
568 return last;
569 }
570 }
571
572 size_t ContiguousSpace::block_size(const HeapWord* p) const {
573 assert(MemRegion(bottom(), end()).contains(p),
574 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
575 p2i(p), p2i(bottom()), p2i(end()));
576 HeapWord* current_top = top();
577 assert(p <= current_top,
578 "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
579 p2i(p), p2i(current_top));
580 assert(p == current_top || oopDesc::is_oop(cast_to_oop(p)),
581 "p (" PTR_FORMAT ") is not a block start - "
582 "current_top: " PTR_FORMAT ", is_oop: %s",
583 p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(cast_to_oop(p))));
584 if (p < current_top) {
585 return cast_to_oop(p)->size();
586 } else {
587 assert(p == current_top, "just checking");
588 return pointer_delta(end(), (HeapWord*) p);
589 }
590 }
591
592 // This version requires locking.
593 inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
594 assert(Heap_lock->owned_by_self() ||
595 (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
596 "not locked");
597 HeapWord* obj = top();
598 if (pointer_delta(end(), obj) >= size) {
599 HeapWord* new_top = obj + size;
600 set_top(new_top);
601 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
602 return obj;
603 } else {
604 return nullptr;
605 }
606 }
607
608 // This version is lock-free.
609 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
610 do {
611 HeapWord* obj = top();
612 if (pointer_delta(end(), obj) >= size) {
613 HeapWord* new_top = obj + size;
614 HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // result is one of two things:
      //   the old top value (obj): the exchange succeeded;
      //   anything else: another thread updated top first, so the
      //   current value of top is returned and we retry.
618 if (result == obj) {
619 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
620 return obj;
621 }
622 } else {
623 return nullptr;
624 }
625 } while (true);
626 }
627
628 // Requires locking.
629 HeapWord* ContiguousSpace::allocate(size_t size) {
630 return allocate_impl(size);
631 }
632
633 // Lock-free.
634 HeapWord* ContiguousSpace::par_allocate(size_t size) {
635 return par_allocate_impl(size);
636 }
637
638 #if INCLUDE_SERIALGC
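// Keep the block offset table in sync with allocation so that
// block_start() queries over this space remain cheap.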
639 void TenuredSpace::initialize_threshold() {
640 _offsets.initialize_threshold();
641 }
642
643 void TenuredSpace::alloc_block(HeapWord* start, HeapWord* end) {
644 _offsets.alloc_block(start, end);
645 }
646
647 TenuredSpace::TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
648 MemRegion mr) :
649 _offsets(sharedOffsetArray, mr),
650 _par_alloc_lock(Mutex::safepoint, "TenuredSpaceParAlloc_lock", true)
651 {
652 _offsets.set_contig_space(this);
653 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
654 }
655
656 #define OBJ_SAMPLE_INTERVAL 0
657 #define BLOCK_SAMPLE_INTERVAL 100
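// With OBJ_SAMPLE_INTERVAL == 0 every object is verified; the block offset
// table is cross-checked only for roughly every BLOCK_SAMPLE_INTERVAL-th
// object.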
658
659 void TenuredSpace::verify() const {
660 HeapWord* p = bottom();
661 HeapWord* prev_p = nullptr;
662 int objs = 0;
663 int blocks = 0;
664
665 if (VerifyObjectStartArray) {
666 _offsets.verify();
667 }
668
669 while (p < top()) {
670 size_t size = cast_to_oop(p)->size();
    // For a sampling of objects in the space, verify that the block offset
    // table finds the start of the object correctly.
673 if (blocks == BLOCK_SAMPLE_INTERVAL) {
674 guarantee(p == block_start_const(p + (size/2)),
675 "check offset computation");
676 blocks = 0;
677 } else {
678 blocks++;
679 }
680
681 if (objs == OBJ_SAMPLE_INTERVAL) {
682 oopDesc::verify(cast_to_oop(p));
683 objs = 0;
684 } else {
685 objs++;
686 }
687 prev_p = p;
688 p += size;
689 }
690 guarantee(p == top(), "end of last object must match end of space");
691 }
692
693
694 size_t TenuredSpace::allowed_dead_ratio() const {
695 return MarkSweepDeadRatio;
696 }
697 #endif // INCLUDE_SERIALGC