/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#endif

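// Compute the actual top of the region to scan. "top_obj" is the start of the
// block containing the last word of the dirty region: with ObjHeadPreciseArray
// precision a non-array object starting there may span past the card, so "top"
// is extended to the end of that object; a block that is not an object caps
// "top" at the block start.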
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTable::ObjHeadPreciseArray) {
        if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + cast_to_oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

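// Apply the wrapped oop closure, restricted to "mr", to every block in
// [bottom, top) that is an object allocated before the last save_marks().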
void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(cast_to_oop(bottom))) {
      cast_to_oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _last_bottom == NULL || top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  _min_done = bottom;
}

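// Create the closure used to scan this space for dirty-card ranges;
// ContiguousSpace overrides this below to return a ContiguousSpaceDCTOC.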
DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl,
                                          CardTable::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

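// Like DirtyCardToOopClosure::get_actual_top(), but for a contiguous space,
// where every block below top() is an object; the result is additionally
// capped at the space's top().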
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTable::ObjHeadPreciseArray) {
      if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) cast_to_oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + cast_to_oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

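// Walk the dirty region; when a boundary is set, wrap the oop closure in a
// FilteringClosure keyed on "_boundary" so that only oops interesting
// relative to that boundary are processed.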
void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)   \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,          \
                                                   HeapWord* bottom,      \
                                                   HeapWord* top,         \
                                                   ClosureType* cl) {     \
  bottom += cast_to_oop(bottom)->oop_iterate_size(cl, mr);                \
  if (bottom < top) {                                                     \
    HeapWord* next_obj = bottom + cast_to_oop(bottom)->size();            \
    while (next_obj < top) {                                              \
      /* Bottom lies entirely below top, so we can call the */            \
      /* non-memRegion version of oop_iterate below. */                   \
      cast_to_oop(bottom)->oop_iterate(cl);                               \
      bottom = next_obj;                                                  \
      next_obj = bottom + cast_to_oop(bottom)->size();                    \
    }                                                                     \
    /* Last object. */                                                    \
    cast_to_oop(bottom)->oop_iterate(cl, mr);                             \
  }                                                                       \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopIterateClosure* cl,
                             CardTable::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

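// Compute the post-compaction address for the live object "q" of the given
// size: switch to the next compaction space when the current one is full,
// record the forwarding pointer in q's mark word (unless q does not move),
// and keep the destination space's block offset threshold up to date.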
template <bool ALT_FWD>
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if (cast_from_oop<HeapWord*>(q) != compact_top) {
    SlidingForwarding::forward_to<ALT_FWD>(q, cast_to_oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(SlidingForwarding::is_not_forwarded(q), "should not be forwarded");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}

#if INCLUDE_SERIALGC

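// The mark-compact phases below dispatch on UseAltGCForwarding once, so that
// the forwarding-scheme choice is a compile-time template parameter inside
// the scan_and_* helpers.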
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  if (UseAltGCForwarding) {
    scan_and_forward<true>(this, cp);
  } else {
    scan_and_forward<false>(this, cp);
  }
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  if (UseAltGCForwarding) {
    scan_and_adjust_pointers<true>(this);
  } else {
    scan_and_adjust_pointers<false>(this);
  }
}

void CompactibleSpace::compact() {
  if (UseAltGCForwarding) {
    scan_and_compact<true>(this);
  } else {
    scan_and_compact<false>(this);
  }
}

#endif // INCLUDE_SERIALGC

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

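// Walk the space from bottom() to top(), verifying every object and checking
// that the objects exactly tile the allocated portion of the space.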
void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    prev_p = p;
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

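// Default oop iteration: visit every object in the space and apply the oop
// closure to the references inside it.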
void Space::oop_iterate(OopIterateClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(OopIterateClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(cast_to_oop(mark));
    mark += cast_to_oop(mark)->size();
  }
}

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

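// Size of the block starting at "p": the object's size if "p" is below top(),
// otherwise the remaining unallocated part of the space.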
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(cast_to_oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(cast_to_oop(p))));
  if (p < current_top) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // "result" can be one of two values:
      //  the old top value: the exchange succeeded and top is now new_top
      //  otherwise: the current top, already updated by a competing thread,
      //             in which case we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

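// Consume most of the remaining free space (all but roughly a 1/factor
// fraction) with a dummy object: an uninitialized int array when there is
// room for one, otherwise a bare java.lang.Object of the minimum fill size.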
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, decreasing the free size by a factor of 'factor'.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, return.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = (arrayOopDesc::base_offset_in_bytes(T_INT) + BytesPerWord) / BytesPerWord;
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) cast_to_oop(allocate(size));
    assert(t != NULL, "allocation should succeed");
    if (UseCompactObjectHeaders) {
      t->set_mark(Universe::intArrayKlassObj()->prototype_header());
    } else {
      t->set_mark(markWord::prototype());
      t->set_klass(Universe::intArrayKlassObj());
    }
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) cast_to_oop(allocate(size));
    if (UseCompactObjectHeaders) {
      obj->set_mark(vmClasses::Object_klass()->prototype_header());
    } else {
      obj->set_mark(markWord::prototype());
      obj->set_klass_gap(0);
      obj->set_klass(vmClasses::Object_klass());
    }
  }
}

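// Block offset table maintenance is delegated to "_offsets":
// initialize_threshold() resets the next update threshold, and
// cross_threshold() records a block that extends past it.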
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = cast_to_oop(p)->size();
    // For a sampling of objects in the space, check that the block offset
    // table finds the start of this object.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oopDesc::verify(cast_to_oop(p));
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

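// Percentage of the space that may be left as unreclaimed dead space during
// mark-compact, as controlled by MarkSweepDeadRatio.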
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}