/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#endif

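// Given "top", the end of the dirty region being processed, and
// "top_obj", the start of the block containing the region's last word,
// return the address up to which the walk must actually extend.  With
// ObjHeadPreciseArray precision, a non-array object whose header lies on
// a dirty card may have been stored into beyond that card, so the walk
// is extended to the end of the object.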
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTable::ObjHeadPreciseArray) {
        if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card; since we do exact
          // store checks for objArrays, we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + cast_to_oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta.  XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions).  XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(cast_to_oop(bottom))) {
      cast_to_oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We are called with "mr" representing the dirty region that we want to
// process.  Because of imprecise marking, we may need to extend "mr" to
// the right and scan beyond it.  However, because we may already have
// scanned part of that extended region, we may need to trim back its
// right end so that we do not rescan what we (or another worker thread)
// have already scanned or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _last_bottom == NULL || top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  _min_done = bottom;
}

DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl,
                                          CardTable::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

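// As above, but for a contiguous space: the result is additionally
// capped at the space's top(), since a contiguous space is only
// parseable up to that point.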
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTable::ObjHeadPreciseArray) {
      if (cast_to_oop(top_obj)->is_objArray() || cast_to_oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card; since we do exact
        // store checks for objArrays, we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) cast_to_oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + cast_to_oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += cast_to_oop(bottom)->oop_iterate_size(cl, mr);              \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + cast_to_oop(bottom)->size();          \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      cast_to_oop(bottom)->oop_iterate(cl);                             \
      bottom = next_obj;                                                \
      next_obj = bottom + cast_to_oop(bottom)->size();                  \
    }                                                                   \
    /* Last object. */                                                  \
    cast_to_oop(bottom)->oop_iterate(cl, mr);                           \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopIterateClosure* cl,
                             CardTable::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

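// In a contiguous space the only free block is the tail [top(), end()),
// so an address is in free space iff it is at or above top.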
bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

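// Compute the new location of a live object "q" of "size" words during
// mark-compact: switch to the next compaction space when the current one
// is full, install a forwarding pointer in the mark word (or reset the
// mark if the object does not move), and keep the block offset table's
// threshold up to date.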
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if (cast_from_oop<HeapWord*>(q) != compact_top) {
    q->forward_to(cast_to_oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}

#if INCLUDE_SERIALGC

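// Serial full GC: compute and install forwarding addresses for all live
// objects in this space.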
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

#endif // INCLUDE_SERIALGC

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

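// Walk the space object by object, verifying each oop and checking that
// the walk ends exactly at top().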
void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    prev_p = p;
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopIterateClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(OopIterateClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(cast_to_oop(mark));
    mark += cast_to_oop(mark)->size();
  }
}

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

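// Return the size of the block starting at "p": the object's size if
// "p" is below top(), otherwise the size of the single free block
// [top(), end()).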
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(cast_to_oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(cast_to_oop(p))));
  if (p < current_top) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // "result" can be one of two things:
      //  - the old top value: the exchange succeeded
      //  - otherwise: the exchange failed and "result" holds the value of
      //    top seen at the time of the failed exchange, so retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

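// Temporarily consume most of the remaining free space by formatting it
// as a real object (an int array, or a minimal instance when the gap is
// too small for an array header), so that the space stays parseable by
// heap walkers.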
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, leaving roughly a 1/'factor'
  // fraction of the current free space unused (factor == 0 fills it all).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) cast_to_oop(allocate(size));
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markWord::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) cast_to_oop(allocate(size));
    obj->set_mark(markWord::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(vmClasses::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

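// Sampling intervals for verify() below: every object is verified
// (OBJ_SAMPLE_INTERVAL == 0), while the block offset table lookup is
// cross-checked only once per BLOCK_SAMPLE_INTERVAL objects.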
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = cast_to_oop(p)->size();
    // For a sampling of objects in the space, check that the
    // block offset table finds their start addresses.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oopDesc::verify(cast_to_oop(p));
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}
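// Percentage of the space that may be left occupied by dead objects
// ("dead wood") during compaction; controlled by -XX:MarkSweepDeadRatio.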
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}