src/hotspot/share/gc/parallel/mutableSpace.cpp

216 }
217 
218 // Only used by oldgen allocation.
219 bool MutableSpace::needs_expand(size_t word_size) const {
220   assert_lock_strong(PSOldGenExpand_lock);
221   // Holding the lock means end is stable.  So while top may be advancing
222   // via concurrent allocations, there is no need to order the reads of top
223   // and end here, unlike in cas_allocate.
224   return pointer_delta(end(), top()) < word_size;
225 }
226 
227 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
228   HeapWord* obj_addr = bottom();
229   HeapWord* t = top();
230   // Could call objects iterate, but this is easier.
231   while (obj_addr < t) {
232     obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
233   }
234 }
235 
236 void MutableSpace::object_iterate(ObjectClosure* cl) {
237   HeapWord* p = bottom();
238   while (p < top()) {
239     oop obj = cast_to_oop(p);
240     // When promotion-failure occurs during Young GC, eden/from space is not cleared,
241     // so we can encounter objects with "forwarded" markword.
242     // They are essentially dead, so we skip them.
243     if (!obj->is_forwarded()) {
244       cl->do_object(obj);
245     }
246 #ifdef ASSERT
247     else {
248       assert(obj->forwardee() != obj, "must not be self-forwarded");
249     }
250 #endif
251     p += obj->size();
252   }
253 }
254 
255 void MutableSpace::print_short() const { print_short_on(tty); }
256 void MutableSpace::print_short_on( outputStream* st) const {
257   st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
258             (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
259 }
260 
261 void MutableSpace::print() const { print_on(tty); }
262 void MutableSpace::print_on(outputStream* st) const {
263   MutableSpace::print_short_on(st);
264   st->print_cr(" [" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT ")",
265                  p2i(bottom()), p2i(top()), p2i(end()));
266 }
267 
268 void MutableSpace::verify() {
269   HeapWord* p = bottom();
270   HeapWord* t = top();
271   while (p < t) {

216 }
217 
218 // Only used by oldgen allocation.
219 bool MutableSpace::needs_expand(size_t word_size) const {
220   assert_lock_strong(PSOldGenExpand_lock);
221   // Holding the lock means end is stable.  So while top may be advancing
222   // via concurrent allocations, there is no need to order the reads of top
223   // and end here, unlike in cas_allocate.
224   return pointer_delta(end(), top()) < word_size;
225 }
226 
227 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
228   HeapWord* obj_addr = bottom();
229   HeapWord* t = top();
230   // Could call objects iterate, but this is easier.
231   while (obj_addr < t) {
232     obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
233   }
234 }
235 
236 template<bool COMPACT_HEADERS>
237 void MutableSpace::object_iterate_impl(ObjectClosure* cl) {
238   HeapWord* p = bottom();
239   while (p < top()) {
240     oop obj = cast_to_oop(p);
241     // When promotion-failure occurs during Young GC, eden/from space is not cleared,
242     // so we can encounter objects with "forwarded" markword.
243     // They are essentially dead, so skipping them
244     if (!obj->is_forwarded()) {
245       cl->do_object(obj);
246       p += obj->size();
247     } else {
248       assert(obj->forwardee() != obj, "must not be self-forwarded");
249       if (COMPACT_HEADERS) {
250         // It is safe to use the forwardee here. Parallel GC only uses
251         // header-based forwarding during promotion. Full GC doesn't
252         // use the object header for forwarding at all.
253         p += obj->forwardee()->size();
254       } else {
255         p += obj->size();
256       }
257     }
258   }
259 }
260 
261 void MutableSpace::object_iterate(ObjectClosure* cl) {
262   if (UseCompactObjectHeaders) {
263     object_iterate_impl<true>(cl);
264   } else {
265     object_iterate_impl<false>(cl);
266   }
267 }
268 
269 void MutableSpace::print_short() const { print_short_on(tty); }
270 void MutableSpace::print_short_on( outputStream* st) const {
271   st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
272             (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
273 }
274 
275 void MutableSpace::print() const { print_on(tty); }
276 void MutableSpace::print_on(outputStream* st) const {
277   MutableSpace::print_short_on(st);
278   st->print_cr(" [" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT ")",
279                  p2i(bottom()), p2i(top()), p2i(end()));
280 }
281 
282 void MutableSpace::verify() {
283   HeapWord* p = bottom();
284   HeapWord* t = top();
285   while (p < t) {