}

// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
  assert_lock_strong(ExpandHeap_lock);
  // Holding the lock means end is stable. So while top may be advancing
  // via concurrent allocations, there is no need to order the reads of top
  // and end here, unlike in cas_allocate.
  return pointer_delta(end(), top()) < word_size;
}
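
// A minimal sketch (not from the original sources) of how a caller is
// expected to use needs_expand(): the check must run under ExpandHeap_lock
// so that end() cannot move underneath it. try_expand_for() and expand_by()
// are hypothetical names for illustration; the real caller is the oldgen
// allocation path.
//
//   bool try_expand_for(MutableSpace* space, size_t word_size) {
//     MutexLocker ml(ExpandHeap_lock);        // stabilizes end()
//     if (!space->needs_expand(word_size)) {
//       return true;                          // already enough room
//     }
//     return expand_by(word_size);            // hypothetical: grow the space
//   }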

void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += cast_to_oop(obj_addr)->oop_iterate_size(cl);
  }
}
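
// A minimal sketch (not from the original sources) of an OopIterateClosure
// that oop_iterate() above could be handed, counting the reference fields in
// the space. BasicOopIterateClosure and the do_oop() hooks are the real
// HotSpot interface; CountOopsClosure is a made-up name for this example.
//
//   class CountOopsClosure : public BasicOopIterateClosure {
//     size_t _count;
//    public:
//     CountOopsClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const              { return _count; }
//   };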

template<bool COMPACT_HEADERS>
void MutableSpace::object_iterate_impl(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    oop obj = cast_to_oop(p);
    // When promotion failure occurs during Young GC, eden/from space is not
    // cleared, so we can encounter objects with a "forwarded" markword.
    // They are essentially dead, so we skip them.
    if (!obj->is_forwarded()) {
      cl->do_object(obj);
      p += obj->size();
    } else {
      assert(obj->forwardee() != obj, "must not be self-forwarded");
      if (COMPACT_HEADERS) {
        // It is safe to use the forwardee here. Parallel GC only uses
        // header-based forwarding during promotion. Full GC doesn't
        // use the object header for forwarding at all.
        p += obj->forwardee()->size();
      } else {
        p += obj->size();
      }
    }
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  if (UseCompactObjectHeaders) {
    object_iterate_impl<true>(cl);
  } else {
    object_iterate_impl<false>(cl);
  }
}
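
// A minimal sketch (not from the original sources) of driving
// object_iterate() with an ObjectClosure. ObjectClosure::do_object(oop) is
// the real interface; CountObjectsClosure is a made-up name. Note that the
// template dispatch above resolves the UseCompactObjectHeaders check once
// per walk rather than once per object.
//
//   class CountObjectsClosure : public ObjectClosure {
//     size_t _count;
//    public:
//     CountObjectsClosure() : _count(0) {}
//     virtual void do_object(oop obj) { _count++; }
//     size_t count() const            { return _count; }
//   };
//
//   CountObjectsClosure cl;
//   space->object_iterate(&cl);  // forwarded (dead) objects are skipped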

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;