src/hotspot/share/oops/oop.inline.hpp

 42 #include "utilities/align.hpp"
 43 #include "utilities/debug.hpp"
 44 #include "utilities/globalDefinitions.hpp"
 45 #include "utilities/macros.hpp"
 46 
 47 // Implementation of all inlined member functions defined in oop.hpp
 48 // We need a separate file to avoid circular references
 49 
 50 void* oopDesc::base_addr() { return this; }
 51 const void* oopDesc::base_addr() const { return this; }
 52 
 53 markWord oopDesc::mark() const {
 54   return AtomicAccess::load(&_mark);
 55 }
 56 
 57 markWord oopDesc::mark_acquire() const {
 58   return AtomicAccess::load_acquire(&_mark);
 59 }
 60 
 61 void oopDesc::set_mark(markWord m) {
 62   AtomicAccess::store(&_mark, m);
 63 }
 64 
 65 void oopDesc::set_mark(HeapWord* mem, markWord m) {
 66   *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
 67 }
 68 
 69 void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
 70   AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
 71 }
 72 
 73 void oopDesc::release_set_mark(markWord m) {
 74   AtomicAccess::release_store(&_mark, m);
 75 }
 76 
 77 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
 78   return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark);
 79 }
 80 
 81 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
 82   return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order);
 83 }
 84 
 85 markWord oopDesc::prototype_mark() const {
 86   if (UseCompactObjectHeaders) {
 87     return klass()->prototype_header();
 88   } else {
 89     return markWord::prototype();
 90   }
 91 }
 92 
 93 void oopDesc::init_mark() {
 94   set_mark(prototype_mark());
 95 }
 96 
 97 Klass* oopDesc::klass() const {
 98   switch (ObjLayout::klass_mode()) {
 99     case ObjLayout::Compact:
100       return mark().klass();
101     case ObjLayout::Compressed:
102       return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
103     default:
104       return _metadata._klass;
105   }
106 }
107 
108 Klass* oopDesc::klass_or_null() const {
109   switch (ObjLayout::klass_mode()) {
110     case ObjLayout::Compact:
111       return mark().klass_or_null();
112     case ObjLayout::Compressed:
113       return CompressedKlassPointers::decode(_metadata._compressed_klass);
114     default:

165   assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
166   assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
167   char* raw_mem = ((char*)mem + klass_offset_in_bytes());
168   if (UseCompressedClassPointers) {
169     AtomicAccess::release_store((narrowKlass*)raw_mem,
170                           CompressedKlassPointers::encode_not_null(k));
171   } else {
172     AtomicAccess::release_store((Klass**)raw_mem, k);
173   }
174 }
175 
176 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
177   assert(has_klass_gap(), "precondition");
178   *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
179 }
180 
181 bool oopDesc::is_a(Klass* k) const {
182   return klass()->is_subtype_of(k);
183 }
184
185 size_t oopDesc::size()  {
186   return size_given_klass(klass());
187 }
188 
189 size_t oopDesc::size_given_klass(Klass* klass)  {
190   int lh = klass->layout_helper();
191   size_t s;
192 
193   // lh is now a value computed at class initialization that may hint
194   // at the size.  For instances, this is positive and equal to the
195   // size.  For arrays, this is negative and provides log2 of the
196   // array element size.  For other oops, it is zero and thus requires
197   // a virtual call.
198   //
199   // We go to all this trouble because the size computation is at the
200   // heart of phase 2 of mark-compaction, and called for every object,
201   // alive or dead.  So the speed here is equal in importance to the
202   // speed of allocation.
203 
204   if (lh > Klass::_lh_neutral_value) {
205     if (!Klass::layout_helper_needs_slow_path(lh)) {
206       s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
207     } else {
208       s = klass->oop_size(this);
209     }
210   } else if (lh <= Klass::_lh_neutral_value) {
211     // The most common case is instances; fall through if so.
212     if (lh < Klass::_lh_neutral_value) {
213       // Second most common case is arrays.  We have to fetch the
214       // length of the array, shift (multiply) it appropriately,
215       // up to wordSize, add the header, and align to object size.
216       size_t size_in_bytes;
217       size_t array_length = (size_t) ((arrayOop)this)->length();
218       size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
219       size_in_bytes += Klass::layout_helper_header_size(lh);
220 
221       // This code could be simplified, but by keeping array_header_in_bytes
222       // in units of bytes and doing it this way we can round up just once,
223       // skipping the intermediate round to HeapWordSize.
224       s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
225 
226       assert(s == klass->oop_size(this), "wrong array object size");
227     } else {
228       // Must be zero, so bite the bullet and take the virtual call.
229       s = klass->oop_size(this);
230     }
231   }
232 
233   assert(s > 0, "Oop size must be greater than zero, not %zu", s);
234   assert(is_object_aligned(s), "Oop size is not properly aligned: %zu", s);
235   return s;
236 }
237 
238 bool oopDesc::is_instance()    const { return klass()->is_instance_klass();             }
239 bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass();   }
240 bool oopDesc::is_stackChunk()  const { return klass()->is_stack_chunk_instance_klass(); }
241 bool oopDesc::is_array()       const { return klass()->is_array_klass();                }
242 bool oopDesc::is_objArray()    const { return klass()->is_objArray_klass();             }
243 bool oopDesc::is_typeArray()   const { return klass()->is_typeArray_klass();            }
244 
245 template<typename T>
246 T*       oopDesc::field_addr(int offset)     const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
247 
248 template <typename T>
249 size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

292 }
293 
294 bool oopDesc::is_gc_marked() const {
295   return mark().is_marked();
296 }
297 
298 // Used by scavengers
299 bool oopDesc::is_forwarded() const {
300   return mark().is_forwarded();
301 }
302 
303 bool oopDesc::is_self_forwarded() const {
304   return mark().is_self_forwarded();
305 }
306 
307 // Used by scavengers
308 void oopDesc::forward_to(oop p) {
309   assert(cast_from_oop<oopDesc*>(p) != this,
310          "must not be used for self-forwarding, use forward_to_self() instead");
311   markWord m = markWord::encode_pointer_as_mark(p);
312   assert(m.decode_pointer() == p, "encoding must be reversible");
313   set_mark(m);
314 }
315 
316 void oopDesc::forward_to_self() {
317   set_mark(mark().set_self_forwarded());
318 }
319
320 oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) {
321   markWord old_mark = cas_set_mark(new_mark, compare, order);
322   if (old_mark == compare) {
323     return nullptr;
324   } else {
325     assert(old_mark.is_forwarded(), "must be forwarded here");
326     return forwardee(old_mark);
327   }
328 }
329 
330 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
331   assert(cast_from_oop<oopDesc*>(p) != this,
332          "must not be used for self-forwarding, use forward_to_self_atomic() instead");
333   markWord m = markWord::encode_pointer_as_mark(p);
334   assert(forwardee(m) == p, "encoding must be reversible");
335   return cas_set_forwardee(m, compare, order);
336 }
337 
338 oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) {
339   markWord new_mark = old_mark.set_self_forwarded();

378   if (m.has_displaced_mark_helper()) {
379     m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
380   } else {
381     set_mark(m.incr_age());
382   }
383 }
384 
385 template <typename OopClosureType>
386 void oopDesc::oop_iterate(OopClosureType* cl) {
387   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
388 }
389 
390 template <typename OopClosureType>
391 void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
392   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
393 }
394 
395 template <typename OopClosureType>
396 size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
397   Klass* k = klass();
398   size_t size = size_given_klass(k);
399   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
400   return size;
401 }
402 
403 template <typename OopClosureType>
404 size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
405   Klass* k = klass();
406   size_t size = size_given_klass(k);
407   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
408   return size;
409 }
410 
411 template <typename OopClosureType>
412 void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
413   oop_iterate_backwards(cl, klass());
414 }
415 
416 template <typename OopClosureType>
417 void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
418   // In this assert, we cannot safely access the Klass* with compact headers.
419   assert(k == klass(), "wrong klass");
420   OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
421 }
422 
423 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
424   return obj == nullptr || obj->klass()->is_subtype_of(klass);
425 }
426 
427 intptr_t oopDesc::identity_hash() {
428   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
429   // Note: The mark must be read into local variable to avoid concurrent updates.
430   markWord mrk = mark();
431   if (mrk.is_unlocked() && !mrk.has_no_hash()) {
432     return mrk.hash();
433   } else if (mrk.is_marked()) {
434     return mrk.hash();
435   } else {
436     return slow_identity_hash();
437   }
438 }
439 
440 // This checks fast simple case of whether the oop has_no_hash,
441 // to optimize JVMTI table lookup.
442 bool oopDesc::fast_no_hash_check() {
443   markWord mrk = mark_acquire();
444   assert(!mrk.is_marked(), "should never be marked");
445   return mrk.is_unlocked() && mrk.has_no_hash();
446 }
447 
448 bool oopDesc::has_displaced_mark() const {
449   return mark().has_displaced_mark_helper();
450 }
451 
452 markWord oopDesc::displaced_mark() const {
453   return mark().displaced_mark_helper();
454 }
455 
456 void oopDesc::set_displaced_mark(markWord m) {
457   mark().set_displaced_mark_helper(m);
458 }
459 
460 bool oopDesc::mark_must_be_preserved() const {
461   return mark_must_be_preserved(mark());
462 }
463 
464 bool oopDesc::mark_must_be_preserved(markWord m) const {
465   return m.must_be_preserved();
466 }
467
468 #endif // SHARE_OOPS_OOP_INLINE_HPP
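
The listing above is the current version of the file; the updated version, which adds the UseCompactObjectHeaders paths, follows below. As an aside for readers of size_given_klass(): the layout-helper arithmetic it relies on can be summarised in a small standalone sketch. The constants and helper names below are illustrative stand-ins, not the actual HotSpot definitions, and the sketch assumes 64-bit heap words and the default 8-byte object alignment.

// Illustrative sketch only -- not part of oop.inline.hpp.
// A positive layout helper is the instance size in bytes; a "negative"
// layout helper describes an array as a header size plus log2(element size).
#include <cstddef>

static const int    kLogHeapWordSize      = 3;  // assumption: 64-bit heap words
static const size_t kMinObjAlignmentBytes = 8;  // assumption: default alignment

static size_t align_up_bytes(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

// Instance case: the helper already holds the aligned byte size,
// so it only needs rescaling to heap words.
static size_t instance_size_in_words(int lh) {
  return (size_t)lh >> kLogHeapWordSize;
}

// Array case: scale the length by the element size, add the header,
// then round up once to the object alignment, exactly as the comment
// in size_given_klass() describes.
static size_t array_size_in_words(size_t length, int log2_elem_size, size_t header_bytes) {
  size_t size_in_bytes = (length << log2_elem_size) + header_bytes;
  return align_up_bytes(size_in_bytes, kMinObjAlignmentBytes) >> kLogHeapWordSize;
}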

 42 #include "utilities/align.hpp"
 43 #include "utilities/debug.hpp"
 44 #include "utilities/globalDefinitions.hpp"
 45 #include "utilities/macros.hpp"
 46 
 47 // Implementation of all inlined member functions defined in oop.hpp
 48 // We need a separate file to avoid circular references
 49 
 50 void* oopDesc::base_addr() { return this; }
 51 const void* oopDesc::base_addr() const { return this; }
 52 
 53 markWord oopDesc::mark() const {
 54   return AtomicAccess::load(&_mark);
 55 }
 56 
 57 markWord oopDesc::mark_acquire() const {
 58   return AtomicAccess::load_acquire(&_mark);
 59 }
 60 
 61 void oopDesc::set_mark(markWord m) {
 62   if (UseCompactObjectHeaders) {
 63     AtomicAccess::store(reinterpret_cast<uint32_t volatile*>(&_mark), m.value32());
 64   } else {
 65     AtomicAccess::store(&_mark, m);
 66   }
 67 }
 68 
 69 void oopDesc::set_mark_full(markWord m) {
 70   AtomicAccess::store(&_mark, m);
 71 }
 72 
 73 void oopDesc::set_mark(HeapWord* mem, markWord m) {
 74   if (UseCompactObjectHeaders) {
 75     *(uint32_t*)(((char*)mem) + mark_offset_in_bytes()) = m.value32();
 76   } else {
 77     *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
 78   }
 79 }
 80 
 81 void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
 82   if (UseCompactObjectHeaders) {
 83     AtomicAccess::release_store((uint32_t*)(((char*)mem) + mark_offset_in_bytes()), m.value32());
 84   } else {
 85     AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
 86   }
 87 }
 88 
 89 void oopDesc::release_set_mark(markWord m) {
 90   if (UseCompactObjectHeaders) {
 91     AtomicAccess::release_store(reinterpret_cast<uint32_t volatile*>(&_mark), m.value32());
 92   } else {
 93     AtomicAccess::release_store(&_mark, m);
 94   }
 95 }
 96 
 97 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
 98   return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark);
 99 }
100 
101 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
102   return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order);
103 }
104 
105 markWord oopDesc::prototype_mark() const {
106   if (UseCompactObjectHeaders) {
107     return klass()->prototype_header();
108   } else {
109     return markWord::prototype();
110   }
111 }
112 
113 void oopDesc::init_mark() {
114   if (UseCompactObjectHeaders) {
115     markWord m = prototype_mark().copy_hashctrl_from(mark());
116     assert(m.is_neutral(), "must be neutral");
117     set_mark(m);
118   } else {
119     set_mark(prototype_mark());
120   }
121 }
122 
123 Klass* oopDesc::klass() const {
124   switch (ObjLayout::klass_mode()) {
125     case ObjLayout::Compact:
126       return mark().klass();
127     case ObjLayout::Compressed:
128       return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
129     default:
130       return _metadata._klass;
131   }
132 }
133 
134 Klass* oopDesc::klass_or_null() const {
135   switch (ObjLayout::klass_mode()) {
136     case ObjLayout::Compact:
137       return mark().klass_or_null();
138     case ObjLayout::Compressed:
139       return CompressedKlassPointers::decode(_metadata._compressed_klass);
140     default:

191   assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
192   assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
193   char* raw_mem = ((char*)mem + klass_offset_in_bytes());
194   if (UseCompressedClassPointers) {
195     AtomicAccess::release_store((narrowKlass*)raw_mem,
196                           CompressedKlassPointers::encode_not_null(k));
197   } else {
198     AtomicAccess::release_store((Klass**)raw_mem, k);
199   }
200 }
201 
202 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
203   assert(has_klass_gap(), "precondition");
204   *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
205 }
206 
207 bool oopDesc::is_a(Klass* k) const {
208   return klass()->is_subtype_of(k);
209 }
210 
211 size_t oopDesc::size_given_mark_and_klass(markWord mrk, const Klass* kls) {
212   size_t sz = base_size_given_klass(mrk, kls);
213   if (UseCompactObjectHeaders) {
214     assert(!mrk.has_displaced_mark_helper(), "must not be displaced");
215     if (mrk.is_expanded() && kls->expand_for_hash(cast_to_oop(this), mrk)) {
216       sz = align_object_size(sz + 1);
217     }
218   }
219   return sz;
220 }
221 
222 size_t oopDesc::copy_size(size_t size, markWord mark) const {
223   if (UseCompactObjectHeaders) {
224     assert(!mark.has_displaced_mark_helper(), "must not be displaced");
225     Klass* klass = mark.klass();
226     if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
227       size = align_object_size(size + 1);
228     }
229   }
230   assert(is_object_aligned(size), "Oop size is not properly aligned: %zu", size);
231   return size;
232 }
233 
234 size_t oopDesc::copy_size_cds(size_t size, markWord mark) const {
235   if (UseCompactObjectHeaders) {
236     assert(!mark.has_displaced_mark_helper(), "must not be displaced");
237     Klass* klass = mark.klass();
238     if (mark.is_not_hashed_expanded()) {
239       assert(klass->expand_for_hash(cast_to_oop(this), mark), "must be?");
240     }
241     if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
242       size = align_object_size(size + 1);
243     }
244     if (mark.is_not_hashed_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
245       size = align_object_size(size - ObjectAlignmentInBytes / HeapWordSize);
246     }
247   }
248   assert(is_object_aligned(size), "Oop size is not properly aligned: %zu", size);
249   return size;
250 }
251 
252 size_t oopDesc::size()  {
253   return size_given_mark_and_klass(mark(), klass());
254 }
255 
256 size_t oopDesc::base_size_given_klass(markWord mrk, const Klass* klass)  {
257   int lh = klass->layout_helper();
258   size_t s;
259 
260   // lh is now a value computed at class initialization that may hint
261   // at the size.  For instances, this is positive and equal to the
262   // size.  For arrays, this is negative and provides log2 of the
263   // array element size.  For other oops, it is zero and thus requires
264   // a virtual call.
265   //
266   // We go to all this trouble because the size computation is at the
267   // heart of phase 2 of mark-compaction, and called for every object,
268   // alive or dead.  So the speed here is equal in importance to the
269   // speed of allocation.
270 
271   if (lh > Klass::_lh_neutral_value) {
272     if (!Klass::layout_helper_needs_slow_path(lh)) {
273       s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
274     } else {
275       s = klass->oop_size(this, mrk);
276     }
277   } else if (lh <= Klass::_lh_neutral_value) {
278     // The most common case is instances; fall through if so.
279     if (lh < Klass::_lh_neutral_value) {
280       // Second most common case is arrays.  We have to fetch the
281       // length of the array, shift (multiply) it appropriately,
282       // up to wordSize, add the header, and align to object size.
283       size_t size_in_bytes;
284       size_t array_length;
285 #ifdef _LP64
286       if (UseCompactObjectHeaders) {
287         array_length = (size_t) mrk.array_length();
288       } else
289 #endif
290         array_length = (size_t)((arrayOop)this)->length();
291 
292       size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
293       size_in_bytes += Klass::layout_helper_header_size(lh);
294 
295       // This code could be simplified, but by keeping array_header_in_bytes
296       // in units of bytes and doing it this way we can round up just once,
297       // skipping the intermediate round to HeapWordSize.
298       s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
299       if (s != klass->oop_size(this, mrk)) {
300         tty->print_cr("length: %zu", array_length);
301         tty->print_cr("log element size: %d", Klass::layout_helper_log2_element_size(lh));
302         tty->print_cr("is_objArray: %s", BOOL_TO_STR(klass->is_objArray_klass()));
303       }
304       assert(s == klass->oop_size(this, mrk), "wrong array object size, s: %zu, oop_size: %zu", s, klass->oop_size(this, mrk));
305     } else {
306       // Must be zero, so bite the bullet and take the virtual call.
307       s = klass->oop_size(this, mrk);
308     }
309   }
310 
311   assert(s > 0, "Oop size must be greater than zero, not %zu", s);
312   assert(is_object_aligned(s), "Oop size is not properly aligned: %zu", s);
313   return s;
314 }
315 
316 bool oopDesc::is_instance()    const { return klass()->is_instance_klass();             }
317 bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass();   }
318 bool oopDesc::is_stackChunk()  const { return klass()->is_stack_chunk_instance_klass(); }
319 bool oopDesc::is_array()       const { return klass()->is_array_klass();                }
320 bool oopDesc::is_objArray()    const { return klass()->is_objArray_klass();             }
321 bool oopDesc::is_typeArray()   const { return klass()->is_typeArray_klass();            }
322 
323 template<typename T>
324 T*       oopDesc::field_addr(int offset)     const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
325 
326 template <typename T>
327 size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

370 }
371 
372 bool oopDesc::is_gc_marked() const {
373   return mark().is_marked();
374 }
375 
376 // Used by scavengers
377 bool oopDesc::is_forwarded() const {
378   return mark().is_forwarded();
379 }
380 
381 bool oopDesc::is_self_forwarded() const {
382   return mark().is_self_forwarded();
383 }
384 
385 // Used by scavengers
386 void oopDesc::forward_to(oop p) {
387   assert(cast_from_oop<oopDesc*>(p) != this,
388          "must not be used for self-forwarding, use forward_to_self() instead");
389   markWord m = markWord::encode_pointer_as_mark(p);
390   if (UseCompactObjectHeaders && p->mark().is_expanded() && !mark().is_expanded()) {
391     m = m.set_forward_expanded();
392   }
393   assert(m.decode_pointer() == p, "encoding must be reversible");
394   set_mark_full(m);
395 }
396 
397 void oopDesc::forward_to_self() {
398   set_mark(mark().set_self_forwarded());
399 }
400 
401 void oopDesc::reset_forwarded() {
402   markWord m = mark();
403   if (m.is_self_forwarded()) {
404     unset_self_forwarded();
405   } else if (m.is_forwarded()) {
406     // Restore Klass* and hash-bits in the header,
407     // for correct iteration.
408     markWord fwd_mark = forwardee()->mark();
409     if (m.is_forward_expanded()) {
410       // Un-expand original object.
411       fwd_mark = fwd_mark.set_hashed_not_expanded();
412     }
413     set_mark_full(fwd_mark);
414   }
415 }
416 
417 oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) {
418   markWord old_mark = cas_set_mark(new_mark, compare, order);
419   if (old_mark == compare) {
420     return nullptr;
421   } else {
422     assert(old_mark.is_forwarded(), "must be forwarded here");
423     return forwardee(old_mark);
424   }
425 }
426 
427 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
428   assert(cast_from_oop<oopDesc*>(p) != this,
429          "must not be used for self-forwarding, use forward_to_self_atomic() instead");
430   markWord m = markWord::encode_pointer_as_mark(p);
431   assert(forwardee(m) == p, "encoding must be reversible");
432   return cas_set_forwardee(m, compare, order);
433 }
434 
435 oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) {
436   markWord new_mark = old_mark.set_self_forwarded();

475   if (m.has_displaced_mark_helper()) {
476     m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
477   } else {
478     set_mark(m.incr_age());
479   }
480 }
481 
482 template <typename OopClosureType>
483 void oopDesc::oop_iterate(OopClosureType* cl) {
484   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
485 }
486 
487 template <typename OopClosureType>
488 void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
489   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
490 }
491 
492 template <typename OopClosureType>
493 size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
494   Klass* k = klass();
495   size_t size = size_given_mark_and_klass(mark(), k);
496   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
497   return size;
498 }
499 
500 template <typename OopClosureType>
501 size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
502   Klass* k = klass();
503   size_t size = size_given_mark_and_klass(mark(), k);
504   OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
505   return size;
506 }
507 
508 template <typename OopClosureType>
509 void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
510   oop_iterate_backwards(cl, klass());
511 }
512 
513 template <typename OopClosureType>
514 void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
515   // In this assert, we cannot safely access the Klass* with compact headers.
516   assert(k == klass(), "wrong klass");
517   OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
518 }
519 
520 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
521   return obj == nullptr || obj->klass()->is_subtype_of(klass);
522 }
523 
524 intptr_t oopDesc::identity_hash() {
525   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
526   // Note: The mark must be read into local variable to avoid concurrent updates.
527   if (UseCompactObjectHeaders) {
528     markWord mrk = mark();
529     if (mrk.is_hashed_expanded()) {
530       Klass* klass = mrk.klass();
531       return int_field(klass->hash_offset_in_bytes(cast_to_oop(this), mrk));
532     }
533     // Fall-through to slow-case.
534   } else {
535     markWord mrk = mark();
536     if (mrk.is_unlocked() && !mrk.has_no_hash()) {
537       return mrk.hash();
538     } else if (mrk.is_marked()) {
539       return mrk.hash();
540     }
541     // Fall-through to slow-case.
542   }
543   return slow_identity_hash();
544 }
545 
546 // This checks fast simple case of whether the oop has_no_hash,
547 // to optimize JVMTI table lookup.
548 bool oopDesc::fast_no_hash_check() {
549   markWord mrk = mark_acquire();
550   assert(!mrk.is_marked(), "should never be marked");
551   return (UseCompactObjectHeaders || mrk.is_unlocked()) && mrk.has_no_hash();
552 }
553 
554 bool oopDesc::has_displaced_mark() const {
555   return mark().has_displaced_mark_helper();
556 }
557 
558 markWord oopDesc::displaced_mark() const {
559   return mark().displaced_mark_helper();
560 }
561 
562 void oopDesc::set_displaced_mark(markWord m) {
563   mark().set_displaced_mark_helper(m);
564 }
565 
566 bool oopDesc::mark_must_be_preserved() const {
567   return mark_must_be_preserved(mark());
568 }
569 
570 bool oopDesc::mark_must_be_preserved(markWord m) const {
571   return m.must_be_preserved();
572 }
573 
574 inline void oopDesc::initialize_hash_if_necessary(oop obj) {
575   if (!UseCompactObjectHeaders) {
576     return;
577   }
578   markWord m = mark();
579   assert(!m.has_displaced_mark_helper(), "must not be displaced header");
580   if (m.is_hashed_not_expanded()) {
581     set_mark(initialize_hash_if_necessary(obj, m.klass(), m));
582   }
583 }
584 
585 
586 #endif // SHARE_OOPS_OOP_INLINE_HPP
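
For reviewers new to the compact-header hashing scheme that copy_size(), copy_size_cds() and identity_hash() above work with: with UseCompactObjectHeaders the identity hash no longer lives in the mark word itself; the mark only records a hash state, and the hash value is stored in a slot appended to the object the next time the GC copies it. A minimal sketch of that three-state idea follows; the type names, fields and sizes are assumptions for illustration, not the real markWord/Klass API.

// Illustrative sketch only -- not code from this patch.
#include <cstddef>

enum class HashState { NotHashed, HashedNotExpanded, HashedExpanded };

struct ToyHeader {
  HashState hash_state = HashState::NotHashed;
};

// First identityHashCode(): only flag the object; no space for the hash exists yet.
inline void request_hash(ToyHeader& h) {
  if (h.hash_state == HashState::NotHashed) {
    h.hash_state = HashState::HashedNotExpanded;
  }
}

// GC copy: an object that has been hashed but not yet expanded grows by
// one (aligned) word so the destination copy can hold the hash value.
inline size_t copy_size_in_words(size_t base_words, const ToyHeader& h) {
  if (h.hash_state == HashState::HashedNotExpanded) {
    return base_words + 1;  // reserve a slot; the real code re-aligns the size
  }
  return base_words;
}

// After the copy the state becomes HashedExpanded, and the identity-hash
// fast path can read the stored value from the appended slot instead of
// falling through to the slow path.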