#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

void* oopDesc::base_addr() { return this; }
const void* oopDesc::base_addr() const { return this; }

markWord oopDesc::mark() const {
  return AtomicAccess::load(&_mark);
}

markWord oopDesc::mark_acquire() const {
  return AtomicAccess::load_acquire(&_mark);
}

void oopDesc::set_mark(markWord m) {
  AtomicAccess::store(&_mark, m);
}

void oopDesc::set_mark(HeapWord* mem, markWord m) {
  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
  AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
}

void oopDesc::release_set_mark(markWord m) {
  AtomicAccess::release_store(&_mark, m);
}
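// AtomicAccess::cmpxchg returns the mark word found in the header: if it equals
// old_mark, this thread installed new_mark; otherwise the returned value is the
// competing mark that won the race.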
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark);
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order);
}
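// With compact object headers the prototype mark already encodes the compressed
// Klass pointer, so it must be obtained from the Klass; otherwise the global
// neutral prototype is used.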
markWord oopDesc::prototype_mark() const {
  if (UseCompactObjectHeaders) {
    return klass()->prototype_header();
  } else {
    return markWord::prototype();
  }
}

void oopDesc::init_mark() {
  set_mark(prototype_mark());
}
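// Klass access depends on the selected header layout: Compact reads the Klass
// from the mark word, Compressed decodes the narrow-Klass field, and the
// default (legacy) case reads the full Klass* field.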
Klass* oopDesc::klass() const {
  switch (ObjLayout::klass_mode()) {
  case ObjLayout::Compact:
    return mark().klass();
  case ObjLayout::Compressed:
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  default:
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const {
  switch (ObjLayout::klass_mode()) {
  case ObjLayout::Compact:
    return mark().klass_or_null();
  case ObjLayout::Compressed:
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  default:
    return _metadata._klass;
  }
}

// ... (elided in this listing) ...

  assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
  assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
  char* raw_mem = ((char*)mem + klass_offset_in_bytes());
  if (UseCompressedClassPointers) {
    AtomicAccess::release_store((narrowKlass*)raw_mem,
                                CompressedKlassPointers::encode_not_null(k));
  } else {
    AtomicAccess::release_store((Klass**)raw_mem, k);
  }
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  assert(has_klass_gap(), "precondition");
  *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

size_t oopDesc::size() {
  return size_given_klass(klass());
}

size_t oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  size_t s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.
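  // Worked example (64-bit, hypothetical values): for an instance whose layout
  // helper holds the instance size in bytes, lh == 16 yields
  // s = 16 >> LogHeapWordSize = 2 heap words.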
  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;

      assert(s == klass->oop_size(this), "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %zu", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %zu", s);
  return s;
}

bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); }
bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

template<typename T>
T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }

template <typename T>
size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
// ... (elided in this listing) ...
}
bool oopDesc::is_gc_marked() const {
  return mark().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  return mark().is_forwarded();
}

bool oopDesc::is_self_forwarded() const {
  return mark().is_self_forwarded();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(cast_from_oop<oopDesc*>(p) != this,
         "must not be used for self-forwarding, use forward_to_self() instead");
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(m.decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

void oopDesc::forward_to_self() {
  set_mark(mark().set_self_forwarded());
}

oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) {
  markWord old_mark = cas_set_mark(new_mark, compare, order);
  if (old_mark == compare) {
    return nullptr;
  } else {
    assert(old_mark.is_forwarded(), "must be forwarded here");
    return forwardee(old_mark);
  }
}

oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
  assert(cast_from_oop<oopDesc*>(p) != this,
         "must not be used for self-forwarding, use forward_to_self_atomic() instead");
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(forwardee(m) == p, "encoding must be reversible");
  return cas_set_forwardee(m, compare, order);
}

oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) {
  markWord new_mark = old_mark.set_self_forwarded();
  assert(forwardee(new_mark) == cast_to_oop(this), "encoding must be reversible");
  return cas_set_forwardee(new_mark, old_mark, order);
}

oop oopDesc::forwardee(markWord mark) const {
  assert(mark.is_forwarded(), "only decode when actually forwarded");
  if (mark.is_self_forwarded()) {
    return cast_to_oop(this);
  } else {
    return mark.forwardee();
  }
}

// Note that the forwardee is not the same thing as the displaced_mark.
// ... (elided in this listing) ...
  if (m.has_displaced_mark_helper()) {
    m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
  } else {
    set_mark(m.incr_age());
  }
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  size_t size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  size_t size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  oop_iterate_backwards(cl, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
  // In this assert, we cannot safely access the Klass* with compact headers.
  assert(UseCompactObjectHeaders || k == klass(), "wrong klass");
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
}

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == nullptr || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to avoid concurrent updates.
  markWord mrk = mark();
  if (mrk.is_unlocked() && !mrk.has_no_hash()) {
    return mrk.hash();
  } else if (mrk.is_marked()) {
    return mrk.hash();
  } else {
    return slow_identity_hash();
  }
}
// This checks the fast, simple case of whether the oop has_no_hash,
// to optimize the JVMTI table lookup.
bool oopDesc::fast_no_hash_check() {
  markWord mrk = mark_acquire();
  assert(!mrk.is_marked(), "should never be marked");
  return mrk.is_unlocked() && mrk.has_no_hash();
}

bool oopDesc::has_displaced_mark() const {
  return mark().has_displaced_mark_helper();
}

markWord oopDesc::displaced_mark() const {
  return mark().displaced_mark_helper();
}

void oopDesc::set_displaced_mark(markWord m) {
  mark().set_displaced_mark_helper(m);
}

bool oopDesc::mark_must_be_preserved() const {
  return mark_must_be_preserved(mark());
}

bool oopDesc::mark_must_be_preserved(markWord m) const {
  return m.must_be_preserved();
}

#endif // SHARE_OOPS_OOP_INLINE_HPP
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

void* oopDesc::base_addr() { return this; }
const void* oopDesc::base_addr() const { return this; }

markWord oopDesc::mark() const {
  return AtomicAccess::load(&_mark);
}

markWord oopDesc::mark_acquire() const {
  return AtomicAccess::load_acquire(&_mark);
}
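// With compact object headers the setters below store only the low 32 bits of
// the header (m.value32(): lock, age and hash-control bits), leaving the upper
// half untouched; set_mark_full() always stores the complete 64-bit header.
// (The bit-layout description here is an assumption; the authoritative layout
// lives in markWord.)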
void oopDesc::set_mark(markWord m) {
  if (UseCompactObjectHeaders) {
    AtomicAccess::store(reinterpret_cast<uint32_t volatile*>(&_mark), m.value32());
  } else {
    AtomicAccess::store(&_mark, m);
  }
}

void oopDesc::set_mark_full(markWord m) {
  AtomicAccess::store(&_mark, m);
}

void oopDesc::set_mark(HeapWord* mem, markWord m) {
  if (UseCompactObjectHeaders) {
    *(uint32_t*)(((char*)mem) + mark_offset_in_bytes()) = m.value32();
  } else {
    *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
  }
}

void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
  if (UseCompactObjectHeaders) {
    AtomicAccess::release_store((uint32_t*)(((char*)mem) + mark_offset_in_bytes()), m.value32());
  } else {
    AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
  }
}

void oopDesc::release_set_mark(markWord m) {
  if (UseCompactObjectHeaders) {
    AtomicAccess::release_store(reinterpret_cast<uint32_t volatile*>(&_mark), m.value32());
  } else {
    AtomicAccess::release_store(&_mark, m);
  }
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark);
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order);
}

markWord oopDesc::prototype_mark() const {
  if (UseCompactObjectHeaders) {
    return klass()->prototype_header();
  } else {
    return markWord::prototype();
  }
}

void oopDesc::init_mark() {
  set_mark(prototype_mark());
}
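// reinit_mark() resets the header to the prototype but carries the hash-control
// bits over from the current mark, so any identity-hash state already recorded
// for the object is preserved.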
void oopDesc::reinit_mark() {
  if (UseCompactObjectHeaders) {
    markWord m = prototype_mark().copy_hashctrl_from(mark());
    assert(m.is_neutral(), "must be neutral");
    set_mark(m);
  } else {
    set_mark(prototype_mark());
  }
}

Klass* oopDesc::klass() const {
  switch (ObjLayout::klass_mode()) {
  case ObjLayout::Compact:
    return mark().klass();
  case ObjLayout::Compressed:
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  default:
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const {
  switch (ObjLayout::klass_mode()) {
  case ObjLayout::Compact:
    return mark().klass_or_null();
  case ObjLayout::Compressed:
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  default:
    return _metadata._klass;
  }
}

// ... (elided in this listing) ...

  assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
  assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
  char* raw_mem = ((char*)mem + klass_offset_in_bytes());
  if (UseCompressedClassPointers) {
    AtomicAccess::release_store((narrowKlass*)raw_mem,
                                CompressedKlassPointers::encode_not_null(k));
  } else {
    AtomicAccess::release_store((Klass**)raw_mem, k);
  }
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  assert(has_klass_gap(), "precondition");
  *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}
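// With compact headers an object that has been expanded for its identity hash
// carries an extra appended hash field, so its actual size is one word (rounded
// up to object alignment) larger than the base size derived from the Klass.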
size_t oopDesc::size_given_mark_and_klass(markWord mrk, const Klass* kls) {
  size_t sz = base_size_given_klass(mrk, kls);
  if (UseCompactObjectHeaders) {
    assert(!mrk.has_displaced_mark_helper(), "must not be displaced");
    if (mrk.is_expanded() && kls->expand_for_hash(cast_to_oop(this), mrk)) {
      sz = align_object_size(sz + 1);
    }
  }
  return sz;
}
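// While this object is forwarded its own header holds the forwarding pointer,
// so with compact headers the Klass and hash bits have to be read from the
// forwardee's header instead.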
size_t oopDesc::size_forwarded() {
  assert(is_forwarded(), "must be forwarded");
  markWord m = mark();
  oop fwd = forwardee(m);
  if (!UseCompactObjectHeaders) {
    return fwd->size();
  }
  markWord fm = fwd->mark();
  Klass* klass = fm.klass();
  if (m.is_forward_expanded()) {
    // Forwardee was expanded during copy but the original was not.
    // Original must have base size.
    return fwd->base_size_given_klass(fm, klass);
  }
  // Original and copy have same size (whether expanded or not).
  return fwd->size_given_mark_and_klass(fm, klass);
}
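// copy_size() returns the size the copy of this object will need: if the object
// has been hashed but not yet expanded, the copy grows by one alignment unit to
// make room for the appended hash field.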
size_t oopDesc::copy_size(size_t size, markWord mark) const {
  if (UseCompactObjectHeaders) {
    assert(!mark.has_displaced_mark_helper(), "must not be displaced");
    Klass* klass = mark.klass();
    if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
      size = align_object_size(size + 1);
    }
  }
  assert(is_object_aligned(size), "Oop size is not properly aligned: %zu", size);
  return size;
}

size_t oopDesc::copy_size_cds(size_t size, markWord mark) const {
  if (UseCompactObjectHeaders) {
    assert(!mark.has_displaced_mark_helper(), "must not be displaced");
    Klass* klass = mark.klass();
    if (mark.is_not_hashed_expanded()) {
      assert(klass->expand_for_hash(cast_to_oop(this), mark), "must be?");
    }
    if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
      size = align_object_size(size + 1);
    }
    if (mark.is_not_hashed_expanded() && klass->expand_for_hash(cast_to_oop(this), mark)) {
      size = align_object_size(size - ObjectAlignmentInBytes / HeapWordSize);
    }
  }
  assert(is_object_aligned(size), "Oop size is not properly aligned: %zu", size);
  return size;
}

size_t oopDesc::size() {
  return size_given_mark_and_klass(mark(), klass());
}

size_t oopDesc::base_size_given_klass(markWord mrk, const Klass* klass) {
  int lh = klass->layout_helper();
  size_t s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this, mrk);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length;
#ifdef _LP64
      if (UseCompactObjectHeaders) {
        array_length = (size_t) mrk.array_length();
      } else
#endif
      array_length = (size_t)((arrayOop)this)->length();

      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
      if (s != klass->oop_size(this, mrk)) {
        tty->print_cr("length: %zu", array_length);
        tty->print_cr("log element size: %d", Klass::layout_helper_log2_element_size(lh));
        tty->print_cr("is_objArray: %s", BOOL_TO_STR(klass->is_objArray_klass()));
      }
      assert(s == klass->oop_size(this, mrk), "wrong array object size, s: %zu, oop_size: %zu", s, klass->oop_size(this, mrk));
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this, mrk);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %zu", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %zu", s);
  return s;
}

bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); }
bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

template<typename T>
T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }

template <typename T>
size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
// ... (elided in this listing) ...
}
bool oopDesc::is_gc_marked() const {
  return mark().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  return mark().is_forwarded();
}

bool oopDesc::is_self_forwarded() const {
  return mark().is_self_forwarded();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(cast_from_oop<oopDesc*>(p) != this,
         "must not be used for self-forwarding, use forward_to_self() instead");
  markWord m = markWord::encode_pointer_as_mark(p);
  if (UseCompactObjectHeaders && p->mark().is_expanded() && !mark().is_expanded()) {
    m = m.set_forward_expanded();
  }
  assert(m.decode_pointer() == p, "encoding must be reversible");
  set_mark_full(m);
}

void oopDesc::forward_to_self() {
  set_mark(mark().set_self_forwarded());
}

void oopDesc::reset_forwarded() {
  markWord m = mark();
  if (m.is_self_forwarded()) {
    unset_self_forwarded();
  } else if (m.is_forwarded()) {
    // Restore Klass* and hash-bits in the header,
    // for correct iteration.
    markWord fwd_mark = forwardee()->mark();
    if (m.is_forward_expanded()) {
      // Un-expand original object.
      fwd_mark = fwd_mark.set_hashed_not_expanded();
    }
    set_mark_full(fwd_mark);
  }
}
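// Returns nullptr if this thread successfully installed new_mark; otherwise
// returns the forwardee installed by the competing thread.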
oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) {
  markWord old_mark = cas_set_mark(new_mark, compare, order);
  if (old_mark == compare) {
    return nullptr;
  } else {
    assert(old_mark.is_forwarded(), "must be forwarded here");
    return forwardee(old_mark);
  }
}

oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
  assert(cast_from_oop<oopDesc*>(p) != this,
         "must not be used for self-forwarding, use forward_to_self_atomic() instead");
  markWord m = markWord::encode_pointer_as_mark(p);
  if (UseCompactObjectHeaders && compare.is_hashed_not_expanded()) {
    m = m.set_forward_expanded();
  }
  assert(forwardee(m) == p, "encoding must be reversible");
  return cas_set_forwardee(m, compare, order);
}

oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) {
  markWord new_mark = old_mark.set_self_forwarded();
  assert(forwardee(new_mark) == cast_to_oop(this), "encoding must be reversible");
  return cas_set_forwardee(new_mark, old_mark, order);
}

oop oopDesc::forwardee(markWord mark) const {
  assert(mark.is_forwarded(), "only decode when actually forwarded");
  if (mark.is_self_forwarded()) {
    return cast_to_oop(this);
  } else {
    return mark.forwardee();
  }
}

// Note that the forwardee is not the same thing as the displaced_mark.
// ... (elided in this listing) ...
  if (m.has_displaced_mark_helper()) {
    m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
  } else {
    set_mark(m.incr_age());
  }
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  size_t size = size_given_mark_and_klass(mark(), k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  size_t size = size_given_mark_and_klass(mark(), k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  oop_iterate_backwards(cl, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
  // In this assert, we cannot safely access the Klass* with compact headers.
  assert(UseCompactObjectHeaders || k == klass(), "wrong klass");
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
}

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == nullptr || obj->klass()->is_subtype_of(klass);
}
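// With compact headers a hashed-and-expanded object stores its identity hash in
// a field appended to the object (located via Klass::hash_offset_in_bytes); the
// legacy layout caches the hash in the mark word instead.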
intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to avoid concurrent updates.
  if (UseCompactObjectHeaders) {
    markWord mrk = mark();
    if (mrk.is_hashed_expanded()) {
      Klass* klass = mrk.klass();
      return int_field(klass->hash_offset_in_bytes(cast_to_oop(this), mrk));
    }
    // Fall-through to slow-case.
  } else {
    markWord mrk = mark();
    if (mrk.is_unlocked() && !mrk.has_no_hash()) {
      return mrk.hash();
    } else if (mrk.is_marked()) {
      return mrk.hash();
    }
    // Fall-through to slow-case.
  }
  return slow_identity_hash();
}
// This checks the fast, simple case of whether the oop has_no_hash,
// to optimize the JVMTI table lookup.
bool oopDesc::fast_no_hash_check() {
  markWord mrk = mark_acquire();
  assert(!mrk.is_marked(), "should never be marked");
  return (UseCompactObjectHeaders || mrk.is_unlocked()) && mrk.has_no_hash();
}

bool oopDesc::has_displaced_mark() const {
  return mark().has_displaced_mark_helper();
}

markWord oopDesc::displaced_mark() const {
  return mark().displaced_mark_helper();
}

void oopDesc::set_displaced_mark(markWord m) {
  mark().set_displaced_mark_helper(m);
}

bool oopDesc::mark_must_be_preserved() const {
  return mark_must_be_preserved(mark());
}

bool oopDesc::mark_must_be_preserved(markWord m) const {
  return m.must_be_preserved();
}

inline void oopDesc::initialize_hash_if_necessary(oop obj) {
  if (!UseCompactObjectHeaders) {
    return;
  }
  markWord m = mark();
  assert(!m.has_displaced_mark_helper(), "must not be displaced header");
  if (m.is_hashed_not_expanded()) {
    set_mark(initialize_hash_if_necessary(obj, m.klass(), m));
  }
}

#endif // SHARE_OOPS_OOP_INLINE_HPP