 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_OOP_INLINE_HPP
#define SHARE_OOPS_OOP_INLINE_HPP

#include "oops/oop.hpp"

#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/markWord.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

markWord oopDesc::mark() const {
  uintptr_t v = HeapAccess<MO_RELAXED>::load_at(as_oop(), mark_offset_in_bytes());
  return markWord(v);
}

markWord oopDesc::mark_acquire() const {
  return Atomic::load_acquire(&_mark);
}

markWord* oopDesc::mark_addr() const {
  return (markWord*) &_mark;
}

void oopDesc::set_mark(markWord m) {
  HeapAccess<MO_RELAXED>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

void oopDesc::set_mark(HeapWord* mem, markWord m) {
  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

void oopDesc::release_set_mark(markWord m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
  Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(as_oop(), mark_offset_in_bytes(), old_mark.value(), new_mark.value());
  return markWord(v);
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
}
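
// Illustrative only (not part of this file): a typical caller-side retry loop
// around cas_set_mark(). The age-increment transform is just an example; real
// callers apply whatever mark-word update they need.
//
//   markWord old_mark = obj->mark();
//   for (;;) {
//     markWord new_mark = old_mark.incr_age();          // example update
//     markWord witness  = obj->cas_set_mark(new_mark, old_mark);
//     if (witness == old_mark) break;                   // CAS succeeded
//     old_mark = witness;                               // lost the race; retry
//   }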

markWord oopDesc::resolve_mark() const {
  assert(LockingMode != LM_LEGACY, "Not safe with legacy stack-locking");
  markWord hdr = mark();
  if (hdr.has_displaced_mark_helper()) {
    hdr = hdr.displaced_mark_helper();
  }
  return hdr;
}
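
// A mark word is "displaced" when the object's monitor is inflated: the real
// header then lives in the ObjectMonitor, and resolve_mark() above chases that
// indirection so callers see the true header bits. Illustrative use:
//
//   markWord m = obj->resolve_mark();
//   uint age = m.age();   // valid even if obj's header is displaced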

markWord oopDesc::prototype_mark() const {
  if (UseCompactObjectHeaders) {
    return klass()->prototype_header();
  } else {
    return markWord::prototype();
  }
}

void oopDesc::init_mark() {
  set_mark(markWord::prototype_for_klass(klass()));
}

Klass* oopDesc::klass() const {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    assert(UseCompressedClassPointers, "only with compressed class pointers");
    markWord header = resolve_mark();
    return header.klass();
  } else if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  } else
#endif
  return _metadata._klass;
}
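
// With compact object headers the narrow Klass* is carried in the upper bits
// of the mark word itself, so decoding must first resolve any displaced
// header, as klass() does above. A rough sketch of the decode path, assuming
// compact headers are enabled:
//
//   markWord m = obj->resolve_mark();   // real header, even if displaced
//   Klass* k  = m.klass();              // decode narrow Klass* from the mark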

Klass* oopDesc::klass_or_null() const {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    assert(UseCompressedClassPointers, "only with compressed class pointers");
    markWord header = resolve_mark();
    return header.klass_or_null();
  } else if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  } else
#endif
  return _metadata._klass;
}

Klass* oopDesc::klass_or_null_acquire() const {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    assert(UseCompressedClassPointers, "only with compressed class pointers");
    markWord header = mark_acquire();
    if (header.has_monitor()) {
      header = header.monitor()->header();
    }
    return header.klass_or_null();
  } else if (UseCompressedClassPointers) {
    narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass);
    return CompressedKlassPointers::decode(nklass);
  } else
#endif
  return Atomic::load_acquire(&_metadata._klass);
}

void oopDesc::set_klass(Klass* k) {
  assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
  assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
  } else {
    _metadata._klass = k;
  }
}

void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
  assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
  assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
  char* raw_mem = ((char*)mem + klass_offset_in_bytes());
  if (UseCompressedClassPointers) {
    Atomic::release_store((narrowKlass*)raw_mem,
                          CompressedKlassPointers::encode_not_null(k));
  } else {
    Atomic::release_store((Klass**)raw_mem, k);
  }
}
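
// Illustrative allocation-path sketch: a new object's fields are initialized
// first, then release_set_klass() publishes the Klass* with release semantics
// so that a racing reader using klass_or_null_acquire() observes a fully
// initialized object. The helpers below are hypothetical names, for
// illustration only:
//
//   HeapWord* mem = allocate_memory(size_in_words);   // hypothetical
//   oopDesc::set_mark(mem, markWord::prototype());
//   initialize_fields(mem);                           // hypothetical
//   oopDesc::release_set_klass(mem, k);               // publish last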

int oopDesc::klass_gap() const {
  assert(!UseCompactObjectHeaders, "don't get Klass* gap with compact headers");
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers");
  if (UseCompressedClassPointers) {
    *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_gap(int v) {
  assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers");
  set_klass_gap((HeapWord*)this, v);
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      assert((s == klass->oop_size(this)) ||
             (Universe::is_gc_active() && is_objArray() && is_forwarded() && (get_UseParallelGC() || get_UseG1GC())),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}
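
// Worked example of the layout-helper arithmetic above (values illustrative):
// for an instance klass with lh == 16, the size is lh >> LogHeapWordSize,
// i.e. 2 words on a 64-bit VM. For an int[] whose (negative) array lh encodes
// log2_element_size == 2 and header_size == 16 bytes, a length-10 array
// occupies 16 + (10 << 2) = 56 bytes, which is then aligned up to the object
// alignment and converted to words.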

#ifdef _LP64
Klass* oopDesc::forward_safe_klass_impl(markWord m) const {
  assert(UseCompactObjectHeaders, "Only get here with compact headers");
  if (m.is_marked()) {
    oop fwd = forwardee(m);
    markWord m2 = fwd->mark();
    assert(!m2.is_marked() || m2.self_forwarded(), "no double forwarding: this: " PTR_FORMAT " (" INTPTR_FORMAT "), fwd: " PTR_FORMAT " (" INTPTR_FORMAT ")", p2i(this), m.value(), p2i(fwd), m2.value());
    m = m2;
  }
  return m.actual_mark().klass();
}
#endif

Klass* oopDesc::forward_safe_klass(markWord m) const {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    return forward_safe_klass_impl(m);
  } else
#endif
  {
    return klass();
  }
}

Klass* oopDesc::forward_safe_klass() const {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    return forward_safe_klass_impl(mark());
  } else
#endif
  {
    return klass();
  }
}

size_t oopDesc::forward_safe_size() {
  return size_given_klass(forward_safe_klass());
}

void oopDesc::forward_safe_init_mark() {
  if (UseCompactObjectHeaders) {
    set_mark(forward_safe_klass()->prototype_header());
  } else {
    init_mark();
  }
}
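
// The forward_safe_* accessors exist because, with compact headers, a
// forwarded object's mark word holds the forwarding pointer instead of the
// Klass* bits; they re-fetch the klass through the forwardee. Sketch of the
// intended GC-time usage (illustrative only):
//
//   size_t words = obj->forward_safe_size();   // safe even if obj->is_forwarded()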

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void* oopDesc::field_addr(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }

template <class T>
T* oopDesc::obj_field_addr(int offset) const { return (T*) field_addr(offset); }

template <typename T>
size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }

bool oopDesc::is_unlocked() const {
  return mark().is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark().has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // Forwarded objects carry the "marked" pattern in their mark word; the
  // forwarding pointer is installed by forward_to() and friends below.
  return mark().is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(p != cast_to_oop(this) || !UseAltGCForwarding, "Must not be called with self-forwarding");
  verify_forwardee(p);
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(forwardee(m) == p, "encoding must be reversible");
  set_mark(m);
}

void oopDesc::forward_to_self() {
#ifdef _LP64
  if (UseAltGCForwarding) {
    markWord m = mark();
    // If mark is displaced, we need to preserve the real header during GC.
    // It will be restored to the displaced header after GC.
    assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint");
    if (m.has_displaced_mark_helper()) {
      m = m.displaced_mark_helper();
    }
    m = m.set_self_forwarded();
    assert(forwardee(m) == cast_to_oop(this), "encoding must be reversible");
    set_mark(m);
  } else
#endif
  {
    forward_to(oop(this));
  }
}

oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
  assert(p != cast_to_oop(this) || !UseAltGCForwarding, "Must not be called with self-forwarding");
  verify_forwardee(p);
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(forwardee(m) == p, "encoding must be reversible");
  markWord old_mark = cas_set_mark(m, compare, order);
  if (old_mark == compare) {
    return NULL;
  } else {
    return forwardee(old_mark);
  }
}
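
// Illustrative parallel-scavenger claim pattern built on forward_to_atomic():
// the winner of the CAS installs the forwarding pointer and gets NULL back;
// losers receive the winner's forwardee. The copy/undo helpers below are
// hypothetical names, for illustration only:
//
//   markWord m = obj->mark();
//   oop copy = copy_to_survivor_space(obj, m);        // hypothetical
//   oop winner = obj->forward_to_atomic(copy, m);
//   if (winner != NULL) {
//     undo_allocation(copy);                          // hypothetical; lost the race
//     copy = winner;
//   }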

oop oopDesc::forward_to_self_atomic(markWord compare, atomic_memory_order order) {
#ifdef _LP64
  if (UseAltGCForwarding) {
    markWord m = compare;
    // If mark is displaced, we need to preserve the real header during GC.
    // It will be restored to the displaced header after GC.
    assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint");
    if (m.has_displaced_mark_helper()) {
      m = m.displaced_mark_helper();
    }
    m = m.set_self_forwarded();
    assert(forwardee(m) == cast_to_oop(this), "encoding must be reversible");
    markWord old_mark = cas_set_mark(m, compare, order);
    if (old_mark == compare) {
      return nullptr;
    } else {
      assert(old_mark.is_marked(), "must be marked here");
      return forwardee(old_mark);
    }
  } else
#endif
  {
    return forward_to_atomic(cast_to_oop(this), compare, order);
  }
}

oop oopDesc::forwardee(markWord header) const {
  assert(header.is_marked(), "only decode when actually forwarded");
#ifdef _LP64
  if (header.self_forwarded()) {
    return cast_to_oop(this);
  } else
#endif
  {
    return cast_to_oop(header.decode_pointer());
  }
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return forwardee(mark());
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark().age();
  } else {
    return mark().age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark().incr_age());
  } else {
    set_mark(mark().incr_age());
  }
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  oop_iterate_backwards(cl, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
  assert(UseCompactObjectHeaders || k == klass(), "wrong klass");
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
}
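
// Minimal closure sketch for the oop_iterate family (illustrative only):
//
//   class CountOopsClosure : public BasicOopIterateClosure {
//     int _count;
//    public:
//     CountOopsClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     int count() const { return _count; }
//   };
//
//   CountOopsClosure cl;
//   int size = obj->oop_iterate_size(&cl);   // visits obj's oop fields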

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markWord mrk = mark();
  if (mrk.is_unlocked() && !mrk.has_no_hash()) {
    return mrk.hash();
  } else if (mrk.is_marked()) {
    return mrk.hash();
  } else {
    return slow_identity_hash();
  }
}
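
// The identity hash lives in the mark word's hash bits once computed; the
// slow path (slow_identity_hash) installs it, inflating to a monitor if the
// header is displaced. Illustrative check that a computed hash is sticky:
//
//   intptr_t h1 = obj->identity_hash();
//   intptr_t h2 = obj->identity_hash();
//   assert(h1 == h2, "identity hash must be stable");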