17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_OOP_INLINE_HPP
26 #define SHARE_OOPS_OOP_INLINE_HPP
27
28 #include "oops/oop.hpp"
29
30 #include "memory/universe.hpp"
31 #include "memory/iterator.inline.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/compressedKlass.inline.hpp"
36 #include "oops/instanceKlass.hpp"
37 #include "oops/markWord.hpp"
38 #include "oops/oopsHierarchy.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/globals.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/debug.hpp"
43 #include "utilities/macros.hpp"
44 #include "utilities/globalDefinitions.hpp"
45
46 // Implementation of all inlined member functions defined in oop.hpp
47 // We need a separate file to avoid circular references
48
// Plain (relaxed) atomic load of the object's header word.
49 markWord oopDesc::mark() const {
50 return Atomic::load(&_mark);
51 }
52
// Acquire load of the mark; pairs with the release_set_mark() stores below.
53 markWord oopDesc::mark_acquire() const {
54 return Atomic::load_acquire(&_mark);
55 }
56
// Raw address of the mark word (const is cast away for in-place updates).
57 markWord* oopDesc::mark_addr() const {
58 return (markWord*) &_mark;
59 }
60
// Relaxed atomic store of a new mark.
61 void oopDesc::set_mark(markWord m) {
62 Atomic::store(&_mark, m);
63 }
64
// Non-atomic store of the mark into raw memory at 'mem' + mark_offset_in_bytes();
// used before the object is published to other threads.
65 void oopDesc::set_mark(HeapWord* mem, markWord m) {
66 *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
67 }
68
// Release-store variant for raw memory: publishes prior initializing stores.
69 void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
70 Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
71 }
72
// Release-store of this object's mark; pairs with mark_acquire().
73 void oopDesc::release_set_mark(markWord m) {
74 Atomic::release_store(&_mark, m);
75 }
76
// Compare-and-swap on the mark word; returns the value observed before the
// attempt (equal to old_mark on success).
77 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
78 return Atomic::cmpxchg(&_mark, old_mark, new_mark);
79 }
80
// CAS with an explicit memory order supplied by the caller.
81 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
82 return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
83 }
84
// Reset the header to the prototype mark.
85 void oopDesc::init_mark() {
86 set_mark(markWord::prototype());
87 }
88
// Return this object's Klass*, decoding the compressed-class field when
// UseCompressedClassPointers is on. The field must not be null/zero here
// (decode_not_null).
89 Klass* oopDesc::klass() const {
90 if (UseCompressedClassPointers) {
91 return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
92 } else {
93 return _metadata._klass;
94 }
95 }
96
// Like klass(), but tolerates a zero class field (plain decode).
97 Klass* oopDesc::klass_or_null() const {
98 if (UseCompressedClassPointers) {
99 return CompressedKlassPointers::decode(_metadata._compressed_klass);
100 } else {
101 return _metadata._klass;
102 }
103 }
104
// Acquire-load variant; pairs with the release stores in release_set_klass().
105 Klass* oopDesc::klass_or_null_acquire() const {
106 if (UseCompressedClassPointers) {
107 narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass);
108 return CompressedKlassPointers::decode(nklass);
109 } else {
110 return Atomic::load_acquire(&_metadata._klass);
111 }
112 }
113
// Decode without the usual verification asserts; for use where the class
// field may be transiently invalid.
114 Klass* oopDesc::klass_without_asserts() const {
115 if (UseCompressedClassPointers) {
116 return CompressedKlassPointers::decode_without_asserts(_metadata._compressed_klass);
117 } else {
118 return _metadata._klass;
119 }
120 }
121
// Store a (non-null, unless bootstrapping) Klass* into this object's header.
122 void oopDesc::set_klass(Klass* k) {
123 assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
124 if (UseCompressedClassPointers) {
125 _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
126 } else {
127 _metadata._klass = k;
128 }
129 }
130
// Release-store the class into raw memory at 'mem'; publishes the object's
// initializing stores and pairs with klass_or_null_acquire().
131 void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
132 assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
133 char* raw_mem = ((char*)mem + klass_offset_in_bytes());
134 if (UseCompressedClassPointers) {
135 Atomic::release_store((narrowKlass*)raw_mem,
136 CompressedKlassPointers::encode_not_null(k));
137 } else {
138 Atomic::release_store((Klass**)raw_mem, k);
139 }
140 }
141
// Write the 32-bit gap that follows the compressed class field; a no-op when
// class pointers are uncompressed (no gap exists then).
142 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
143 if (UseCompressedClassPointers) {
144 *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
145 }
146 }
147
// Subtype test against this object's dynamic class.
148 bool oopDesc::is_a(Klass* k) const {
149 return klass()->is_subtype_of(k);
150 }
151
// Object size in HeapWords, computed from this object's own Klass.
152 size_t oopDesc::size() {
153 return size_given_klass(klass());
154 }
155
// Compute this object's size in HeapWords given its Klass, using the
// layout-helper fast paths where possible and falling back to the virtual
// Klass::oop_size() call otherwise.
156 size_t oopDesc::size_given_klass(Klass* klass) {
157 int lh = klass->layout_helper();
158 size_t s;
159
160 // lh is now a value computed at class initialization that may hint
161 // at the size. For instances, this is positive and equal to the
162 // size. For arrays, this is negative and provides log2 of the
// NOTE(review): listing lines 163-172 appear to be missing here (the branch
// that tests lh against _lh_neutral_value and the instance fast path);
// this span is incomplete in the listing -- do not edit its logic from here.
173 s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
174 } else {
175 s = klass->oop_size(this);
176 }
177 } else if (lh <= Klass::_lh_neutral_value) {
178 // The most common case is instances; fall through if so.
179 if (lh < Klass::_lh_neutral_value) {
180 // Second most common case is arrays. We have to fetch the
181 // length of the array, shift (multiply) it appropriately,
182 // up to wordSize, add the header, and align to object size.
183 size_t size_in_bytes;
184 size_t array_length = (size_t) ((arrayOop)this)->length();
185 size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
186 size_in_bytes += Klass::layout_helper_header_size(lh);
187
188 // This code could be simplified, but by keeping array_header_in_bytes
189 // in units of bytes and doing it this way we can round up just once,
190 // skipping the intermediate round to HeapWordSize.
191 s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
192
193 assert(s == klass->oop_size(this) || size_might_change(), "wrong array object size");
194 } else {
195 // Must be zero, so bite the bullet and take the virtual call.
196 s = klass->oop_size(this);
197 }
198 }
199
200 assert(s > 0, "Oop size must be greater than zero, not " SIZE_FORMAT, s);
201 assert(is_object_aligned(s), "Oop size is not properly aligned: " SIZE_FORMAT, s);
202 return s;
203 }
204
// Dynamic-type predicates, all dispatched through this object's Klass.
205 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
206 bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); }
207 bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); }
208 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
209 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
210 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
211
// Address of the field at byte 'offset' within this object.
212 template<typename T>
213 T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
214
// Byte offset of an interior pointer 'p' relative to this object's base.
215 template <typename T>
216 size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
217
// oop field loads/stores routed through the GC access barriers (HeapAccess).
218 template <DecoratorSet decorators>
219 inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
220 inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }
221
222 inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
223 template <DecoratorSet decorators>
224 inline void oopDesc::obj_field_put_access(int offset, oop value) { HeapAccess<decorators>::oop_store_at(as_oop(), offset, value); }
// NOTE(review): listing lines 225-249 appear to be missing here (presumably
// the remaining primitive field accessors); span is incomplete in this view.
250 inline jdouble oopDesc::double_field(int offset) const { return *field_addr<jdouble>(offset); }
251 inline void oopDesc::double_field_put(int offset, jdouble value) { *field_addr<jdouble>(offset) = value; }
252
// Lock-state queries delegated to the mark word.
253 bool oopDesc::is_locked() const {
254 return mark().is_locked();
255 }
256
257 bool oopDesc::is_unlocked() const {
258 return mark().is_unlocked();
259 }
260
// True when the GC has marked (forwarded) this object.
261 bool oopDesc::is_gc_marked() const {
262 return mark().is_marked();
263 }
264
265 // Used by scavengers
266 bool oopDesc::is_forwarded() const {
267 return mark().is_forwarded();
268 }
269
270 // Used by scavengers
// Unconditionally install a forwarding pointer to 'p' in the mark word.
271 void oopDesc::forward_to(oop p) {
272 markWord m = markWord::encode_pointer_as_mark(p);
273 assert(m.decode_pointer() == p, "encoding must be reversible");
274 set_mark(m);
275 }
276
// Atomically install a forwarding pointer; returns nullptr on success, or
// the oop decoded from the mark that won the race otherwise.
// NOTE(review): on CAS failure the old mark is decoded unconditionally;
// this presumes the competing store was also a forwarding mark -- confirm
// against callers.
277 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
278 markWord m = markWord::encode_pointer_as_mark(p);
279 assert(m.decode_pointer() == p, "encoding must be reversible");
280 markWord old_mark = cas_set_mark(m, compare, order);
281 if (old_mark == compare) {
282 return nullptr;
283 } else {
284 return cast_to_oop(old_mark.decode_pointer());
285 }
286 }
287
288 // Note that the forwardee is not the same thing as the displaced_mark.
289 // The forwardee is used when copying during scavenge and mark-sweep.
290 // It does need to clear the low two locking- and GC-related bits.
291 oop oopDesc::forwardee() const {
292 return mark().forwardee();
293 }
294
295 // The following method needs to be MT safe.
// Reads the GC age, from the displaced mark when one is installed.
296 uint oopDesc::age() const {
297 markWord m = mark();
298 assert(!m.is_marked(), "Attempt to read age from forwarded mark");
299 if (m.has_displaced_mark_helper()) {
300 return m.displaced_mark_helper().age();
301 } else {
302 return m.age();
303 }
304 }
305
// Bump the GC age stored in the (possibly displaced) mark word.
306 void oopDesc::incr_age() {
307 markWord m = mark();
308 assert(!m.is_marked(), "Attempt to increment age of forwarded mark");
309 if (m.has_displaced_mark_helper()) {
310 m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
311 } else {
312 set_mark(m.incr_age());
// NOTE(review): listing lines 313-328 appear to be missing here (the end of
// incr_age and the start of the first oop_iterate overload(s)); the lines
// below are the tail of an oop_iterate_size-style function whose header is
// not visible in this view.
329 size_t size = size_given_klass(k);
330 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
331 return size;
332 }
333
// Iterate the oop fields within 'mr' and return the object's size in
// HeapWords (size is computed before iteration may overwrite the header).
334 template <typename OopClosureType>
335 size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
336 Klass* k = klass();
337 size_t size = size_given_klass(k);
338 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
339 return size;
340 }
341
// Backwards iteration using this object's own Klass.
342 template <typename OopClosureType>
343 void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
344 oop_iterate_backwards(cl, klass());
345 }
346
// Backwards iteration with a caller-supplied Klass (must match).
347 template <typename OopClosureType>
348 void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
349 assert(k == klass(), "wrong klass");
350 OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
351 }
352
353 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
354 return obj == nullptr || obj->klass()->is_subtype_of(klass);
355 }
356
// Returns the identity hash, taking the slow path (which may install a hash
// under synchronization) only when the fast paths cannot read one directly.
357 intptr_t oopDesc::identity_hash() {
358 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
359 // Note: The mark must be read into local variable to avoid concurrent updates.
360 markWord mrk = mark();
361 if (mrk.is_unlocked() && !mrk.has_no_hash()) {
362 return mrk.hash();
363 } else if (mrk.is_marked()) {
364 return mrk.hash();
365 } else {
366 return slow_identity_hash();
367 }
368 }
369
// ===== Second snapshot of oop.inline.hpp follows (variant with UseCompactObjectHeaders support) =====
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_OOP_INLINE_HPP
26 #define SHARE_OOPS_OOP_INLINE_HPP
27
28 #include "oops/oop.hpp"
29
30 #include "memory/universe.hpp"
31 #include "memory/iterator.inline.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/compressedKlass.inline.hpp"
36 #include "oops/instanceKlass.hpp"
37 #include "oops/markWord.inline.hpp"
38 #include "oops/oopsHierarchy.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/globals.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/debug.hpp"
43 #include "utilities/macros.hpp"
44 #include "utilities/globalDefinitions.hpp"
45
46 // Implementation of all inlined member functions defined in oop.hpp
47 // We need a separate file to avoid circular references
48
// Plain (relaxed) atomic load of the object's header word.
49 markWord oopDesc::mark() const {
50 return Atomic::load(&_mark);
51 }
52
// Acquire load of the mark; pairs with the release_set_mark() stores below.
53 markWord oopDesc::mark_acquire() const {
54 return Atomic::load_acquire(&_mark);
55 }
56
// Raw address of the mark word (const is cast away for in-place updates).
57 markWord* oopDesc::mark_addr() const {
58 return (markWord*) &_mark;
59 }
60
// Relaxed atomic store of a new mark.
61 void oopDesc::set_mark(markWord m) {
62 Atomic::store(&_mark, m);
63 }
64
// Non-atomic store of the mark into raw memory at 'mem' + mark_offset_in_bytes();
// used before the object is published to other threads.
65 void oopDesc::set_mark(HeapWord* mem, markWord m) {
66 *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
67 }
68
// Release-store of this object's mark; pairs with mark_acquire().
69 void oopDesc::release_set_mark(markWord m) {
70 Atomic::release_store(&_mark, m);
71 }
72
// Release-store variant for raw memory: publishes prior initializing stores.
73 void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
74 Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
75 }
76
// Compare-and-swap on the mark word; returns the value observed before the
// attempt (equal to old_mark on success).
77 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
78 return Atomic::cmpxchg(&_mark, old_mark, new_mark);
79 }
80
// CAS with an explicit memory order supplied by the caller.
81 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
82 return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
83 }
84
// With compact headers, the prototype mark lives in the Klass (it encodes
// the class); otherwise it is the global markWord prototype.
85 markWord oopDesc::prototype_mark() const {
86 if (UseCompactObjectHeaders) {
87 return klass()->prototype_header();
88 } else {
89 return markWord::prototype();
90 }
91 }
92
// Reset the header to the appropriate prototype for the current header mode.
93 void oopDesc::init_mark() {
94 if (UseCompactObjectHeaders) {
95 set_mark(prototype_mark());
96 } else {
97 set_mark(markWord::prototype());
98 }
99 }
100
// Return this object's Klass*. With compact headers the class is decoded
// from the mark word; otherwise from the (possibly compressed) class field.
101 Klass* oopDesc::klass() const {
102 if (UseCompactObjectHeaders) {
103 return mark().klass();
104 } else if (UseCompressedClassPointers) {
105 return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
106 } else {
107 return _metadata._klass;
108 }
109 }
110
// Like klass(), but tolerates a null/zero class encoding.
111 Klass* oopDesc::klass_or_null() const {
112 if (UseCompactObjectHeaders) {
113 return mark().klass_or_null();
114 } else if (UseCompressedClassPointers) {
115 return CompressedKlassPointers::decode(_metadata._compressed_klass);
116 } else {
117 return _metadata._klass;
118 }
119 }
120
// Acquire-load variant; with compact headers the acquire is on the mark word
// itself, otherwise it pairs with release_set_klass().
121 Klass* oopDesc::klass_or_null_acquire() const {
122 if (UseCompactObjectHeaders) {
123 return mark_acquire().klass();
124 } else if (UseCompressedClassPointers) {
125 narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass);
126 return CompressedKlassPointers::decode(nklass);
127 } else {
128 return Atomic::load_acquire(&_metadata._klass);
129 }
130 }
131
// Decode without the usual verification asserts; for use where the class
// encoding may be transiently invalid.
132 Klass* oopDesc::klass_without_asserts() const {
133 if (UseCompactObjectHeaders) {
134 return mark().klass_without_asserts();
135 } else if (UseCompressedClassPointers) {
136 return CompressedKlassPointers::decode_without_asserts(_metadata._compressed_klass);
137 } else {
138 return _metadata._klass;
139 }
140 }
141
// Store a Klass* into the header; illegal in compact-header mode, where the
// class is part of the mark word instead of a separate field.
142 void oopDesc::set_klass(Klass* k) {
143 assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
144 assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
145 if (UseCompressedClassPointers) {
146 _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
147 } else {
148 _metadata._klass = k;
149 }
150 }
151
// Release-store the class into raw memory at 'mem'; publishes the object's
// initializing stores and pairs with klass_or_null_acquire(). Illegal in
// compact-header mode.
152 void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
153 assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
154 assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
155 char* raw_mem = ((char*)mem + klass_offset_in_bytes());
156 if (UseCompressedClassPointers) {
157 Atomic::release_store((narrowKlass*)raw_mem,
158 CompressedKlassPointers::encode_not_null(k));
159 } else {
160 Atomic::release_store((Klass**)raw_mem, k);
161 }
162 }
163
// Write the 32-bit gap following the compressed class field; no gap exists
// with compact headers (asserted) or uncompressed class pointers (no-op).
164 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
165 assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers");
166 if (UseCompressedClassPointers) {
167 *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
168 }
169 }
170
// Subtype test against this object's dynamic class.
171 bool oopDesc::is_a(Klass* k) const {
172 return klass()->is_subtype_of(k);
173 }
174
// Object size in HeapWords, computed from this object's own Klass.
175 size_t oopDesc::size() {
176 return size_given_klass(klass());
177 }
178
// Compute this object's size in HeapWords given its Klass, using the
// layout-helper fast paths where possible and falling back to the virtual
// Klass::oop_size() call otherwise.
179 size_t oopDesc::size_given_klass(Klass* klass) {
180 int lh = klass->layout_helper();
181 size_t s;
182
183 // lh is now a value computed at class initialization that may hint
184 // at the size. For instances, this is positive and equal to the
185 // size. For arrays, this is negative and provides log2 of the
// NOTE(review): listing lines 186-195 appear to be missing here (the branch
// that tests lh against _lh_neutral_value and the instance fast path);
// this span is incomplete in the listing -- do not edit its logic from here.
196 s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
197 } else {
198 s = klass->oop_size(this);
199 }
200 } else if (lh <= Klass::_lh_neutral_value) {
201 // The most common case is instances; fall through if so.
202 if (lh < Klass::_lh_neutral_value) {
203 // Second most common case is arrays. We have to fetch the
204 // length of the array, shift (multiply) it appropriately,
205 // up to wordSize, add the header, and align to object size.
206 size_t size_in_bytes;
207 size_t array_length = (size_t) ((arrayOop)this)->length();
208 size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
209 size_in_bytes += Klass::layout_helper_header_size(lh);
210
211 // This code could be simplified, but by keeping array_header_in_bytes
212 // in units of bytes and doing it this way we can round up just once,
213 // skipping the intermediate round to HeapWordSize.
214 s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;
215
216 assert(s == klass->oop_size(this) || size_might_change(klass), "wrong array object size");
217 } else {
218 // Must be zero, so bite the bullet and take the virtual call.
219 s = klass->oop_size(this);
220 }
221 }
222
223 assert(s > 0, "Oop size must be greater than zero, not " SIZE_FORMAT, s);
224 assert(is_object_aligned(s), "Oop size is not properly aligned: " SIZE_FORMAT, s);
225 return s;
226 }
227
228 #ifdef _LP64
// With compact headers a forwarded object's mark no longer holds its class;
// follow the forwarding pointer (at most one hop, asserted) and read the
// class from the forwardee's mark instead.
229 Klass* oopDesc::forward_safe_klass_impl(markWord m) const {
230 assert(UseCompactObjectHeaders, "Only get here with compact headers");
231 if (m.is_marked()) {
232 oop fwd = forwardee(m);
233 markWord m2 = fwd->mark();
234 assert(!m2.is_marked() || m2.self_forwarded(), "no double forwarding: this: " PTR_FORMAT " (" INTPTR_FORMAT "), fwd: " PTR_FORMAT " (" INTPTR_FORMAT ")", p2i(this), m.value(), p2i(fwd), m2.value());
235 m = m2;
236 }
237 return m.klass();
238 }
239 #endif
240
// Forwarding-safe class lookup from a caller-supplied mark; falls back to a
// plain klass() read when compact headers are off (class field is separate).
241 Klass* oopDesc::forward_safe_klass(markWord m) const {
242 #ifdef _LP64
243 if (UseCompactObjectHeaders) {
244 return forward_safe_klass_impl(m);
245 } else
246 #endif
247 {
248 return klass();
249 }
250 }
251
// Forwarding-safe class lookup using this object's current mark.
252 Klass* oopDesc::forward_safe_klass() const {
253 #ifdef _LP64
254 if (UseCompactObjectHeaders) {
255 return forward_safe_klass_impl(mark());
256 } else
257 #endif
258 {
259 return klass();
260 }
261 }
262
// Size computed from the forwarding-safe class (usable mid-GC).
263 size_t oopDesc::forward_safe_size() {
264 return size_given_klass(forward_safe_klass());
265 }
266
// Re-initialize the mark, preserving the class encoding when compact headers
// carry the class in the mark word.
267 void oopDesc::forward_safe_init_mark() {
268 if (UseCompactObjectHeaders) {
269 set_mark(forward_safe_klass()->prototype_header());
270 } else {
271 set_mark(markWord::prototype());
272 }
273 }
274
// Dynamic-type predicates, all dispatched through this object's Klass.
275 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
276 bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); }
277 bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); }
278 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
279 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
280 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
281
// Address of the field at byte 'offset' within this object.
282 template<typename T>
283 T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
284
// Byte offset of an interior pointer 'p' relative to this object's base.
285 template <typename T>
286 size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
287
// oop field loads/stores routed through the GC access barriers (HeapAccess).
288 template <DecoratorSet decorators>
289 inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
290 inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }
291
292 inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
293 template <DecoratorSet decorators>
294 inline void oopDesc::obj_field_put_access(int offset, oop value) { HeapAccess<decorators>::oop_store_at(as_oop(), offset, value); }
// NOTE(review): listing lines 295-319 appear to be missing here (presumably
// the remaining primitive field accessors); span is incomplete in this view.
320 inline jdouble oopDesc::double_field(int offset) const { return *field_addr<jdouble>(offset); }
321 inline void oopDesc::double_field_put(int offset, jdouble value) { *field_addr<jdouble>(offset) = value; }
322
// Lock-state queries delegated to the mark word.
323 bool oopDesc::is_locked() const {
324 return mark().is_locked();
325 }
326
327 bool oopDesc::is_unlocked() const {
328 return mark().is_unlocked();
329 }
330
// True when the GC has marked (forwarded) this object.
331 bool oopDesc::is_gc_marked() const {
332 return mark().is_marked();
333 }
334
335 // Used by scavengers
336 bool oopDesc::is_forwarded() const {
337 return mark().is_forwarded();
338 }
339
// True when the object was forwarded to itself (promotion-failure style
// self-forwarding encoded directly in the mark).
340 bool oopDesc::is_self_forwarded() const {
341 return mark().self_forwarded();
342 }
343
344 // Used by scavengers
// Unconditionally install a forwarding pointer to 'p' in the mark word.
345 void oopDesc::forward_to(oop p) {
346 markWord m = markWord::encode_pointer_as_mark(p);
347 assert(m.decode_pointer() == p, "encoding must be reversible");
348 set_mark(m);
349 }
350
// Mark this object as forwarded to itself.
351 void oopDesc::forward_to_self() {
352 set_mark(mark().set_self_forwarded());
353 }
354
// CAS-install a forwarding mark; nullptr on success, otherwise the winner's
// forwardee decoded from the observed mark (which must be a forwarding mark).
355 oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) {
356 markWord old_mark = cas_set_mark(new_mark, compare, order);
357 if (old_mark == compare) {
358 return nullptr;
359 } else {
360 assert(old_mark.is_forwarded(), "must be forwarded here");
361 return forwardee(old_mark);
362 }
363 }
364
// Atomically forward this object to 'p'; nullptr on success, else the
// forwardee installed by the competing thread.
365 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
366 markWord m = markWord::encode_pointer_as_mark(p);
367 assert(forwardee(m) == p, "encoding must be reversible");
368 return cas_set_forwardee(m, compare, order);
369 }
370
// Atomically self-forward; same success/failure contract as above.
371 oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) {
372 markWord new_mark = old_mark.set_self_forwarded();
373 assert(forwardee(new_mark) == cast_to_oop(this), "encoding must be reversible");
374 return cas_set_forwardee(new_mark, old_mark, order);
375 }
376
// Decode the forwardee from a forwarding mark; a self-forwarded mark means
// the object itself is the forwardee.
377 oop oopDesc::forwardee(markWord mark) const {
378 assert(mark.is_forwarded(), "only decode when actually forwarded");
379 if (mark.self_forwarded()) {
380 return cast_to_oop(this);
381 } else {
382 return mark.forwardee();
383 }
384 }
385
386 // Note that the forwardee is not the same thing as the displaced_mark.
387 // The forwardee is used when copying during scavenge and mark-sweep.
388 // It does need to clear the low two locking- and GC-related bits.
389 oop oopDesc::forwardee() const {
390 return forwardee(mark());
391 }
392
// Clear the self-forwarded bit from the mark.
393 void oopDesc::unset_self_forwarded() {
394 set_mark(mark().unset_self_forwarded());
395 }
396
397 // The following method needs to be MT safe.
// Reads the GC age, from the displaced mark when one is installed.
398 uint oopDesc::age() const {
399 markWord m = mark();
400 assert(!m.is_marked(), "Attempt to read age from forwarded mark");
401 if (m.has_displaced_mark_helper()) {
402 return m.displaced_mark_helper().age();
403 } else {
404 return m.age();
405 }
406 }
407
// Bump the GC age stored in the (possibly displaced) mark word.
408 void oopDesc::incr_age() {
409 markWord m = mark();
410 assert(!m.is_marked(), "Attempt to increment age of forwarded mark");
411 if (m.has_displaced_mark_helper()) {
412 m.set_displaced_mark_helper(m.displaced_mark_helper().incr_age());
413 } else {
414 set_mark(m.incr_age());
// NOTE(review): listing lines 415-430 appear to be missing here (the end of
// incr_age and the start of the first oop_iterate overload(s)); the lines
// below are the tail of an oop_iterate_size-style function whose header is
// not visible in this view.
431 size_t size = size_given_klass(k);
432 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
433 return size;
434 }
435
// Iterate the oop fields within 'mr' and return the object's size in
// HeapWords (size is computed before iteration may overwrite the header).
436 template <typename OopClosureType>
437 size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
438 Klass* k = klass();
439 size_t size = size_given_klass(k);
440 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
441 return size;
442 }
443
// Backwards iteration using this object's own Klass.
444 template <typename OopClosureType>
445 void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
446 oop_iterate_backwards(cl, klass());
447 }
448
// Backwards iteration with a caller-supplied Klass; the consistency assert is
// relaxed under compact headers where re-reading klass() here is unsafe.
449 template <typename OopClosureType>
450 void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
451 // In this assert, we cannot safely access the Klass* with compact headers.
452 assert(UseCompactObjectHeaders || k == klass(), "wrong klass");
453 OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
454 }
455
456 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
457 return obj == nullptr || obj->klass()->is_subtype_of(klass);
458 }
459
// Returns the identity hash, taking the slow path (which may install a hash
// under synchronization) only when the fast paths cannot read one directly.
460 intptr_t oopDesc::identity_hash() {
461 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
462 // Note: The mark must be read into local variable to avoid concurrent updates.
463 markWord mrk = mark();
464 if (mrk.is_unlocked() && !mrk.has_no_hash()) {
465 return mrk.hash();
466 } else if (mrk.is_marked()) {
467 return mrk.hash();
468 } else {
469 return slow_identity_hash();
470 }
471 }
472
// ===== End of second snapshot =====