  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

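// Mark-word setters with release semantics: the release_store orders this thread's
// earlier initializing stores before the publication of the mark word. The HeapWord*
// overload writes at the mark offset inside raw heap memory; the other overload writes
// this object's own _mark field.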
void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
  AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
}

void oopDesc::release_set_mark(markWord m) {
  AtomicAccess::release_store(&_mark, m);
}

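// Atomically install new_mark if the current mark equals old_mark. The return value is
// the mark observed before the exchange, so the caller can test for success by comparing
// it with old_mark. The second overload lets the caller choose the memory order.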
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark);
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order);
}

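// The initial mark word for a freshly allocated object. With compact object headers or
// Valhalla the prototype is klass-specific, because the mark word then carries per-klass
// information (see klass() and the inline-type/flat-array queries below); otherwise the
// global default prototype is sufficient.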
markWord oopDesc::prototype_mark() const {
  if (UseCompactObjectHeaders || EnableValhalla) {
    return klass()->prototype_header();
  } else {
    return markWord::prototype();
  }
}

void oopDesc::init_mark() {
  set_mark(prototype_mark());
}

// This is specifically for Parallel GC. The other collectors need klass()->prototype_header()
// even without using Compact Object Headers. The issue is that this operation is unsafe
// under Parallel, as there are multiple concurrent GC workers that could access it.
// In practice, this has led to relatively frequent crashes.
// More work needs to be done in the future to consolidate reinit_mark with init_mark.
void oopDesc::reinit_mark() {
  if (UseCompactObjectHeaders) {
    set_mark(klass()->prototype_header());
  } else {
    set_mark(markWord::prototype());
  }
}

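// Load the Klass*: from the mark word when compact object headers are used, by decoding
// the compressed klass field when compressed class pointers are used, or from the
// full-width _klass field otherwise.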
Klass* oopDesc::klass() const {
  switch (ObjLayout::klass_mode()) {
    case ObjLayout::Compact:
      return mark().klass();
    case ObjLayout::Compressed:
      return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
    default:
      return _metadata._klass;
  }
}

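// Variant of klass() that tolerates an object whose klass has not been installed yet and
// returns null in that case.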
Klass* oopDesc::klass_or_null() const {
  switch (ObjLayout::klass_mode()) {
    case ObjLayout::Compact:
      return mark().klass_or_null();
    case ObjLayout::Compressed:
      return CompressedKlassPointers::decode(_metadata._compressed_klass);
    default:
      return _metadata._klass;
  }
}

// ...

      // skipping the intermediate round to HeapWordSize.
      s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;

      assert(s == klass->oop_size(this), "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %zu", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %zu", s);
  return s;
}

bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); }
bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_refArray() const { return klass()->is_refArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
bool oopDesc::is_refined_objArray() const { return klass()->is_refined_objArray_klass(); }

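// Valhalla type queries. Whether an object is an inline type is encoded directly in the
// mark word. On 64-bit platforms the flat-array and null-free-array properties are also
// read from the mark word, but only while the mark is unlocked; otherwise the queries
// fall back to the klass, as 32-bit platforms always do.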
bool oopDesc::is_inline_type() const { return mark().is_inline_type(); }
#ifdef _LP64
bool oopDesc::is_flatArray() const {
  markWord mrk = mark();
  return (mrk.is_unlocked()) ? mrk.is_flat_array() : klass()->is_flatArray_klass();
}
bool oopDesc::is_null_free_array() const {
  markWord mrk = mark();
  return (mrk.is_unlocked()) ? mrk.is_null_free_array() : klass()->is_null_free_array_klass();
}
#else
bool oopDesc::is_flatArray() const { return klass()->is_flatArray_klass(); }
bool oopDesc::is_null_free_array() const { return klass()->is_null_free_array_klass(); }
#endif

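// Raw address of the field at the given byte offset within this object.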
template<typename T>
T* oopDesc::field_addr(int offset) const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }

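// Byte offset of the interior pointer p from the start of this object.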
template <typename T>
size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

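// Reference (oop) fields go through the HeapAccess API so that the GC barriers required
// by the current collector are applied; the decorated variants let the caller pass extra
// access decorators.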
template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
template <DecoratorSet decorators>
inline void oopDesc::obj_field_put_access(int offset, oop value) { HeapAccess<decorators>::oop_store_at(as_oop(), offset, value); }

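// Primitive fields are accessed with plain loads and stores at the computed field address.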
inline jbyte oopDesc::byte_field(int offset) const { return *field_addr<jbyte>(offset); }
inline void oopDesc::byte_field_put(int offset, jbyte value) { *field_addr<jbyte>(offset) = value; }

inline jchar oopDesc::char_field(int offset) const { return *field_addr<jchar>(offset); }
inline void oopDesc::char_field_put(int offset, jchar value) { *field_addr<jchar>(offset) = value; }