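// VerifyOopClosure walks oop fields and checks that every value it loads
// is either a valid oop or null, failing hard (guarantee) on corruption.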
VerifyOopClosure VerifyOopClosure::verify_oop;

template <class T> void VerifyOopClosure::do_oop_work(T* p) {
  oop obj = RawAccess<>::oop_load(p);
  guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " PTR_FORMAT, p2i(obj));
}

void VerifyOopClosure::do_oop(oop* p)       { VerifyOopClosure::do_oop_work(p); }
void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); }

// Type test operations that don't require inclusion of oop.inline.hpp.
bool oopDesc::is_instance_noinline() const    { return is_instance();    }
bool oopDesc::is_instanceRef_noinline() const { return is_instanceRef(); }
bool oopDesc::is_stackChunk_noinline() const  { return is_stackChunk();  }
bool oopDesc::is_array_noinline() const       { return is_array();       }
bool oopDesc::is_objArray_noinline() const    { return is_objArray();    }
bool oopDesc::is_typeArray_noinline() const   { return is_typeArray();   }

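// Note on layout (legacy, non-compact headers): with compressed class
// pointers the header is an 8-byte mark word plus a 4-byte narrow klass,
// leaving a 4-byte gap that the first field or the array length can reuse.
// Compact headers fold the klass into the mark word, so there is no gap.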
bool oopDesc::has_klass_gap() {
  // Only has a klass gap when compressed class pointers are used,
  // except when using compact headers.
  return UseCompressedClassPointers && !UseCompactObjectHeaders;
}

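// Direct write of the compressed klass word, used only while dumping the
// CDS heap archive (see the asserts below).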
#if INCLUDE_CDS_JAVA_HEAP
void oopDesc::set_narrow_klass(narrowKlass nk) {
  assert(CDSConfig::is_dumping_heap(), "Used by CDS only. Do not abuse!");
  assert(UseCompressedClassPointers, "must be");
  _metadata._compressed_klass = nk;
}
#endif

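// Barrier-free load of the oop field at the given byte offset: decodes a
// narrow oop by hand when compressed oops are in use and applies no GC
// access barriers, so callers must know that a raw read is safe here.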
void* oopDesc::load_oop_raw(oop obj, int offset) {
  uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset;
  if (UseCompressedOops) {
    narrowOop narrow_oop = *(narrowOop*)addr;
    if (CompressedOops::is_null(narrow_oop)) return nullptr;
    return (void*)CompressedOops::decode_raw(narrow_oop);
  } else {
    return *(void**)addr;
  }
}

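// Acquire/release variants of the plain field accessors. A writer that
// publishes with release_*_field_put() pairs with a reader that uses
// *_field_acquire(): the reader is guaranteed to observe every store the
// writer made before the release. The bool variant also normalizes the
// stored jboolean to 0/1 via (value & 1).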
jboolean oopDesc::bool_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jboolean>(offset)); }
void oopDesc::release_bool_field_put(int offset, jboolean value) { Atomic::release_store(field_addr<jboolean>(offset), jboolean(value & 1)); }

jint oopDesc::int_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jint>(offset)); }
void oopDesc::release_int_field_put(int offset, jint value) { Atomic::release_store(field_addr<jint>(offset), value); }

jshort oopDesc::short_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jshort>(offset)); }
void oopDesc::release_short_field_put(int offset, jshort value) { Atomic::release_store(field_addr<jshort>(offset), value); }

jlong oopDesc::long_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jlong>(offset)); }
void oopDesc::release_long_field_put(int offset, jlong value) { Atomic::release_store(field_addr<jlong>(offset), value); }

jfloat oopDesc::float_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jfloat>(offset)); }
void oopDesc::release_float_field_put(int offset, jfloat value) { Atomic::release_store(field_addr<jfloat>(offset), value); }

jdouble oopDesc::double_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<jdouble>(offset)); }
void oopDesc::release_double_field_put(int offset, jdouble value) { Atomic::release_store(field_addr<jdouble>(offset), value); }

#ifdef ASSERT
bool oopDesc::size_might_change(Klass* klass) {
  // UseParallelGC and UseG1GC can change the length field
  // of an "old copy" of an object array in the young gen so it indicates
  // the grey portion of an already copied array. This will cause the first
  // disjunct below to fail if the two comparands are computed across such
  // a concurrent change.
  return Universe::heap()->is_stw_gc_active() && klass->is_objArray_klass() && is_forwarded() && (UseParallelGC || UseG1GC);
}
#endif