void oopDesc::verify(oopDesc* oop_desc) {
  verify_on(tty, oop_desc);
}

intptr_t oopDesc::slow_identity_hash() {
  // slow case; we have to acquire the micro lock in order to locate the header
  Thread* current = Thread::current();
  ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
  HandleMark hm(current);
  Handle object(current, this);
  return ObjectSynchronizer::identity_hash_value_for(object);
}

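// Illustrative only (not part of the original file): callers normally go through the
// inlined fast path, oopDesc::identity_hash() in oop.inline.hpp, which returns a hash
// cached in the mark word and only falls back to slow_identity_hash() when no hash has
// been installed yet. A minimal sketch, assuming a valid heap oop `obj`:
//
//   intptr_t h1 = obj->identity_hash();  // may take the slow path on the first call
//   intptr_t h2 = obj->identity_hash();  // later calls reuse the cached value
//   assert(h1 == h2, "identity hash must be stable for the object's lifetime");
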
// used only for asserts and guarantees
bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
  if (!Universe::heap()->is_oop(obj)) {
    return false;
  }

  // Header verification: the mark is typically non-zero. If we're
  // at a safepoint, it must not be zero, except when using the new lightweight locking.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (obj->mark().value() != 0) {
    return true;
  }
  return LockingMode == LM_LIGHTWEIGHT || !SafepointSynchronize::is_at_safepoint();
}

// used only for asserts and guarantees
bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
  return obj == NULL ? true : is_oop(obj, ignore_mark_word);
}

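// Illustrative only: these predicates are meant to back asserts and guarantees rather
// than product logic, e.g. at a hypothetical call site:
//
//   assert(oopDesc::is_oop_or_null(obj), "expected a valid oop or NULL");
//   guarantee(oopDesc::is_oop(obj), "must be a valid oop at this point");
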
VerifyOopClosure VerifyOopClosure::verify_oop;

template <class T> void VerifyOopClosure::do_oop_work(T* p) {
  oop obj = RawAccess<>::oop_load(p);
  guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj));
}

void VerifyOopClosure::do_oop(oop* p)       { VerifyOopClosure::do_oop_work(p); }
void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); }

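// Illustrative only: the shared verify_oop instance is applied to individual oop
// locations (roots, stack slots, fields) by whatever traversal visits them, e.g.
// for a hypothetical slot `oop* slot`:
//
//   VerifyOopClosure::verify_oop.do_oop(slot);  // guarantees *slot is a valid oop or NULL
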
// type test operations that don't require inclusion of oop.inline.hpp.
bool oopDesc::is_instance_noinline() const { return is_instance(); }
bool oopDesc::is_array_noinline() const { return is_array(); }
bool oopDesc::is_objArray_noinline() const { return is_objArray(); }
bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); }

bool oopDesc::has_klass_gap() {
  // Only has a klass gap when compressed class pointers are used, and
  // only if compact object headers are not used.
  return UseCompressedClassPointers && !UseCompactObjectHeaders;
}

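// Illustrative only, assuming a 64-bit VM: with +UseCompressedClassPointers (and
// -UseCompactObjectHeaders) the header is an 8-byte mark word followed by a 4-byte
// compressed Klass*, leaving a 4-byte "klass gap" that instance fields or the array
// length can be packed into:
//
//   [ mark word : 8 ][ narrowKlass : 4 ][ klass gap : 4 ][ fields / length ... ]
//
// With compact object headers the class pointer lives in the mark word itself, so
// there is no gap.
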
#if INCLUDE_CDS_JAVA_HEAP
void oopDesc::set_narrow_klass(narrowKlass nk) {
  assert(DumpSharedSpaces, "Used by CDS only. Do not abuse!");
  assert(UseCompressedClassPointers, "must be");
  assert(!UseCompactObjectHeaders, "not with compact headers");
  _metadata._compressed_klass = nk;
}
#endif

void* oopDesc::load_klass_raw(oop obj) {
  // TODO: Remove this method altogether and replace it with calls to obj->klass()?
  // OTOH, we may eventually get rid of locking in the header, and then we would no
  // longer have to deal with that here.
#ifdef _LP64
  return obj->klass();
#else
  return obj->_metadata._klass;
#endif
}

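// Illustrative only: on 64-bit, obj->klass() handles the decoding for us; with compact
// object headers the compressed class pointer is read out of the mark word, otherwise
// out of the _klass/_compressed_klass field, so this method no longer needs to poke at
// _metadata directly. A hedged sketch of an equivalent call site:
//
//   Klass* k = obj->klass();   // works for both header layouts
//   void* raw = (void*)k;      // what load_klass_raw() returns on _LP64
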
void* oopDesc::load_oop_raw(oop obj, int offset) {
  uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset;
  if (UseCompressedOops) {
    narrowOop narrow_oop = *(narrowOop*)addr;
    if (CompressedOops::is_null(narrow_oop)) return NULL;
    return (void*)CompressedOops::decode_raw(narrow_oop);
  } else {
    return *(void**)addr;
  }
}

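// Illustrative only: CompressedOops::decode_raw() above is the unchecked form of the
// usual decode, i.e. roughly
//
//   (oop)(CompressedOops::base() + ((uintptr_t)narrow_oop << CompressedOops::shift()))
//
// without the null filtering that the checked CompressedOops::decode() performs, which
// is why this function tests is_null() explicitly first.
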
oop oopDesc::obj_field_acquire(int offset) const { return HeapAccess<MO_ACQUIRE>::oop_load_at(as_oop(), offset); }

void oopDesc::obj_field_put_raw(int offset, oop value) { RawAccess<>::oop_store_at(as_oop(), offset, value); }
void oopDesc::release_obj_field_put(int offset, oop value) { HeapAccess<MO_RELEASE>::oop_store_at(as_oop(), offset, value); }
void oopDesc::obj_field_put_volatile(int offset, oop value) { HeapAccess<MO_SEQ_CST>::oop_store_at(as_oop(), offset, value); }

address oopDesc::address_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
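
// Illustrative only: the access decorators give the usual acquire/release pairing.
// A hedged sketch, with `holder` and `value_offset` purely hypothetical:
//
//   // publishing thread
//   holder->release_obj_field_put(value_offset, val);        // prior stores are published
//   // consuming thread
//   oop observed = holder->obj_field_acquire(value_offset);  // sees val and what preceded it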