1 /*
  2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OOPS_OOP_INLINE_HPP
 26 #define SHARE_OOPS_OOP_INLINE_HPP
 27 
 28 #include "oops/oop.hpp"
 29 
 30 #include "memory/universe.hpp"
 31 #include "memory/iterator.inline.hpp"
 32 #include "oops/access.inline.hpp"
 33 #include "oops/arrayKlass.hpp"
 34 #include "oops/arrayOop.hpp"
 35 #include "oops/compressedKlass.inline.hpp"
 36 #include "oops/compressedOops.inline.hpp"
 37 #include "oops/instanceKlass.hpp"
 38 #include "oops/markWord.inline.hpp"
 39 #include "oops/oopsHierarchy.hpp"
 40 #include "runtime/atomic.hpp"
 41 #include "runtime/globals.hpp"
 42 #include "runtime/objectMonitor.inline.hpp"
 43 #include "runtime/safepoint.hpp"
 44 #include "runtime/synchronizer.hpp"
 45 #include "utilities/align.hpp"
 46 #include "utilities/debug.hpp"
 47 #include "utilities/macros.hpp"
 48 #include "utilities/globalDefinitions.hpp"
 49 
 50 // Implementation of all inlined member functions defined in oop.hpp
 51 // We need a separate file to avoid circular references
 52 
// Load the mark word with plain (relaxed) memory ordering.
markWord oopDesc::mark() const {
  return Atomic::load(&_mark);
}

// Load the mark word with acquire semantics, pairing with release_set_mark().
markWord oopDesc::mark_acquire() const {
  return Atomic::load_acquire(&_mark);
}

// Raw address of the mark word, for code that needs direct access.
markWord* oopDesc::mark_addr() const {
  return (markWord*) &_mark;
}

// Store the mark word with plain (relaxed) memory ordering.
void oopDesc::set_mark(markWord m) {
  Atomic::store(&_mark, m);
}

// Store the mark word at the mark offset of raw memory 'mem'
// (used during allocation, before the object is published as an oop).
void oopDesc::set_mark(HeapWord* mem, markWord m) {
  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

// Store the mark word with release semantics, pairing with mark_acquire().
void oopDesc::release_set_mark(markWord m) {
  Atomic::release_store(&_mark, m);
}

// Release-store the mark word at the mark offset of raw memory 'mem'.
void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
  Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
}

// Atomically install new_mark if the current mark equals old_mark.
// Returns the mark observed before the exchange (== old_mark on success).
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  return Atomic::cmpxchg(&_mark, old_mark, new_mark);
}

// As above, with an explicit memory-ordering constraint.
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
}
 88 
// Returns the "real" header of this object: if the object is inflated
// (monitor-locked), the header is displaced into the ObjectMonitor, so it
// is fetched from there. Not safe under LM_LEGACY stack-locking, where the
// header may instead live in a stack lock-record.
markWord oopDesc::resolve_mark() const {
  assert(LockingMode != LM_LEGACY, "Not safe with legacy stack-locking");
  markWord hdr = mark();
  if (hdr.has_monitor()) {
    // Header is displaced into the monitor; read the real one from there.
    ObjectMonitor* monitor = hdr.monitor();
    return monitor->header();
  }
  return hdr;
}
 98 
// Resets the mark word to its prototype value. With compact object headers
// the (narrow) klass bits live in the upper part of the mark word and must
// be preserved across the reset.
void oopDesc::init_mark() {
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    // resolve_mark() fetches the real header even if it is displaced
    // into a monitor.
    markWord header = resolve_mark();
    assert(CompressedKlassPointers::use_compressed_class_pointers(), "expect compressed klass pointers");
    // Keep the klass bits, reset everything else to the prototype.
    set_mark(markWord((header.value() & markWord::klass_mask_in_place) | markWord::prototype().value()));
  } else
#endif
  set_mark(markWord::prototype());
}
109 
// Returns this object's Klass*, which is never null. Depending on heap
// layout it is decoded from the mark word (compact headers), decoded from
// the narrow-klass field, or read directly from the full-width Klass* field.
// NOTE(review): this tests CompressedKlassPointers::use_compact_object_headers()
// while init_mark()/set_klass() test the UseCompactObjectHeaders flag directly —
// presumably these always agree; confirm they are the same condition.
Klass* oopDesc::klass() const {
#ifdef _LP64
  if (CompressedKlassPointers::use_compact_object_headers()) {
    assert(CompressedKlassPointers::use_compressed_class_pointers(), "only with compressed class pointers");
    // Klass bits are in the header; resolve displaced headers first.
    markWord header = resolve_mark();
    return header.klass();
  } else if (CompressedKlassPointers::use_compressed_class_pointers()) {
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  } else
#endif
  return _metadata._klass;
}
122 
// Like klass(), but tolerates an object whose klass has not been set yet
// (e.g. during allocation): uses the null-tolerant decode paths.
Klass* oopDesc::klass_or_null() const {
#ifdef _LP64
  if (CompressedKlassPointers::use_compact_object_headers()) {
    assert(CompressedKlassPointers::use_compressed_class_pointers(), "only with compressed class pointers");
    markWord header = resolve_mark();
    return header.klass_or_null();
  } else if (CompressedKlassPointers::use_compressed_class_pointers()) {
    // decode() (not decode_not_null) maps a zero narrow klass to null.
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  } else
#endif
  return _metadata._klass;
}
135 
// Like klass_or_null(), but reads the klass with acquire semantics so the
// caller observes a fully-initialized Klass published by release_set_klass()
// (or, with compact headers, by a release store of the mark word).
Klass* oopDesc::klass_or_null_acquire() const {
#ifdef _LP64
  if (CompressedKlassPointers::use_compact_object_headers()) {
    assert(CompressedKlassPointers::use_compressed_class_pointers(), "only with compressed class pointers");
    markWord header = mark_acquire();
    if (header.has_monitor()) {
      // Header displaced into the monitor; read the real one from there.
      header = header.monitor()->header();
    }
    return header.klass_or_null();
  } else if (CompressedKlassPointers::use_compressed_class_pointers()) {
     narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass);
     return CompressedKlassPointers::decode(nklass);
  } else
#endif
  return Atomic::load_acquire(&_metadata._klass);
}
152 
// Raw variant of klass(); currently simply delegates to klass().
Klass* oopDesc::klass_raw() const {
  return klass();
}
156 
157 void oopDesc::set_klass(Klass* k) {
158   assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
159   assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
160   if (CompressedKlassPointers::use_compressed_class_pointers()) {
161     _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
162   } else {
163     _metadata._klass = k;
164   }
165 }
166 
167 void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
168   assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
169   assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers");
170   char* raw_mem = ((char*)mem + klass_offset_in_bytes());
171   if (CompressedKlassPointers::use_compressed_class_pointers()) {
172     Atomic::release_store((narrowKlass*)raw_mem,
173                           CompressedKlassPointers::encode_not_null(k));
174   } else {
175     Atomic::release_store((Klass**)raw_mem, k);
176   }
177 }
178 
// Writes v into the 32-bit gap that follows the narrow-klass field in raw
// memory 'mem'. The gap only exists with compressed class pointers; with
// full-width Klass* there is nothing to write.
void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers");
  if (CompressedKlassPointers::use_compressed_class_pointers()) {
    *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
  }
}
185 
186 bool oopDesc::is_a(Klass* k) const {
187   return klass()->is_subtype_of(k);
188 }
189 
190 size_t oopDesc::size()  {
191   return size_given_klass(klass());
192 }
193 
// Computes this object's size in HeapWords using the given klass's layout
// helper, avoiding a virtual call in the common instance and array cases.
size_t oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  size_t s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    // Instance: the layout helper encodes the size directly, unless the
    // class needs the slow path (e.g. size depends on the object itself).
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize;

      // size_might_change() covers objects whose length may be concurrently
      // updated (e.g. during GC), in which case the assert is relaxed.
      assert(s == klass->oop_size(this) || size_might_change(), "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not " SIZE_FORMAT, s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: " SIZE_FORMAT, s);
  return s;
}
242 
// Type predicates: classify this object by its Klass kind.
bool oopDesc::is_instance()    const { return klass()->is_instance_klass();             }
bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass();   }
bool oopDesc::is_stackChunk()  const { return klass()->is_stack_chunk_instance_klass(); }
bool oopDesc::is_array()       const { return klass()->is_array_klass();                }
bool oopDesc::is_objArray()    const { return klass()->is_objArray_klass();             }
bool oopDesc::is_typeArray()   const { return klass()->is_typeArray_klass();            }
249 
// Address of the field at byte 'offset' within this object, typed as T*.
template<typename T>
T*       oopDesc::field_addr(int offset)     const { return reinterpret_cast<T*>(cast_from_oop<intptr_t>(as_oop()) + offset); }

// Inverse of field_addr: byte offset of field pointer p within this object.
template <typename T>
size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
255 
// Typed field accessors. Oop fields go through the Access API so GC
// barriers are applied; primitive fields are raw loads/stores at the
// given byte offset.
template <DecoratorSet decorators>
inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }

inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
template <DecoratorSet decorators>
inline void oopDesc::obj_field_put_access(int offset, oop value)    { HeapAccess<decorators>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const                  { return *field_addr<jbyte>(offset);  }
inline void  oopDesc::byte_field_put(int offset, jbyte value)       { *field_addr<jbyte>(offset) = value; }

inline jchar oopDesc::char_field(int offset) const                  { return *field_addr<jchar>(offset);  }
inline void  oopDesc::char_field_put(int offset, jchar value)       { *field_addr<jchar>(offset) = value; }

// Booleans are normalized to 0/1 on store ('value & 1').
inline jboolean oopDesc::bool_field(int offset) const               { return *field_addr<jboolean>(offset); }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { *field_addr<jboolean>(offset) = jboolean(value & 1); }
inline jboolean oopDesc::bool_field_volatile(int offset) const      { return RawAccess<MO_SEQ_CST>::load(field_addr<jboolean>(offset)); }
inline void     oopDesc::bool_field_put_volatile(int offset, jboolean value) { RawAccess<MO_SEQ_CST>::store(field_addr<jboolean>(offset), jboolean(value & 1)); }
inline jshort oopDesc::short_field(int offset) const                { return *field_addr<jshort>(offset);   }
inline void   oopDesc::short_field_put(int offset, jshort value)    { *field_addr<jshort>(offset) = value;  }

inline jint oopDesc::int_field(int offset) const                    { return *field_addr<jint>(offset);     }
inline void oopDesc::int_field_put(int offset, jint value)          { *field_addr<jint>(offset) = value;    }

inline jlong oopDesc::long_field(int offset) const                  { return *field_addr<jlong>(offset);    }
inline void  oopDesc::long_field_put(int offset, jlong value)       { *field_addr<jlong>(offset) = value;   }

inline jfloat oopDesc::float_field(int offset) const                { return *field_addr<jfloat>(offset);   }
inline void   oopDesc::float_field_put(int offset, jfloat value)    { *field_addr<jfloat>(offset) = value;  }

inline jdouble oopDesc::double_field(int offset) const              { return *field_addr<jdouble>(offset);  }
inline void    oopDesc::double_field_put(int offset, jdouble value) { *field_addr<jdouble>(offset) = value; }
288 
// Returns true if the mark word indicates the object is locked.
bool oopDesc::is_locked() const {
  return mark().is_locked();
}

// Returns true if the mark word indicates the object is unlocked.
bool oopDesc::is_unlocked() const {
  return mark().is_unlocked();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark().is_marked();
}
308 
309 // Used by scavengers
// Used by scavengers
// Installs a forwarding pointer to p in this object's mark word.
// Non-atomic; the caller must have exclusive access to the object.
void oopDesc::forward_to(oop p) {
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(forwardee(m) == p, "encoding must be reversable");
  set_mark(m);
}
315 
// Marks this object as forwarded to itself. On 64-bit this sets the
// self-forwarded bit in the (possibly displaced) mark word; on 32-bit it
// falls back to encoding the object's own address as the forwarding pointer.
void oopDesc::forward_to_self() {
#ifdef _LP64
  markWord m = mark();
  // If mark is displaced, we need to preserve the Klass* from real header.
  assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint");
  if (m.has_displaced_mark_helper()) {
    m = m.displaced_mark_helper();
  }
  m = m.set_self_forwarded();
  assert(forwardee(m) == cast_to_oop(this), "encoding must be reversable");
  set_mark(m);
#else
  forward_to(oop(this));
#endif
}
331 
332 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
333   markWord m = markWord::encode_pointer_as_mark(p);
334   assert(forwardee(m) == p, "encoding must be reversable");
335   markWord old_mark = cas_set_mark(m, compare, order);
336   if (old_mark == compare) {
337     return nullptr;
338   } else {
339     return forwardee(old_mark);
340   }
341 }
342 
343 oop oopDesc::forward_to_self_atomic(markWord compare, atomic_memory_order order) {
344 #ifdef _LP64
345   markWord m = compare;
346   // If mark is displaced, we need to preserve the Klass* from real header.
347   assert(SafepointSynchronize::is_at_safepoint(), "we can only safely fetch the displaced header at safepoint");
348   if (m.has_displaced_mark_helper()) {
349     m = m.displaced_mark_helper();
350   }
351   m = m.set_self_forwarded();
352   assert(forwardee(m) == cast_to_oop(this), "encoding must be reversable");
353   markWord old_mark = cas_set_mark(m, compare, order);
354   if (old_mark == compare) {
355     return NULL;
356   } else {
357     assert(old_mark.is_marked(), "must be marked here");
358     return forwardee(old_mark);
359   }
360 #else
361   return forward_to_atomic(oop(this), compare, order);
362 #endif
363 }
364 
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return forwardee(mark());
}

// Decodes the forwardee from an already-loaded (forwarded) mark word.
// With 64-bit self-forwarding, the forwardee is the object itself.
oop oopDesc::forwardee(markWord header) const {
  assert(header.is_marked(), "must be forwarded");
#ifdef _LP64
  if (header.self_forwarded()) {
    return cast_to_oop(this);
  } else
#endif
  {
    // Redundant with the assert on entry, but kept for the 32-bit path.
    assert(header.is_marked(), "only decode when actually forwarded");
    return cast_to_oop(header.decode_pointer());
  }
}
384 
// The following method needs to be MT safe.
// Returns the GC age bits, reading from the displaced header if the real
// header has been displaced (e.g. by locking).
uint oopDesc::age() const {
  assert(!mark().is_marked(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark().age();
  } else {
    return mark().age();
  }
}
394 
// Increments the GC age bits, writing through to the displaced header if
// the real header has been displaced.
void oopDesc::incr_age() {
  assert(!mark().is_marked(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark().incr_age());
  } else {
    set_mark(mark().incr_age());
  }
}
403 
// Applies closure cl to all oop fields of this object, dispatching on the
// object's klass.
template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

// As above, but only visits oop fields that fall within MemRegion mr.
template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

// Iterates oop fields and returns the object's size in HeapWords. The size
// is computed BEFORE iterating, since the closure may modify the object.
template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  size_t size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

// Bounded variant of oop_iterate_size: only visits fields within mr.
template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  size_t size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

// Iterates oop fields in reverse order, using this object's own klass.
template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  oop_iterate_backwards(cl, klass());
}

// Iterates oop fields in reverse order with an explicitly supplied klass
// (useful when the caller has already loaded it).
template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k);
}
439 
440 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
441   return obj == nullptr || obj->klass()->is_subtype_of(klass);
442 }
443 
444 intptr_t oopDesc::identity_hash() {
445   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
446   // Note: The mark must be read into local variable to avoid concurrent updates.
447   markWord mrk = mark();
448   if (mrk.is_unlocked() && !mrk.has_no_hash()) {
449     return mrk.hash();
450   } else if (mrk.is_marked()) {
451     return mrk.hash();
452   } else {
453     return slow_identity_hash();
454   }
455 }
456 
// This checks fast simple case of whether the oop has_no_hash,
// to optimize JVMTI table lookup.
// Returns true only when the object is unlocked AND has no hash installed.
bool oopDesc::fast_no_hash_check() {
  markWord mrk = mark_acquire();
  assert(!mrk.is_marked(), "should never be marked");
  return mrk.is_unlocked() && mrk.has_no_hash();
}
464 
// Returns true if the real header has been displaced (e.g. into a lock
// record or an ObjectMonitor).
bool oopDesc::has_displaced_mark() const {
  return mark().has_displaced_mark_helper();
}

// Returns the displaced header; only valid when has_displaced_mark().
markWord oopDesc::displaced_mark() const {
  return mark().displaced_mark_helper();
}

// Writes m through to wherever the header is displaced. The temporary
// markWord returned by mark() carries the location of the displaced header,
// so calling the setter on it updates the displaced copy, not the temporary.
void oopDesc::set_displaced_mark(markWord m) {
  mark().set_displaced_mark_helper(m);
}

// Returns true if the GC must preserve this object's current mark word
// (e.g. it carries a hash or lock state that forwarding would destroy).
bool oopDesc::mark_must_be_preserved() const {
  return mark_must_be_preserved(mark());
}

// Variant taking an already-loaded mark word.
bool oopDesc::mark_must_be_preserved(markWord m) const {
  return m.must_be_preserved(this);
}
484 
485 #endif // SHARE_OOPS_OOP_INLINE_HPP