src/hotspot/share/oops/oop.inline.hpp

--- old/src/hotspot/share/oops/oop.inline.hpp

  70 void oopDesc::release_set_mark(markOop m) {
  71   HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  72 }
  73 
  74 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  75   return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
  76 }
  77 
  78 markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
  79   return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
  80 }
  81 
  82 void oopDesc::init_mark() {
  83   set_mark(markOopDesc::prototype_for_object(this));
  84 }
  85 
  86 void oopDesc::init_mark_raw() {
  87   set_mark_raw(markOopDesc::prototype_for_object(this));
  88 }
  89 
  90 Klass* oopDesc::klass() const {
  91   if (UseCompressedClassPointers) {
  92     return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  93   } else {
  94     return _metadata._klass;
  95   }
  96 }
  97 
  98 Klass* oopDesc::klass_or_null() const volatile {
  99   if (UseCompressedClassPointers) {
 100     return CompressedKlassPointers::decode(_metadata._compressed_klass);
 101   } else {
 102     return _metadata._klass;
 103   }
 104 }
 105 
 106 Klass* oopDesc::klass_or_null_acquire() const volatile {
 107   if (UseCompressedClassPointers) {
 108     // Workaround for non-const load_acquire parameter.
 109     const volatile narrowKlass* addr = &_metadata._compressed_klass;
 110     volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
 111     return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
 112   } else {
 113     return OrderAccess::load_acquire(&_metadata._klass);
 114   }
 115 }
 116 
 117 Klass** oopDesc::klass_addr(HeapWord* mem) {
  118   // Only used internally and with CMS; will not work with
  119   // UseCompressedClassPointers (the assert below enforces this).
 120   assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
 121   ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
 122   return (Klass**) (((char*)mem) + in_bytes(offset));
 123 }
 124 
 125 narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
 126   assert(UseCompressedClassPointers, "only called by compressed klass pointers");
 127   ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
 128   return (narrowKlass*) (((char*)mem) + in_bytes(offset));
 129 }
 130 
 131 Klass** oopDesc::klass_addr() {
 132   return klass_addr((HeapWord*)this);
 133 }
 134 
 135 narrowKlass* oopDesc::compressed_klass_addr() {
 136   return compressed_klass_addr((HeapWord*)this);
 137 }
 138 
 139 #define CHECK_SET_KLASS(k)                                                \
 140   do {                                                                    \
 141     assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
 142     assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
 143   } while (0)
 144 
 145 void oopDesc::set_klass(Klass* k) {
 146   CHECK_SET_KLASS(k);
 147   if (UseCompressedClassPointers) {
 148     *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
 149   } else {
 150     *klass_addr() = k;
 151   }
 152 }
 153 
 154 void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
 155   CHECK_SET_KLASS(klass);
 156   if (UseCompressedClassPointers) {
 157     OrderAccess::release_store(compressed_klass_addr(mem),
 158                                CompressedKlassPointers::encode_not_null(klass));
 159   } else {
 160     OrderAccess::release_store(klass_addr(mem), klass);
 161   }
 162 }
 163 
 164 #undef CHECK_SET_KLASS
 165 
 166 int oopDesc::klass_gap() const {
 167   return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
 168 }
 169 
 170 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
 171   if (UseCompressedClassPointers) {
 172     *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
 173   }
 174 }
 175 
 176 void oopDesc::set_klass_gap(int v) {
 177   set_klass_gap((HeapWord*)this, v);
 178 }
 179 
 180 void oopDesc::set_klass_to_list_ptr(oop k) {
 181   // This is only to be used during GC, for from-space objects, so no
 182   // barrier is needed.
 183   if (UseCompressedClassPointers) {
 184     _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
 185   } else {


 260       assert((s == klass->oop_size(this)) ||
 261              (Universe::heap()->is_gc_active() &&
 262               ((is_typeArray() && UseConcMarkSweepGC) ||
 263                (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
 264              "wrong array object size");
 265     } else {
 266       // Must be zero, so bite the bullet and take the virtual call.
 267       s = klass->oop_size(this);
 268     }
 269   }
 270 
 271   assert(s > 0, "Oop size must be greater than zero, not %d", s);
 272   assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
 273   return s;
 274 }
 275 
 276 bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
 277 bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
 278 bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
 279 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
 280 
 281 void*    oopDesc::field_addr_raw(int offset)     const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
 282 void*    oopDesc::field_addr(int offset)         const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }
 283 
 284 template <class T>
 285 T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
 286 
 287 template <typename T>
 288 size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
 289 
 290 template <DecoratorSet decorators>
 291 inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
 292 inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }
 293 
 294 inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
 295 
 296 inline jbyte oopDesc::byte_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
 297 inline void  oopDesc::byte_field_put(int offset, jbyte value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 298 
 299 inline jchar oopDesc::char_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }


 312 inline jlong oopDesc::long_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
 313 inline void  oopDesc::long_field_put(int offset, jlong value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 314 
 315 inline jfloat oopDesc::float_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
 316 inline void   oopDesc::float_field_put(int offset, jfloat value)    { HeapAccess<>::store_at(as_oop(), offset, value); }
 317 
 318 inline jdouble oopDesc::double_field(int offset) const              { return HeapAccess<>::load_at(as_oop(), offset);  }
 319 inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
 320 
 321 bool oopDesc::is_locked() const {
 322   return mark()->is_locked();
 323 }
 324 
 325 bool oopDesc::is_unlocked() const {
 326   return mark()->is_unlocked();
 327 }
 328 
 329 bool oopDesc::has_bias_pattern() const {
 330   return mark()->has_bias_pattern();
 331 }
 332 
 333 bool oopDesc::has_bias_pattern_raw() const {
 334   return mark_raw()->has_bias_pattern();
 335 }
 336 
 337 // Used only for markSweep, scavenging
 338 bool oopDesc::is_gc_marked() const {
 339   return mark_raw()->is_marked();
 340 }
 341 
 342 // Used by scavengers
 343 bool oopDesc::is_forwarded() const {
 344   // The extra heap check is needed since the obj might be locked, in which case the
 345   // mark would point to a stack location and have the sentinel bit cleared
 346   return mark_raw()->is_marked();
 347 }
 348 
 349 // Used by scavengers
 350 void oopDesc::forward_to(oop p) {
 351   assert(check_obj_alignment(p),

+++ new/src/hotspot/share/oops/oop.inline.hpp

  70 void oopDesc::release_set_mark(markOop m) {
  71   HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
  72 }
  73 
  74 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  75   return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
  76 }
  77 
  78 markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
  79   return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
  80 }
  81 
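
Both CAS variants return the mark word actually found in the header: the swap succeeded exactly when the return value equals old_mark. A minimal retry-loop sketch of that contract (spin_install_hash is a hypothetical helper; copy_set_hash comes from markOop.hpp):

    // Keep retrying until the mark word is swapped from the value we last
    // observed; on failure, the returned witness becomes the new expected value.
    static void spin_install_hash(oop obj, intptr_t hash) {
      markOop old_mark = obj->mark();
      for (;;) {
        markOop new_mark = old_mark->copy_set_hash(hash);
        markOop witness  = obj->cas_set_mark(new_mark, old_mark);
        if (witness == old_mark) {
          return;             // CAS succeeded
        }
        old_mark = witness;   // lost the race; retry against the new mark
      }
    }
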
  82 void oopDesc::init_mark() {
  83   set_mark(markOopDesc::prototype_for_object(this));
  84 }
  85 
  86 void oopDesc::init_mark_raw() {
  87   set_mark_raw(markOopDesc::prototype_for_object(this));
  88 }
  89 
  90 narrowKlass oopDesc::compressed_klass_mask() { return ((narrowKlass) 1 << narrow_storage_props_shift) - 1; }
  91 uintptr_t   oopDesc::klass_mask()   { return ((uintptr_t) 1 << wide_storage_props_shift) - 1; }
  92 
  93 narrowKlass oopDesc::compressed_klass_masked(narrowKlass raw) { return raw & compressed_klass_mask(); }
  94 Klass*      oopDesc::klass_masked(uintptr_t raw)     { return reinterpret_cast<Klass*>(raw & klass_mask()); }
  95 
  96 
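The two shift constants (declared in oop.hpp) split the metadata word into a klass part in the low bits and the array storage properties in the bits above it. As an illustration only, if narrow_storage_props_shift were 30 (an assumed value; this hunk does not fix it), the narrow word would decompose like this:

    // Assumed layout for illustration; the real shifts live in oop.hpp:
    //   narrowKlass word: [ props : high bits | compressed klass : 30 bits ]
    narrowKlass raw   = 0xC0000123;                              // example word
    narrowKlass nk    = oopDesc::compressed_klass_masked(raw);   // 0x00000123
    narrowKlass props = raw >> 30;                               // 0x3
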
  97 Klass* oopDesc::klass() const {
  98   if (UseCompressedClassPointers) {
  99     return CompressedKlassPointers::decode_not_null(compressed_klass_masked(_metadata._compressed_klass));
 100   } else {
 101     return klass_masked(_metadata._wide_storage_props);
 102   }
 103 }
 104 
 105 Klass* oopDesc::klass_or_null() const volatile {
 106   if (UseCompressedClassPointers) {
 107     return CompressedKlassPointers::decode(compressed_klass_masked(_metadata._compressed_klass));
 108   } else {
 109     return klass_masked(_metadata._wide_storage_props);
 110   }
 111 }
 112 
 113 Klass* oopDesc::klass_or_null_acquire() const volatile {
 114   if (UseCompressedClassPointers) {
 115     // Workaround for non-const load_acquire parameter.
 116     const volatile narrowKlass* addr = &_metadata._compressed_klass;
 117     volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
 118     return CompressedKlassPointers::decode(compressed_klass_masked(OrderAccess::load_acquire(xaddr)));
 119   } else {
 120     return klass_masked(OrderAccess::load_acquire(&_metadata._wide_storage_props));
 121   }
 122 }
 123 
 124 Klass** oopDesc::klass_addr(HeapWord* mem) {
  125   // Only used internally and with CMS; will not work with
  126   // UseCompressedClassPointers (the assert below enforces this).
 127   assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
 128   ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
 129   return (Klass**) (((char*)mem) + in_bytes(offset));
 130 }
 131 
 132 uintptr_t* oopDesc::wide_metadata_addr(HeapWord* mem) {
 133   assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
 134   ByteSize offset = byte_offset_of(oopDesc, _metadata._wide_storage_props);
 135   return (uintptr_t*) (((char*)mem) + in_bytes(offset));
 136 }
 137 
 138 narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
 139   assert(UseCompressedClassPointers, "only called by compressed klass pointers");
 140   ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
 141   return (narrowKlass*) (((char*)mem) + in_bytes(offset));
 142 }
 143 
 144 Klass** oopDesc::klass_addr() {
 145   return klass_addr((HeapWord*)this);
 146 }
 147 
 148 narrowKlass* oopDesc::compressed_klass_addr() {
 149   return compressed_klass_addr((HeapWord*)this);
 150 }
 151 
 152 #define CHECK_SET_KLASS(k)                                                \
 153   do {                                                                    \
 154     assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
 155     assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  156     assert(((reinterpret_cast<uintptr_t>(k) & (~oopDesc::klass_mask())) == 0), \
  157       "No room for storage props");                                       \
 158   } while (0)
 159 
 160 void oopDesc::set_klass(Klass* k) {
 161   CHECK_SET_KLASS(k);
 162   if (UseCompressedClassPointers) {
 163     *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
 164   } else {
 165     *klass_addr() = k;
 166   }
 167 }
 168 
 169 void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
 170   CHECK_SET_KLASS(klass);
 171   if (UseCompressedClassPointers) {
 172     OrderAccess::release_store(compressed_klass_addr(mem),
 173                                CompressedKlassPointers::encode_not_null(klass));
 174   } else {
 175     OrderAccess::release_store(klass_addr(mem), klass);
 176   }
 177   assert(((oopDesc*)mem)->klass() == klass, "failed oopDesc::klass() encode/decode");
 178 }
 179 
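release_set_klass pairs with klass_or_null_acquire above: the allocator publishes the klass with a release store only after the rest of the header is written, so a reader that observes a non-NULL klass through the acquire load also sees those earlier stores. A hedged sketch of the two sides (publish_header and header_visible are hypothetical helpers):

    // Writer (allocation path): initialize the header, then publish the
    // klass last with release semantics.
    void publish_header(HeapWord* mem, Klass* k) {
      oopDesc::set_klass_gap(mem, 0);      // ordinary store, ordered before...
      oopDesc::release_set_klass(mem, k);  // ...the release store of the klass
    }

    // Reader (e.g. a concurrent GC thread): NULL means "not yet published".
    bool header_visible(oop obj) {
      return obj->klass_or_null_acquire() != NULL;
    }
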
 180 void oopDesc::set_metadata(ArrayStorageProperties storage_props, Klass* klass) {
 181   CHECK_SET_KLASS(klass);
 182   if (UseCompressedClassPointers) {
 183     *compressed_klass_addr() = (CompressedKlassPointers::encode_not_null(klass) | storage_props.encode<narrowKlass>(narrow_storage_props_shift));
 184   } else {
 185     *wide_metadata_addr((HeapWord*)this) = (reinterpret_cast<uintptr_t>(klass) | storage_props.encode<uintptr_t>(wide_storage_props_shift));
 186   }
 187 }
 188 
 189 void oopDesc::release_set_metadata(HeapWord* mem, ArrayStorageProperties storage_props, Klass* klass) {
 190   CHECK_SET_KLASS(klass);
 191   if (UseCompressedClassPointers) {
 192     OrderAccess::release_store(oopDesc::compressed_klass_addr(mem),
 193                                CompressedKlassPointers::encode_not_null(klass) | storage_props.encode<narrowKlass>(narrow_storage_props_shift));
 194   } else {
 195     OrderAccess::release_store(oopDesc::wide_metadata_addr(mem),
 196                                (reinterpret_cast<uintptr_t>(klass) | storage_props.encode<uintptr_t>(wide_storage_props_shift)));
 197   }
 198 }
 199 #undef CHECK_SET_KLASS
 200 
 201 
 202 ArrayStorageProperties oopDesc::array_storage_properties() const {
 203   if (UseCompressedClassPointers) {
 204     return ArrayStorageProperties(_metadata._narrow_storage_props >> narrow_storage_props_shift);
 205   } else {
 206     return ArrayStorageProperties(_metadata._wide_storage_props >> wide_storage_props_shift);
 207   }
 208 }
 209 
 210 
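set_metadata and array_storage_properties are inverses: the setter ORs the encoded properties above the (compressed) klass bits, klass() masks them back out, and array_storage_properties() shifts them back down. An illustrative round trip, assuming a Klass* k and ArrayStorageProperties props in scope (ArrayStorageProperties' comparison accessors are not shown in this file):

    // Round trip under either layout: pack, then unpack.
    obj->set_metadata(props, k);
    assert(obj->klass() == k, "klass bits must survive the OR");
    ArrayStorageProperties back = obj->array_storage_properties();
    // 'back' holds the same bits that props.encode() contributed above.
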
 211 int oopDesc::klass_gap() const {
 212   return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
 213 }
 214 
 215 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
 216   if (UseCompressedClassPointers) {
 217     *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
 218   }
 219 }
 220 
 221 void oopDesc::set_klass_gap(int v) {
 222   set_klass_gap((HeapWord*)this, v);
 223 }
 224 
 225 void oopDesc::set_klass_to_list_ptr(oop k) {
 226   // This is only to be used during GC, for from-space objects, so no
 227   // barrier is needed.
 228   if (UseCompressedClassPointers) {
 229     _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
 230   } else {


 305       assert((s == klass->oop_size(this)) ||
 306              (Universe::heap()->is_gc_active() &&
 307               ((is_typeArray() && UseConcMarkSweepGC) ||
 308                (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
 309              "wrong array object size");
 310     } else {
 311       // Must be zero, so bite the bullet and take the virtual call.
 312       s = klass->oop_size(this);
 313     }
 314   }
 315 
 316   assert(s > 0, "Oop size must be greater than zero, not %d", s);
 317   assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
 318   return s;
 319 }
 320 
 321 bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
 322 bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
 323 bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
 324 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
 325 bool oopDesc::is_value()     const { return klass()->is_value(); }
 326 bool oopDesc::is_valueArray()  const { return klass()->is_valueArray_klass(); }
 327 
 328 void*    oopDesc::field_addr_raw(int offset)     const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
 329 void*    oopDesc::field_addr(int offset)         const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }
 330 
 331 template <class T>
 332 T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
 333 
 334 template <typename T>
 335 size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
 336 
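field_offset is the inverse of the *_addr_raw functions: pointer_delta with a scale of 1 yields the byte distance from the object base, so the round trip below holds. A small sketch (the offset value is arbitrary):

    int   off = 16;                                  // illustrative byte offset
    jint* p   = obj->obj_field_addr_raw<jint>(off);  // object base + 16 bytes
    assert(obj->field_offset(p) == (size_t)off, "inverse of field_addr_raw");
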
 337 template <DecoratorSet decorators>
 338 inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
 339 inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }
 340 
 341 inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
 342 
 343 inline jbyte oopDesc::byte_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
 344 inline void  oopDesc::byte_field_put(int offset, jbyte value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 345 
 346 inline jchar oopDesc::char_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }


 359 inline jlong oopDesc::long_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
 360 inline void  oopDesc::long_field_put(int offset, jlong value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 361 
 362 inline jfloat oopDesc::float_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
 363 inline void   oopDesc::float_field_put(int offset, jfloat value)    { HeapAccess<>::store_at(as_oop(), offset, value); }
 364 
 365 inline jdouble oopDesc::double_field(int offset) const              { return HeapAccess<>::load_at(as_oop(), offset);  }
 366 inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
 367 
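All of these accessors funnel through HeapAccess, so the GC's load and store barriers are applied; raw pointer arithmetic into the heap would bypass them. Typical use, with a hypothetical field offset:

    int   off = 24;                       // hypothetical field offset
    jlong v   = obj->long_field(off);     // barrier-aware load
    obj->long_field_put(off, v + 1);      // barrier-aware store
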
 368 bool oopDesc::is_locked() const {
 369   return mark()->is_locked();
 370 }
 371 
 372 bool oopDesc::is_unlocked() const {
 373   return mark()->is_unlocked();
 374 }
 375 
 376 bool oopDesc::has_bias_pattern() const {
 377   return mark()->has_bias_pattern();
 378 }
 379 
 380 
 381 bool oopDesc::has_bias_pattern_raw() const {
 382   return mark_raw()->has_bias_pattern();
 383 }
 384 
 385 // Used only for markSweep, scavenging
 386 bool oopDesc::is_gc_marked() const {
 387   return mark_raw()->is_marked();
 388 }
 389 
 390 // Used by scavengers
 391 bool oopDesc::is_forwarded() const {
 392   // The extra heap check is needed since the obj might be locked, in which case the
 393   // mark would point to a stack location and have the sentinel bit cleared
 394   return mark_raw()->is_marked();
 395 }
 396 
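is_forwarded can simply test is_marked because forward_to (below) overwrites the mark word with the new location plus the marked sentinel. A hedged sketch of the copying-GC pattern built on this pair (evacuate is a hypothetical helper; Copy comes from utilities/copy.hpp):

    // Copy the object to to-space, then install a forwarding pointer in
    // the old copy's mark word.
    oop evacuate(oop obj, HeapWord* to) {
      size_t sz = obj->size();
      Copy::aligned_disjoint_words((HeapWord*)obj, to, sz);
      oop copy = (oop) to;
      obj->forward_to(copy);      // old copy now answers is_forwarded()
      return obj->forwardee();    // == copy
    }
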
 397 // Used by scavengers
 398 void oopDesc::forward_to(oop p) {
 399   assert(check_obj_alignment(p),