/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "bytes_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
  } else {
    return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass);
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
}

inline Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedOops
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

inline narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

inline void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

inline void oopDesc::release_set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(),
                               Klass::encode_klass_not_null(k));
  } else {
    OrderAccess::release_store_ptr(klass_addr(), k);
  }
}

#undef CHECK_SET_KLASS

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

inline oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }

inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); }

inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); }
inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)    const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)    const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)    const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)     const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)   const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)    const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)   const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset)  const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.
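//
// A hedged worked example (example values, not the actual runtime
// configuration): with a narrow-oop base of 0x0 and a shift of 3
// (8-byte object alignment), an oop at address 0x0000000700000048
// encodes to (0x700000048 - 0x0) >> 3 = 0xE0000009, which fits in 32 bits;
// decoding reverses it as base + ((uintptr_t)0xE0000009 << 3) = 0x700000048.
// The _not_null variants below skip only the null check around this arithmetic.
//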

inline bool check_obj_alignment(oop obj) {
  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " INTPTR_FORMAT, p2i((void*) result)));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                               volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                     volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                   oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                               volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  oop result;
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    result = decode_heap_oop(old);
  } else {
    result = (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    if (exchange_value != NULL) {
      ShenandoahBarrierSet::barrier_set()->storeval_barrier(exchange_value);
    }
    result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result);
  }
#endif
  return result;
}

// In order to put or get a field out of an instance, must first check
// if the field has been compressed and uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  oop obj = UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  }
#endif
  return obj;
}
inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  return *metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  *metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

inline jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)      { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const                { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)   { *bool_field_addr(offset) = (((jint) contents) & 1); }

inline jchar oopDesc::char_field(int offset) const                   { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)      { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                     { return *int_field_addr(offset);     }
inline void oopDesc::int_field_put(int offset, jint contents)        { *int_field_addr(offset) = contents; }

inline jshort oopDesc::short_field(int offset) const                 { return (jshort) *short_field_addr(offset);   }
inline void oopDesc::short_field_put(int offset, jshort contents)    { *short_field_addr(offset) = (jint) contents; }

inline jlong oopDesc::long_field(int offset) const                   { return *long_field_addr(offset);     }
inline void oopDesc::long_field_put(int offset, jlong contents)      { *long_field_addr(offset) = contents; }

inline jfloat oopDesc::float_field(int offset) const                 { return *float_field_addr(offset);     }
inline void oopDesc::float_field_put(int offset, jfloat contents)    { *float_field_addr(offset) = contents; }

inline jdouble oopDesc::double_field(int offset) const               { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents)  { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  oop obj = UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  }
#endif
  return obj;
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                   { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)      { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));           }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }

inline jchar oopDesc::char_field_acquire(int offset) const                   { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));     }
inline void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents); }

inline jshort oopDesc::short_field_acquire(int offset) const                 { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)    { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                   { return OrderAccess::load_acquire(long_field_addr(offset));     }
inline void oopDesc::release_long_field_put(int offset, jlong contents)      { OrderAccess::release_store(long_field_addr(offset), contents); }

inline jfloat oopDesc::float_field_acquire(int offset) const                 { return OrderAccess::load_acquire(float_field_addr(offset));     }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)    { OrderAccess::release_store(float_field_addr(offset), contents); }

inline jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents);       }

inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
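  //
  // A rough illustration with made-up example values (not from a real build):
  // an instance klass whose objects occupy 24 bytes publishes a positive lh
  // of 24 with no slow-path bit, so s = 24 >> LogHeapWordSize = 3 words on a
  // 64-bit VM; an int[] klass publishes a negative lh whose encoded
  // log2_element_size is 2, so a 10-element array takes
  // header_size + (10 << 2) bytes, rounded up to MinObjAlignmentInBytes
  // before converting to words, exactly as the branches below compute.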

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      // (%%% please explain by what magic the length is actually fetched!)
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      // is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()               // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}


inline int oopDesc::size() {
  return size_given_klass(klass());
}

inline void update_barrier_set(void* p, oop v, bool release = false) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v, release);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    // always_do_update_barrier == false =>
    // Either we are at a safepoint (in GC) or CMS is not used. In both
    // cases it's unnecessary to mark the card as dirty with release semantics.
    update_barrier_set((void*)p, v, false /* release */);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  // When using CMS we must mark the card corresponding to p as dirty
  // with release semantics to prevent that CMS sees the dirty card but
  // not the new value v at p due to reordering of the two
  // stores. Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */);    // cast away type
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value,
                                                bool prebarrier) {
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC && ShenandoahCASBarrier) {
    return ShenandoahBarrierSet::barrier_set()->oop_atomic_cmpxchg_in_heap(exchange_value, dest, compare_value);
  }
#endif
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ?
         true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  klass()->oop_follow_contents(this);
}

// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed
  // Note: The mark must be read into local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                       \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                 \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                      \
                                                                       \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {   \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}


inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl, mr);
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)          \
                                                                       \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {       \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // INCLUDE_ALL_GCS

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP
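
// Illustration only (not part of the original header): assuming the pair
// (ExtendedOopClosure, _v) is among the closure types listed by
// ALL_OOP_OOP_ITERATE_CLOSURES_1/2, the OOP_ITERATE_DEFN macro above
// expands roughly to
//
//   inline int oopDesc::oop_iterate(ExtendedOopClosure* blk) {
//     SpecializationStats::record_call();
//     return klass()->oop_oop_iterate_v(this, blk);
//   }
//
// plus the MemRegion-bounded overload, so each listed closure type gets its
// own statically dispatched oop_iterate definition instead of a single
// virtual entry point.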