#include "utilities/macros.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "bytes_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

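// For orientation only (a sketch, not the definition used above): with
// UseCompressedClassPointers, Klass::decode_klass_not_null() conceptually
// rebuilds the full pointer from the 32-bit value with a base and a shift,
// roughly
//
//   Klass* k = (Klass*)(klass_base + ((uintptr_t)compressed << klass_shift));
//
// where 'klass_base' and 'klass_shift' stand for the encoding parameters kept
// by the VM; they are placeholders here, not real identifiers.
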
inline Klass* oopDesc::klass_or_null() const volatile {
  // may be NULL, unlike klass()
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                   oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  oop result;
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    result = decode_heap_oop(old);
  } else {
    result = (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    if (exchange_value != NULL) {
      ShenandoahBarrierSet::barrier_set()->storeval_barrier(exchange_value);
    }
    result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result);
  }
#endif
  return result;
}
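
// Illustrative only (not part of this file): a caller that unconditionally
// swaps an instance oop field passes the raw field address, e.g.
//
//   volatile HeapWord* addr = /* address of the oop field inside the object */;
//   oop previous = oopDesc::atomic_exchange_oop(new_value, addr);
//
// 'addr' and 'new_value' are hypothetical names used only for this sketch.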

// In order to put or get a field out of an instance, we must first check
// whether the field has been compressed and, if so, uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  oop obj = UseCompressedOops ?
              load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
              load_decode_heap_oop(obj_field_addr<oop>(offset));
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  }
#endif
  return obj;
}
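
// Conceptually (a sketch only, not the definitions used above), the
// encode/decode pair behind the compressed-oop accessors is
//
//   narrowOop encode(oop v)       { return (narrowOop)(((uintptr_t)v - heap_base) >> oop_shift); }
//   oop       decode(narrowOop v) { return (oop)(heap_base + ((uintptr_t)v << oop_shift)); }
//
// plus a NULL check in the variants that accept NULL; 'heap_base' and
// 'oop_shift' are placeholders for the compression parameters kept by the VM.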
inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset), value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  return *metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  *metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset), value);
}

inline jint oopDesc::int_field(int offset) const { return *int_field_addr(offset); }
inline void oopDesc::int_field_put(int offset, jint contents) { *int_field_addr(offset) = contents; }

inline jshort oopDesc::short_field(int offset) const { return (jshort) *short_field_addr(offset); }
inline void oopDesc::short_field_put(int offset, jshort contents) { *short_field_addr(offset) = (jint) contents; }

inline jlong oopDesc::long_field(int offset) const { return *long_field_addr(offset); }
inline void oopDesc::long_field_put(int offset, jlong contents) { *long_field_addr(offset) = contents; }

inline jfloat oopDesc::float_field(int offset) const { return *float_field_addr(offset); }
inline void oopDesc::float_field_put(int offset, jfloat contents) { *float_field_addr(offset) = contents; }

inline jdouble oopDesc::double_field(int offset) const { return *double_field_addr(offset); }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const { return *address_field_addr(offset); }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  oop obj = UseCompressedOops ?
              decode_heap_oop((narrowOop)
                OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
            : decode_heap_oop((oop)
                OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  }
#endif
  return obj;
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*) obj_field_addr<oop>(offset), value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const { return OrderAccess::load_acquire(bool_field_addr(offset)); }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }

inline jchar oopDesc::char_field_acquire(int offset) const { return OrderAccess::load_acquire(char_field_addr(offset)); }
inline void oopDesc::release_char_field_put(int offset, jchar contents) { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const { return OrderAccess::load_acquire(int_field_addr(offset)); }
inline void oopDesc::release_int_field_put(int offset, jint contents) { OrderAccess::release_store(int_field_addr(offset), contents); }

inline jshort oopDesc::short_field_acquire(int offset) const { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
  // not the new value v at p due to reordering of the two
  // stores. Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */); // cast away type
}
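
// In other words (a conceptual sketch, not the barrier-set code itself), the
// release variant needs the two stores in this order:
//
//   OrderAccess::release_store(p, v);   // 1. publish the new value at p
//   *card_for(p) = dirty;               // 2. then dirty the card covering p
//
// so that a concurrent CMS precleaner that sees the dirty card also sees v;
// 'card_for' and 'dirty' are placeholders, not real names.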

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}
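
// For example (illustrative only), instead of a caller writing
//
//   if (UseCompressedOops) {
//     *(narrowOop*)addr = oopDesc::encode_heap_oop(value);
//   } else {
//     *(oop*)addr = value;
//   }
//
// it can simply write
//
//   oop_store_raw(addr, value);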

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value,
                                                bool prebarrier) {
#if INCLUDE_ALL_GCS
  if (UseShenandoahGC && ShenandoahCASBarrier) {
    return ShenandoahBarrierSet::barrier_set()->oop_atomic_cmpxchg_in_heap(exchange_value, dest, compare_value);
  }
#endif
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
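
// Illustrative only (not part of this file): a typical caller treats the
// returned oop as the witness of the CAS, e.g.
//
//   oop witness = oopDesc::atomic_compare_exchange_oop(new_value, addr,
//                                                      expected, true /* prebarrier */);
//   bool swapped = (witness == expected);
//
// where 'addr', 'new_value' and 'expected' are hypothetical names for the
// field address and the values involved.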

// Used only for markSweep, scavenging