src/share/vm/memory/barrierSet.inline.hpp

  31 // Inline functions of BarrierSet, which de-virtualize certain
  32 // performance-critical calls when the barrier is the most common
  33 // card-table kind.
  34 
  35 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
  36   if (kind() == CardTableModRef) {
  37     ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
  38   } else {
  39     write_ref_field_pre_work(field, new_val);
  40   }
  41 }
  42 
  43 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
  44   if (kind() == CardTableModRef) {
  45     ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val, release);
  46   } else {
  47     write_ref_field_work(field, new_val, release);
  48   }
  49 }
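
The two dispatchers above (and write_region further down) share one devirtualization idiom: test kind() once, and when the barrier is the common card-table kind, statically cast to the card-table subclass and call its inline_* member directly; every other kind falls back to the virtual *_work slow path. Below is a minimal, self-contained sketch of that idiom; MiniBarrierSet and MiniCardTableBS are illustrative stand-ins, not the real HotSpot classes.

#include <cstdio>

// Illustrative stand-ins for BarrierSet / CardTableModRefBS; the real classes
// carry far more state, this only models the dispatch shape.
class MiniBarrierSet {
public:
  enum Name { CardTableModRef, Other };
  explicit MiniBarrierSet(Name kind) : _kind(kind) {}
  virtual ~MiniBarrierSet() {}
  Name kind() const { return _kind; }

  // Non-virtual front end, defined after MiniCardTableBS below.
  void write_ref_field(void* field);

protected:
  // Virtual slow path taken by every non-card-table barrier kind.
  virtual void write_ref_field_work(void* field) {
    std::printf("generic slow path for %p\n", field);
  }

private:
  Name _kind;
};

class MiniCardTableBS : public MiniBarrierSet {
public:
  MiniCardTableBS() : MiniBarrierSet(CardTableModRef) {}
  // Reached through a static cast, so the call is bound at compile time
  // and can be inlined at the store site.
  void inline_write_ref_field(void* field) {
    std::printf("card-table fast path for %p\n", field);
  }
};

// The front end checks the kind once; the common card-table case never pays
// for a virtual call.
void MiniBarrierSet::write_ref_field(void* field) {
  if (kind() == CardTableModRef) {
    static_cast<MiniCardTableBS*>(this)->inline_write_ref_field(field);
  } else {
    write_ref_field_work(field);
  }
}

int main() {
  int slot = 0;
  MiniCardTableBS card_table;
  MiniBarrierSet* bs = &card_table;
  bs->write_ref_field(&slot);   // takes the inlined card-table branch
  return 0;
}

The static cast is only safe because kind() uniquely identifies the concrete subclass, which is exactly the invariant the dispatch code above relies on.
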
  50 
  51 // count is number of array elements being written
  52 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  53   assert(count <= (size_t)max_intx, "count too large");
  54   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  55 // In the case of compressed oops, start and end may be misaligned;
  56   // so we need to conservatively align the first downward (this is not
  57   // strictly necessary for current uses, but a case of good hygiene and,
  58   // if you will, aesthetics) and the second upward (this is essential for
  59   // current uses) to a HeapWord boundary, so we mark all cards overlapping
  60   // this write. If this evolves in the future to calling a
  61   // logging barrier of narrow oop granularity, like the pre-barrier for G1
  62   // (mentioned here merely by way of example), we will need to change this
  63 // interface, so it is "exactly precise" (if I may be allowed the adverbial
  64   // redundancy for emphasis) and does not include narrow oop slots not
  65   // included in the original write interval.
  66   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  67   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
  68   // If compressed oops are not in use, these should already be aligned
  69   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
  70          "Expected heap word alignment of start and end");
  71 #if 0
  72   warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t",
  73                    start,            count,              aligned_start,   aligned_end);
  74 #endif
  75   write_ref_array_work(MemRegion(aligned_start, aligned_end));
  76 }
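
The alignment step in write_ref_array can be made concrete with a small numeric example. The sketch below assumes 8-byte HeapWords and 4-byte narrow oops, and uses local align_down/align_up helpers as stand-ins for HotSpot's align_size_down/align_size_up; a two-element narrowOop store that starts and ends off an 8-byte boundary is widened to the enclosing HeapWord-aligned region, so every card overlapping the write gets dirtied.

#include <cstdint>
#include <cstdio>

// Local stand-ins for align_size_down / align_size_up (power-of-two alignment).
static uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
  return p & ~(alignment - 1);
}
static uintptr_t align_up(uintptr_t p, uintptr_t alignment) {
  return (p + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t HeapWordSize = 8;  // assumed 64-bit heap word
  const uintptr_t heapOopSize  = 4;  // assumed compressed (narrow) oops

  // Two narrowOop slots starting on a 4-byte, but not 8-byte, boundary.
  uintptr_t start = 0x1004;
  size_t    count = 2;
  uintptr_t end   = start + count * heapOopSize;              // 0x100c

  uintptr_t aligned_start = align_down(start, HeapWordSize);  // 0x1000
  uintptr_t aligned_end   = align_up(end, HeapWordSize);      // 0x1010

  // The region handed to write_ref_array_work covers every heap word
  // (and therefore every card) that the narrow-oop write overlaps.
  std::printf("[0x%lx, 0x%lx) widened to [0x%lx, 0x%lx)\n",
              (unsigned long)start, (unsigned long)end,
              (unsigned long)aligned_start, (unsigned long)aligned_end);
  return 0;
}

Widening downward dirties at most one extra card, which is the "good hygiene" half of the comment; widening upward is what guarantees the last partially covered word is not missed, the "essential for current uses" half.
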
  77 
  78 
  79 void BarrierSet::write_region(MemRegion mr) {
  80   if (kind() == CardTableModRef) {
  81     ((CardTableModRefBS*)this)->inline_write_region(mr);
  82   } else {
  83     write_region_work(mr);
  84   }
  85 }
  86 
  87 #endif // SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP


  31 // Inline functions of BarrierSet, which de-virtualize certain
  32 // performance-critical calls when the barrier is the most common
  33 // card-table kind.
  34 
  35 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
  36   if (kind() == CardTableModRef) {
  37     ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
  38   } else {
  39     write_ref_field_pre_work(field, new_val);
  40   }
  41 }
  42 
  43 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
  44   if (kind() == CardTableModRef) {
  45     ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val, release);
  46   } else {
  47     write_ref_field_work(field, new_val, release);
  48   }
  49 }
  50 
  51 void BarrierSet::write_region(MemRegion mr) {
  52   if (kind() == CardTableModRef) {
  53     ((CardTableModRefBS*)this)->inline_write_region(mr);
  54   } else {
  55     write_region_work(mr);
  56   }
  57 }
  58 
  59 #endif // SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP