src/share/vm/memory/barrierSet.inline.hpp
*** 46,83 ****
} else {
write_ref_field_work(field, new_val, release);
}
}
- // count is the number of array elements being written
- void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
- assert(count <= (size_t)max_intx, "count too large");
- HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
- // With compressed oops, start and end may be misaligned with respect to
- // HeapWord boundaries. We therefore conservatively align start downward
- // (not strictly necessary for current uses, but good hygiene) and end
- // upward (essential for current uses), so that all cards overlapping this
- // write are marked. If this interface evolves in the future to call a
- // logging barrier of narrow oop granularity, like the G1 pre-barrier
- // (mentioned here merely by way of example), it will need to change so
- // that it is exactly precise and does not include narrow oop slots
- // outside the original write interval.
- HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
- HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize);
- // If compressed oops were not being used, these should already be aligned
- assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
- "Expected heap word alignment of start and end");
- #if 0
- warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t",
- start, count, aligned_start, aligned_end);
- #endif
- write_ref_array_work(MemRegion(aligned_start, aligned_end));
- }
-
-
void BarrierSet::write_region(MemRegion mr) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_region(mr);
} else {
write_region_work(mr);
--- 46,55 ----
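
For context, the removed BarrierSet::write_ref_array widened a possibly
misaligned narrow-oop range to HeapWord boundaries before dirtying cards.
Below is a minimal, self-contained sketch of just that alignment step. The
names heap_word_size, heap_oop_size, align_down, and align_up are local
stand-ins for the HotSpot constants and align_size_down/align_size_up
macros, and the addresses are made up purely for illustration.

    // Sketch of the conservative widening performed by the removed code.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t heap_word_size = sizeof(void*);  // 8 on LP64
    static const uintptr_t heap_oop_size  = 4;               // narrow (compressed) oop

    static uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
      return p & ~(alignment - 1);
    }
    static uintptr_t align_up(uintptr_t p, uintptr_t alignment) {
      return (p + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // An array of narrow oops may begin and end in the middle of a HeapWord.
      uintptr_t start = 0x1000 + heap_oop_size;        // misaligned start
      size_t    count = 3;                             // three narrow-oop slots
      uintptr_t end   = start + count * heap_oop_size;

      // Conservatively widen to HeapWord boundaries so every card overlapping
      // the write is covered, as the deleted comment describes.
      uintptr_t aligned_start = align_down(start, heap_word_size);
      uintptr_t aligned_end   = align_up(end, heap_word_size);

      printf("write:   [%#lx, %#lx)\n",
             (unsigned long)start, (unsigned long)end);
      printf("dirtied: [%#lx, %#lx)\n",
             (unsigned long)aligned_start, (unsigned long)aligned_end);
      return 0;
    }

With these example values the written interval [0x1004, 0x1010) is widened
to [0x1000, 0x1010), so the card covering the partially written first
HeapWord is also marked.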