src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp

Old version:
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
 26 #define SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
 27 
 28 #include "gc/shared/modRefBarrierSet.hpp"
 29 
 30 #include "gc/shared/barrierSet.hpp"
 31 #include "oops/compressedOops.inline.hpp"
 32 #include "oops/objArrayOop.hpp"
 33 #include "oops/oop.hpp"
 34 
 35 class Klass;
 36 
 37 // count is the number of array elements being written
 38 void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
 39   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
 40   // In the case of compressed oops, start and end may be misaligned,
 41   // so we need to conservatively align the first downward (this is not
 42   // strictly necessary for current uses, but a case of good hygiene and,
 43   // if you will, aesthetics) and the second upward (this is essential for
 44   // current uses) to a HeapWord boundary, so we mark all cards overlapping
 45   // this write. If this evolves in the future to calling a
 46   // logging barrier of narrow oop granularity, like the pre-barrier for G1
 47   // (mentioned here merely by way of example), we will need to change this
 48   // interface, so it is "exactly precise" (if I may be allowed the adverbial
 49   // redundancy for emphasis) and does not include narrow oop slots not
 50   // included in the original write interval.
 51   HeapWord* aligned_start = align_down(start, HeapWordSize);
 52   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
 53   // If compressed oops are not in use, these should already be aligned
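With compressed oops a narrow-oop slot can begin mid-word, so the two alignments widen the marked interval to whole HeapWords. A worked sketch of the arithmetic (assuming 8-byte HeapWords and 4-byte narrow oops; the addresses are made up):

    // start = 0x1004, count = 3  =>  end = 0x1010
    HeapWord* aligned_start = align_down(start, HeapWordSize); // 0x1000
    HeapWord* aligned_end   = align_up  (end,   HeapWordSize); // 0x1010
    // Cards are then marked for [0x1000, 0x1010), which covers all
    // three narrow-oop slots of the original write.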

 75   oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
 76   if (result == compare_value) {
 77     bs->template write_ref_field_post<decorators>(addr, new_value);
 78   }
 79   return result;
 80 }
 81 
 82 template <DecoratorSet decorators, typename BarrierSetT>
 83 template <typename T>
 84 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
 85 oop_atomic_xchg_in_heap(T* addr, oop new_value) {
 86   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
 87   bs->template write_ref_field_pre<decorators>(addr);
 88   oop result = Raw::oop_atomic_xchg(addr, new_value);
 89   bs->template write_ref_field_post<decorators>(addr, new_value);
 90   return result;
 91 }
 92 
 93 template <DecoratorSet decorators, typename BarrierSetT>
 94 template <typename T>
 95 inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
 96 oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 97                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 98                       size_t length) {
 99   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
100 
101   src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
102   dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
103 
104   if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
105     // Optimized covariant case
106     bs->write_ref_array_pre(dst_raw, length,
107                             HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value);
108     Raw::oop_arraycopy(NULL, 0, src_raw, NULL, 0, dst_raw, length);
109     bs->write_ref_array((HeapWord*)dst_raw, length);
110   } else {
111     assert(dst_obj != NULL, "better have an actual oop");
112     Klass* bound = objArrayOop(dst_obj)->element_klass();
113     T* from = const_cast<T*>(src_raw);
114     T* end = from + length;
115     for (T* p = dst_raw; from < end; from++, p++) {
116       T element = *from;
117       if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
118         bs->template write_ref_field_pre<decorators>(p);
119         *p = element;
120       } else {
121         // We must do a barrier to cover the partial copy.
122         const size_t pd = pointer_delta(p, dst_raw, (size_t)heapOopSize);
123         // The pointer delta is scaled to a number of elements, which
124         // must fit in the objArrayOop length field, assumed to be 32 bits.
125         assert(pd == (size_t)(int)pd, "length field overflow");
126         bs->write_ref_array((HeapWord*)dst_raw, pd);
127         return false;
128       }
129     }
130     bs->write_ref_array((HeapWord*)dst_raw, length);
131   }
132   return true;
133 }
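In this version a failed element check is reported through the bool return value, and the caller is expected to raise the Java exception. A caller-side sketch of that contract (hedged; modeled on how ObjArrayKlass::copy_array uses the Access API, with hypothetical local names):

    // On failure the barrier set has already card-marked the copied
    // prefix, so only the exception remains to be thrown.
    bool ok = ArrayAccess<ARRAYCOPY_CHECKCAST>::oop_arraycopy(
        s, src_offset, d, dst_offset, length);
    if (!ok) {
      THROW(vmSymbols::java_lang_ArrayStoreException());
    }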
134 
135 template <DecoratorSet decorators, typename BarrierSetT>
136 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
137 clone_in_heap(oop src, oop dst, size_t size) {
138   Raw::clone(src, dst, size);
139   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
140   bs->write_region(MemRegion((HeapWord*)(void*)dst, size));
141 }
142 
143 #endif // SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP

New version:
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
 26 #define SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
 27 
 28 #include "gc/shared/modRefBarrierSet.hpp"
 29 
 30 #include "gc/shared/barrierSet.hpp"
 31 #include "oops/compressedOops.inline.hpp"
 32 #include "oops/objArrayOop.hpp"
 33 #include "oops/oop.hpp"
 34 #include "oops/inlineKlass.inline.hpp"
 35 
 36 class Klass;
 37 
 38 // count is the number of array elements being written
 39 void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
 40   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
 41   // In the case of compressed oops, start and end may be misaligned,
 42   // so we need to conservatively align the first downward (this is not
 43   // strictly necessary for current uses, but a case of good hygiene and,
 44   // if you will, aesthetics) and the second upward (this is essential for
 45   // current uses) to a HeapWord boundary, so we mark all cards overlapping
 46   // this write. If this evolves in the future to calling a
 47   // logging barrier of narrow oop granularity, like the pre-barrier for G1
 48   // (mentioned here merely by way of example), we will need to change this
 49   // interface, so it is "exactly precise" (if I may be allowed the adverbial
 50   // redundancy for emphasis) and does not include narrow oop slots not
 51   // included in the original write interval.
 52   HeapWord* aligned_start = align_down(start, HeapWordSize);
 53   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
 54   // If compressed oops are not in use, these should already be aligned

 76   oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
 77   if (result == compare_value) {
 78     bs->template write_ref_field_post<decorators>(addr, new_value);
 79   }
 80   return result;
 81 }
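The post-barrier runs only when the compare-and-exchange actually installed new_value; a failed attempt leaves the field, and therefore its card, untouched. A minimal sketch of the resulting semantics (hypothetical variables, using the same addr/compare/new argument order as above):

    oop witness = HeapAccess<>::oop_atomic_cmpxchg(addr, expected, replacement);
    if (witness == expected) {
      // The store happened and the card covering addr was dirtied.
    } else {
      // No store, no dirty card; retry with witness as the new expected value.
    }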
 82 
 83 template <DecoratorSet decorators, typename BarrierSetT>
 84 template <typename T>
 85 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
 86 oop_atomic_xchg_in_heap(T* addr, oop new_value) {
 87   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
 88   bs->template write_ref_field_pre<decorators>(addr);
 89   oop result = Raw::oop_atomic_xchg(addr, new_value);
 90   bs->template write_ref_field_post<decorators>(addr, new_value);
 91   return result;
 92 }
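An exchange, by contrast, always writes, so both the pre- and post-barrier are applied unconditionally. Usage sketch (hypothetical variables):

    // Returns the previous referent; the card for addr is always dirtied.
    oop previous = HeapAccess<>::oop_atomic_xchg(addr, replacement);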
 93 
 94 template <DecoratorSet decorators, typename BarrierSetT>
 95 template <typename T>
 96 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
 97 oop_arraycopy_partial_barrier(BarrierSetT *bs, T* dst_raw, T* p) {
 98   const size_t pd = pointer_delta(p, dst_raw, (size_t)heapOopSize);
 99   // The pointer delta is scaled to a number of elements, which must
100   // fit in the objArrayOop length field, assumed to be 32 bits.
101   assert(pd == (size_t)(int)pd, "length field overflow");
102   bs->write_ref_array((HeapWord*)dst_raw, pd);
103 }
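pointer_delta divides the byte distance between p and dst_raw by heapOopSize, yielding the number of elements written before the failure. A worked sketch (assuming compressed oops, so heapOopSize == 4; the values are made up):

    T* p = dst_raw + 6;  // check failed at the 7th slot
    size_t pd = pointer_delta(p, dst_raw, (size_t)heapOopSize);  // == 6
    bs->write_ref_array((HeapWord*)dst_raw, pd);  // covers slots 0..5 only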
104 
105 template <DecoratorSet decorators, typename BarrierSetT>
106 template <typename T>
107 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
108 oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
109                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
110                       size_t length) {
111   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
112 
113   src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
114   dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
115 
116   if ((!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) &&
117       (!HasDecorator<decorators, ARRAYCOPY_NOTNULL>::value)) {
118     // Optimized covariant case
119     bs->write_ref_array_pre(dst_raw, length,
120                             HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value);
121     Raw::oop_arraycopy(NULL, 0, src_raw, NULL, 0, dst_raw, length);
122     bs->write_ref_array((HeapWord*)dst_raw, length);
123   } else {
124     assert(dst_obj != NULL, "better have an actual oop");
125     Klass* bound = objArrayOop(dst_obj)->element_klass();
126     T* from = const_cast<T*>(src_raw);
127     T* end = from + length;
128     for (T* p = dst_raw; from < end; from++, p++) {
129       T element = *from;
130       // Apply any required checks
131       if (HasDecorator<decorators, ARRAYCOPY_NOTNULL>::value && CompressedOops::is_null(element)) {
132         oop_arraycopy_partial_barrier(bs, dst_raw, p);
133         throw_array_null_pointer_store_exception(src_obj, dst_obj, JavaThread::current());
134         return;
135       }
136       if (HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value &&
137           (!oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound))) {
138         oop_arraycopy_partial_barrier(bs, dst_raw, p);
139         throw_array_store_exception(src_obj, dst_obj, JavaThread::current());
140         return;
141       }
142       // write
143       bs->template write_ref_field_pre<decorators>(p);
144       *p = element;
145     }
146     bs->write_ref_array((HeapWord*)dst_raw, length);
147   }
148 }
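Unlike the old bool-returning version, the element checks here raise the exception themselves (with the new ARRAYCOPY_NOTNULL path added for null-restricted arrays), so the entry point now returns void. A caller-side sketch of the changed contract (hypothetical call site):

    // Copies length elements; if a check fails, the copied prefix has
    // been card-marked and an ArrayStoreException or NullPointerException
    // is pending on the current thread when the call returns.
    ArrayAccess<ARRAYCOPY_CHECKCAST>::oop_arraycopy(s, src_off, d, dst_off, length);
    if (JavaThread::current()->has_pending_exception()) {
      return;  // let the caller handle the pending exception
    }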
149 
150 template <DecoratorSet decorators, typename BarrierSetT>
151 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
152 clone_in_heap(oop src, oop dst, size_t size) {
153   Raw::clone(src, dst, size);
154   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
155   bs->write_region(MemRegion((HeapWord*)(void*)dst, size));
156 }
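Since a clone rewrites every field of the copy, the post-barrier covers the whole object with a single write_region call instead of per-field barriers. A sketch of what that amounts to for a card-table barrier set (assuming the conventional 512-byte cards and a hypothetical byte_map_base pointer; this is not the actual CardTable code):

    // size is in HeapWords, so the region spans the entire cloned object.
    MemRegion mr((HeapWord*)(void*)dst, size);
    const int card_shift = 9;  // log2(512)
    uintptr_t first = (uintptr_t)mr.start() >> card_shift;
    uintptr_t last  = (uintptr_t)mr.last()  >> card_shift;
    for (uintptr_t c = first; c <= last; c++) {
      byte_map_base[c] = 0;  // 0 is HotSpot's dirty card value
    }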
157 
158 template <DecoratorSet decorators, typename BarrierSetT>
159 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
160 value_copy_in_heap(void* src, void* dst, InlineKlass* md) {
161   if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value || (!md->contains_oops())) {
162     Raw::value_copy(src, dst, md);
163   } else {
164     BarrierSetT* bs = barrier_set_cast<BarrierSetT>(BarrierSet::barrier_set());
165     // src/dst are not oops; this offset rebases oop map offsets onto dst
166     const address dst_oop_addr_offset = ((address) dst) - md->first_field_offset();
167     typedef typename ValueOopType<decorators>::type OopType;
168 
169     // Pre-barriers...
170     OopMapBlock* map = md->start_of_nonstatic_oop_maps();
171     OopMapBlock* const end = map + md->nonstatic_oop_map_count();
172     while (map != end) {
173       address doop_address = dst_oop_addr_offset + map->offset();
174       bs->write_ref_array_pre((OopType*) doop_address, map->count(), false);
175       map++;
176     }
177 
178     Raw::value_copy(src, dst, md);
179 
180     // Post-barriers...
181     map = md->start_of_nonstatic_oop_maps();
182     while (map != end) {
183       address doop_address = dst_oop_addr_offset + map->offset();
184       bs->write_ref_array((HeapWord*) doop_address, map->count());
185       map++;
186     }
187   }
188 }
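The oop maps of an InlineKlass give field offsets relative to a heap-allocated instance, while dst points at the first field of the flat destination, so subtracting first_field_offset() rebases dst before map offsets are added. A worked sketch of the address arithmetic (the offsets are made up):

    // Suppose the inline type's first field sits at offset 16 in a
    // heap-allocated instance, and its one oop map is {offset = 24, count = 2}.
    address dst_oop_addr_offset = ((address) dst) - 16;  // first_field_offset()
    address doop_address = dst_oop_addr_offset + 24;     // == (address)dst + 8
    // The two oops therefore start 8 bytes into the flat payload, and the
    // pre- and post-barriers each cover that 2-oop run.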
189 
190 #endif // SHARE_GC_SHARED_MODREFBARRIERSET_INLINE_HPP