
src/hotspot/share/gc/shared/cardTableBarrierSet.hpp

 89   void flush_deferred_card_mark_barrier(JavaThread* thread);
 90 
 91   // If a compiler is eliding store barriers for TLAB-allocated objects,
 92   // we will be informed of a slow-path allocation by a call
 93   // to on_slowpath_allocation_exit() below. Such a call precedes the
 94   // initialization of the object itself, and no post-store-barriers will
 95   // be issued. Some heap types require that the barrier strictly follows
 96   // the initializing stores. (This is currently implemented by deferring the
 97   // barrier until the next slow-path allocation or gc-related safepoint.)
 98   // This interface answers whether a particular barrier type needs the card
 99   // mark to be thus strictly sequenced after the stores.
100   virtual bool card_mark_must_follow_store() const;
101 
102   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
103   virtual void on_thread_detach(Thread* thread);
104 
105   virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
106 
107   virtual void print_on(outputStream* st) const;
108 
109   template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
110   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
111 };
112 
113 template<>
114 struct BarrierSet::GetName<CardTableBarrierSet> {
115   static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
116 };
117 
118 template<>
119 struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
120   typedef ::CardTableBarrierSet type;
121 };
122 
123 #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP

 89   void flush_deferred_card_mark_barrier(JavaThread* thread);
 90 
 91   // If a compiler is eliding store barriers for TLAB-allocated objects,
 92   // we will be informed of a slow-path allocation by a call
 93   // to on_slowpath_allocation_exit() below. Such a call precedes the
 94   // initialization of the object itself, and no post-store-barriers will
 95   // be issued. Some heap types require that the barrier strictly follows
 96   // the initializing stores. (This is currently implemented by deferring the
 97   // barrier until the next slow-path allocation or gc-related safepoint.)
 98   // This interface answers whether a particular barrier type needs the card
 99   // mark to be thus strictly sequenced after the stores.
100   virtual bool card_mark_must_follow_store() const;
101 
102   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
103   virtual void on_thread_detach(Thread* thread);
104 
105   virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
106 
107   virtual void print_on(outputStream* st) const;
108 
109   // The AOT code cache manager needs to know the current card shift
110   // and, for some barrier sets, the region grain size shift
111   uint card_shift() const { return _card_table->card_shift(); }
112   virtual uint grain_shift() { return 0; }
113 
114   template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
115   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
116 };
117 
118 template<>
119 struct BarrierSet::GetName<CardTableBarrierSet> {
120   static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
121 };
122 
123 template<>
124 struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
125   typedef ::CardTableBarrierSet type;
126 };
127 
128 #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
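
Reviewer note on the deferred card-mark comment (lines 91-100 of the new file): when a compiler elides the post-store barrier for a TLAB allocation, the card mark for that object is deferred and issued no later than the next slow-path allocation or GC-related safepoint, so it strictly follows the initializing stores. Below is a minimal standalone sketch of that idea, in my own toy types (ToyThread, dirty_cards, slowpath_allocation_exit are illustrative names, not HotSpot's MemRegion/JavaThread/CardTable API):

    // Toy sketch, not HotSpot code: the allocating thread remembers the region of
    // an object whose post-store card mark was elided; the mark is flushed no later
    // than the next slow-path allocation, after the object's initializing stores.
    #include <cstddef>
    #include <cstdint>

    struct MemRegion {
      uintptr_t start = 0;
      size_t    byte_size = 0;
      bool is_empty() const { return byte_size == 0; }
    };

    static const int kCardShift = 9;            // 512-byte cards, the usual default
    static uint8_t   card_table[1u << 16];      // toy card byte map; we write 0 ("dirty")

    static void dirty_cards(MemRegion mr) {
      uintptr_t last = mr.start + mr.byte_size - 1;
      for (uintptr_t c = mr.start >> kCardShift; c <= (last >> kCardShift); c++) {
        card_table[c & ((1u << 16) - 1)] = 0;   // dirty every card the object spans
      }
    }

    struct ToyThread {
      MemRegion deferred_card_mark;             // at most one deferred mark per thread
    };

    // Analogue of on_slowpath_allocation_exit(): flush the previously deferred mark
    // (that object is fully initialized by now), then defer the mark for the new one.
    static void slowpath_allocation_exit(ToyThread* t, uintptr_t new_obj, size_t size) {
      if (!t->deferred_card_mark.is_empty()) {
        dirty_cards(t->deferred_card_mark);
        t->deferred_card_mark = MemRegion();
      }
      t->deferred_card_mark = MemRegion{new_obj, size};
    }

A GC-related safepoint flushing via flush_deferred_card_mark_barrier()/make_parsable() would play the same role as the flush at the top of slowpath_allocation_exit() in this sketch.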
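On the new lines 109-112 (card_shift()/grain_shift() for the AOT code cache manager): compiled barrier code typically bakes the card shift in as an immediate when it computes a card index as address >> card_shift, so code taken from an AOT code cache is only reusable if the shifts it was compiled with still match the current barrier set. The sketch below is my own illustration under that assumption; AOTCodeHeader and aot_code_is_compatible are hypothetical names, not an existing HotSpot API:

    #include <cstdint>

    struct AOTCodeHeader {        // hypothetical per-cache metadata
      uint32_t card_shift;        // shift compiled into the cached barrier code
      uint32_t grain_shift;       // 0 for plain card-table barrier sets
    };

    // What an emitted post-write barrier effectively does with the shift:
    // index the card byte map by (address >> card_shift) and dirty that card.
    inline void emitted_card_mark(uint8_t* byte_map_base, uintptr_t store_addr,
                                  uint32_t card_shift) {
      byte_map_base[store_addr >> card_shift] = 0;   // 0 = dirty
    }

    // Hypothetical load-time compatibility check by the AOT code cache manager,
    // comparing recorded shifts against the live barrier set's card_shift()
    // and grain_shift().
    bool aot_code_is_compatible(const AOTCodeHeader& h,
                                uint32_t runtime_card_shift,
                                uint32_t runtime_grain_shift) {
      return h.card_shift == runtime_card_shift &&
             h.grain_shift == runtime_grain_shift;
    }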