/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_HPP
#define SHARE_GC_Z_ZBARRIER_HPP

#include "gc/z/zAddress.hpp"
#include "memory/allStatic.hpp"
#include "memory/iterator.hpp"
#include "oops/inlineKlass.hpp"

// == Shift based load barrier ==
//
// The load barriers of ZGC check whether a loaded value is safe to expose, and
// then shift the pointer to remove the metadata bits, such that it points to
// mapped memory.
//
// A pointer is safe to expose if it does not have any load-bad bits set in its
// metadata bits. In the C++ code and in non-nmethod generated code, this is
// checked by testing the pointer value against a load-bad mask, verifying that
// no bad bit is set, followed by a shift that removes the metadata bits if they
// were good. However, for nmethod code, the test + shift sequence is optimized
// such that a single shift instruction both tests whether the pointer is
// exposable and removes the metadata bits. This is a speculative optimization
// that assumes that the loaded pointer is frequently going to be load-good or
// null when checked. Therefore, the nmethod load barriers just apply the shift
// with the current "good" shift (which is patched with nmethod entry barriers
// for each GC phase). If the result of that shift is a raw null value, then the
// ZF flag is set. If the result is a good pointer, then the very last bit that
// was removed by the shift must have been a 1, which would have set the CF flag.
// Therefore, the "above" branch condition code is used to take the slow path iff
// CF == 0 and ZF == 0: CF == 0 implies it was not a good pointer, and ZF == 0
// implies the resulting address was not a null value, so we conclude that the
// pointer is bad. This optimization is necessary to get satisfactory
// performance, but does come with a few constraints:
//
// 1) The load barrier can only recognize 4 different good patterns across all GC phases.
//    The reason is that when a load barrier applies the currently good shift, the
//    value of that shift may differ by at most 3 before we risk shifting away more
//    than the three low-order zeroes of an address, given a bad pointer, which would
//    yield spurious false positives.
//
// 2) Those bit patterns must have only a single bit set. We achieve that by moving
//    non-relocation work to store barriers.
//
// Another consequence of this speculative optimization is that when the compiled code
// takes a slow path, it needs to reload the oop, because the shifted oop is now
// broken, having been shifted with a different shift from the one that was used
// when the oop was stored.
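//
// As an illustrative sketch only (the mask, shift, and helper names below are
// hypothetical stand-ins, not the actual barrier implementation), the two
// fast-path flavors described above correspond roughly to:
//
//   // C++ / non-nmethod flavor: explicit test against a load-bad mask,
//   // followed by a shift that strips the metadata bits.
//   uintptr_t colored = raw_load(p);                   // hypothetical raw load
//   if ((colored & load_bad_mask) == 0) {
//     uintptr_t address = colored >> load_good_shift;  // fast path: load-good
//   } else {
//     slow_path(p, colored);                           // mark/remap, then self-heal *p
//   }
//
//   // nmethod flavor (conceptual x86-64): a single shift both strips the
//   // metadata bits and sets the flags used to detect a bad pointer:
//   //   shr rax, load_good_shift   ; CF = last bit shifted out, ZF = (result == 0)
//   //   ja  slow_path              ; "above" is taken iff CF == 0 and ZF == 0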

typedef bool (*ZBarrierFastPath)(zpointer);
typedef zpointer (*ZBarrierColor)(zaddress, zpointer);

class ZGeneration;

void z_assert_is_barrier_safe();

class ZBarrier : public AllStatic {
  friend class ZContinuation;
  friend class ZStoreBarrierBuffer;
  friend class ZUncoloredRoot;

private:
  static void assert_transition_monotonicity(zpointer ptr, zpointer heal_ptr);
  static void self_heal(ZBarrierFastPath fast_path, volatile zpointer* p, zpointer ptr, zpointer heal_ptr, bool allow_null);

  template <typename ZBarrierSlowPath>
  static zaddress barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null = false);

  static zaddress make_load_good(zpointer ptr);
  static zaddress make_load_good_no_relocate(zpointer ptr);
  static zaddress relocate_or_remap(zaddress_unsafe addr, ZGeneration* generation);
  static zaddress remap(zaddress_unsafe addr, ZGeneration* generation);
  static void remember(volatile zpointer* p);
  static void mark_and_remember(volatile zpointer* p, zaddress addr);

  // Fast paths in increasing strength level
  static bool is_load_good_or_null_fast_path(zpointer ptr);
  static bool is_mark_good_fast_path(zpointer ptr);
  static bool is_store_good_fast_path(zpointer ptr);
  static bool is_store_good_or_null_fast_path(zpointer ptr);
  static bool is_store_good_or_null_any_fast_path(zpointer ptr);

  static bool is_mark_young_good_fast_path(zpointer ptr);
  static bool is_finalizable_good_fast_path(zpointer ptr);

  // Slow paths
  static zaddress blocking_keep_alive_on_weak_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_keep_alive_on_phantom_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_load_barrier_on_weak_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_load_barrier_on_phantom_slow_path(volatile zpointer* p, zaddress addr);

  static zaddress mark_slow_path(zaddress addr);
  static zaddress mark_young_slow_path(zaddress addr);
  static zaddress mark_from_young_slow_path(zaddress addr);
  static zaddress mark_from_old_slow_path(zaddress addr);
  static zaddress mark_finalizable_slow_path(zaddress addr);
  static zaddress mark_finalizable_from_old_slow_path(zaddress addr);

  static zaddress keep_alive_slow_path(zaddress addr);
  static zaddress heap_store_slow_path(volatile zpointer* p, zaddress addr, zpointer prev, bool heal);
  static zaddress native_store_slow_path(zaddress addr);
  static zaddress no_keep_alive_heap_store_slow_path(volatile zpointer* p, zaddress addr);

  static zaddress promote_slow_path(zaddress addr);

  // Helpers for non-strong oop ref barriers
  static zaddress blocking_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Verification
  static void verify_on_weak(volatile zpointer* referent_addr) NOT_DEBUG_RETURN;

public:

  static zpointer load_atomic(volatile zpointer* p);

  // Helpers for relocation
  static ZGeneration* remap_generation(zpointer ptr);
  static void remap_young_relocated(volatile zpointer* p, zpointer o);

  // Helpers for marking
  template <bool resurrect, bool gc_thread, bool follow, bool finalizable>
  static void mark(zaddress addr);
  template <bool resurrect, bool gc_thread, bool follow>
  static void mark_young(zaddress addr);
  template <bool resurrect, bool gc_thread, bool follow>
  static void mark_if_young(zaddress addr);

  // Load barrier
  static zaddress load_barrier_on_oop_field(volatile zpointer* p);
  static zaddress load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o);

  static void load_barrier_on_oop_array(volatile zpointer* p, size_t length);

  static zaddress keep_alive_load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Load barriers on non-strong oop refs
  static zaddress load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  static zaddress no_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Reference processor / weak cleaning barriers
  static bool clean_barrier_on_weak_oop_field(volatile zpointer* p);
  static bool clean_barrier_on_phantom_oop_field(volatile zpointer* p);
  static bool clean_barrier_on_final_oop_field(volatile zpointer* p);

  // Mark barrier
  static void mark_barrier_on_young_oop_field(volatile zpointer* p);
  static void mark_barrier_on_old_oop_field(volatile zpointer* p, bool finalizable);
  static void mark_barrier_on_oop_field(volatile zpointer* p, bool finalizable);
  static void mark_young_good_barrier_on_oop_field(volatile zpointer* p);
  static zaddress remset_barrier_on_oop_field(volatile zpointer* p);
  static void promote_barrier_on_young_oop_field(volatile zpointer* p);

  // Store barrier
  static void store_barrier_on_heap_oop_field(volatile zpointer* p, bool heal);
  static void store_barrier_on_native_oop_field(volatile zpointer* p, bool heal);

  static void no_keep_alive_store_barrier_on_heap_oop_field(volatile zpointer* p);
};

#endif // SHARE_GC_Z_ZBARRIER_HPP