/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_HPP
#define SHARE_GC_Z_ZBARRIER_HPP

#include "gc/z/zAddress.hpp"
#include "memory/allStatic.hpp"
#include "memory/iterator.hpp"

// == Shift-based load barrier ==
//
// The load barriers of ZGC check whether a loaded value is safe to expose, and
// then shift the pointer to remove the metadata bits, so that it points into
// mapped memory.
//
// A pointer is safe to expose if it does not have any load-bad bits set in its
// metadata bits. In the C++ code and in non-nmethod generated code, this is
// checked by testing the pointer value against a load-bad mask, verifying that
// no bad bit is set, followed by a shift that removes the metadata bits if they
// were good. For nmethod code, however, the test + shift sequence is optimized
// so that a single shift instruction both tests whether the pointer is
// exposable and removes the metadata bits. This is a speculative optimization
// that assumes the loaded pointer is frequently going to be load-good or null
// when checked. Therefore, the nmethod load barriers simply apply the shift
// with the current "good" shift (which is patched with nmethod entry barriers
// for each GC phase). If the result of that shift is a raw null value, the ZF
// flag is set. If the result is a good pointer, the very last bit removed by
// the shift must have been a 1, which sets the CF flag. Therefore, the "above"
// branch condition code is used to take a slow path iff CF == 0 and ZF == 0:
// CF == 0 implies the pointer was not good, and ZF == 0 implies the resulting
// address was not null, so the pointer must be bad (see the sketch at the end
// of this comment). This optimization is necessary to get satisfactory
// performance, but it does come with a few constraints:
//
// 1) The load barrier can only recognize 4 different good patterns across all
//    GC phases. The reason is that the values of the good shifts applied by
//    the load barriers may differ by at most 3 across phases; beyond that we
//    would risk shifting away more than the three low-order zero bits of an
//    address, which, given a bad pointer, would yield false positives.
//
// 2) Those bit patterns must have only a single bit set. We achieve that by
//    moving non-relocation work to store barriers.
//
// Another consequence of this speculative optimization is that when the
// compiled code takes a slow path, it needs to reload the oop, because the
// shifted oop is broken after being shifted with a different shift than the
// one used when the oop was stored.
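//
// As an illustrative sketch (simplified, and not the exact code HotSpot emits;
// the mask, shift and conversion names below are assumptions based on
// zAddress.hpp, and slow_path stands in for the appropriate healing routine),
// the two variants of the check look roughly like this:
//
//   // C++ / non-nmethod code: explicit test against the load-bad mask,
//   // followed by a separate shift that strips the metadata bits.
//   zaddress load_good_or_slow(volatile zpointer* p, zpointer ptr) {
//     if ((untype(ptr) & ZPointerLoadBadMask) == 0) {
//       // Load-good (or null): expose the unmasked address
//       return to_zaddress(untype(ptr) >> ZPointerLoadShift);
//     }
//     return slow_path(p, ptr); // heal the field, then expose
//   }
//
//   // nmethod code (x86-64 flavor): a single shift both tests and unmasks.
//   shr  rax, <currently good shift>  ; CF = last bit shifted out, ZF = (result == 0)
//   ja   slow_path                    ; "above" is taken iff CF == 0 and ZF == 0
//   ; fallthrough: rax is either null or the exposed address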
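// Function pointer types used to parameterize the generic ZBarrier::barrier()
// routine below: a fast path predicate that decides whether a colored pointer
// is already safe to expose, and a coloring function that applies the desired
// color to the address produced by a slow path.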
typedef bool (*ZBarrierFastPath)(zpointer);
typedef zpointer (*ZBarrierColor)(zaddress, zpointer);

class ZGeneration;

class ZBarrier : public AllStatic {
  friend class ZContinuation;
  friend class ZStoreBarrierBuffer;
  friend class ZUncoloredRoot;

private:
  static void assert_transition_monotonicity(zpointer ptr, zpointer heal_ptr);
  static void self_heal(ZBarrierFastPath fast_path, volatile zpointer* p, zpointer ptr, zpointer heal_ptr, bool allow_null);

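  // Generic barrier routine (implemented in zBarrier.inline.hpp) that the
  // barriers below are built from: if fast_path accepts the loaded pointer it
  // is exposed as-is; otherwise slow_path produces a safe address, which is
  // colored by color and, where allowed, self-healed back into *p.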
  template <typename ZBarrierSlowPath>
  static zaddress barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null = false);

  static zaddress make_load_good(zpointer ptr);
  static zaddress make_load_good_no_relocate(zpointer ptr);
  static zaddress relocate_or_remap(zaddress_unsafe addr, ZGeneration* generation);
  static zaddress remap(zaddress_unsafe addr, ZGeneration* generation);
  static void remember(volatile zpointer* p);
  static void mark_and_remember(volatile zpointer* p, zaddress addr);

  // Fast paths in increasing strength level
  static bool is_load_good_or_null_fast_path(zpointer ptr);
  static bool is_mark_good_fast_path(zpointer ptr);
  static bool is_store_good_fast_path(zpointer ptr);
  static bool is_store_good_or_null_fast_path(zpointer ptr);
  static bool is_store_good_or_null_any_fast_path(zpointer ptr);

  static bool is_mark_young_good_fast_path(zpointer ptr);
  static bool is_finalizable_good_fast_path(zpointer ptr);

  // Slow paths
  static zaddress blocking_keep_alive_on_weak_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_keep_alive_on_phantom_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_load_barrier_on_weak_slow_path(volatile zpointer* p, zaddress addr);
  static zaddress blocking_load_barrier_on_phantom_slow_path(volatile zpointer* p, zaddress addr);

  static zaddress mark_slow_path(zaddress addr);
  static zaddress mark_young_slow_path(zaddress addr);
  static zaddress mark_from_young_slow_path(zaddress addr);
  static zaddress mark_from_old_slow_path(zaddress addr);
  static zaddress mark_finalizable_slow_path(zaddress addr);
  static zaddress mark_finalizable_from_old_slow_path(zaddress addr);

  static zaddress keep_alive_slow_path(zaddress addr);
  static zaddress heap_store_slow_path(volatile zpointer* p, zaddress addr, zpointer prev, bool heal);
  static zaddress native_store_slow_path(zaddress addr);
  static zaddress no_keep_alive_heap_store_slow_path(volatile zpointer* p, zaddress addr);

  static zaddress promote_slow_path(zaddress addr);

  // Helpers for non-strong oop ref barriers
  static zaddress blocking_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress blocking_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Verification
  static void verify_on_weak(volatile zpointer* referent_addr) NOT_DEBUG_RETURN;

public:

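  // Load a zpointer from the given field atomically; the field may be
  // concurrently updated by self-healing barriers in other threads.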
  static zpointer load_atomic(volatile zpointer* p);

  // Helpers for relocation
  static ZGeneration* remap_generation(zpointer ptr);
  static void remap_young_relocated(volatile zpointer* p, zpointer o);

  // Helpers for marking
  template <bool resurrect, bool gc_thread, bool follow, bool finalizable>
  static void mark(zaddress addr);
  template <bool resurrect, bool gc_thread, bool follow>
  static void mark_young(zaddress addr);
  template <bool resurrect, bool gc_thread, bool follow>
  static void mark_if_young(zaddress addr);

  // Load barrier
  static zaddress load_barrier_on_oop_field(volatile zpointer* p);
  static zaddress load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o);

  static void load_barrier_on_oop_array(volatile zpointer* p, size_t length);

  static zaddress keep_alive_load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Load barriers on non-strong oop refs
  static zaddress load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  static zaddress no_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o);
  static zaddress no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o);

  // Reference processor / weak cleaning barriers
  static bool clean_barrier_on_weak_oop_field(volatile zpointer* p);
  static bool clean_barrier_on_phantom_oop_field(volatile zpointer* p);
  static bool clean_barrier_on_final_oop_field(volatile zpointer* p);

  // Mark barrier
  static void mark_barrier_on_young_oop_field(volatile zpointer* p);
  static void mark_barrier_on_old_oop_field(volatile zpointer* p, bool finalizable);
  static void mark_barrier_on_oop_field(volatile zpointer* p, bool finalizable);
  static void mark_young_good_barrier_on_oop_field(volatile zpointer* p);
  static zaddress remset_barrier_on_oop_field(volatile zpointer* p);
  static void promote_barrier_on_young_oop_field(volatile zpointer* p);

  // Store barrier
  static void store_barrier_on_heap_oop_field(volatile zpointer* p, bool heal);
  static void store_barrier_on_native_oop_field(volatile zpointer* p, bool heal);

  static void no_keep_alive_store_barrier_on_heap_oop_field(volatile zpointer* p);
};

#endif // SHARE_GC_Z_ZBARRIER_HPP