/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_ARCHIVEUTILS_HPP
#define SHARE_CDS_ARCHIVEUTILS_HPP

#include "cds/cds_globals.hpp"
#include "cds/serializeClosure.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"

class BootstrapInfo;
class ReservedSpace;
class VirtualSpace;

template<class E> class Array;
template<class E> class GrowableArray;

// ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an
// InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling
// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is
// fixed, but _ptr_end can be expanded as more objects are dumped.
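//
// For example, a sketch of how a pointer field can be stored and marked in one
// step during dumping (k and archived_name are illustrative):
//   ArchivePtrMarker::set_and_mark_pointer(&k->_name, archived_name);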
class ArchivePtrMarker : AllStatic {
  static CHeapBitMap* _ptrmap;
  static CHeapBitMap* _rw_ptrmap;
  static CHeapBitMap* _ro_ptrmap;
  static VirtualSpace* _vs;

  // Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
  // avoid unintentional copy operations after the bitmap has been finalized and written.
  static bool _compacted;

  static address* ptr_base() { return (address*)_vs->low();  } // committed lower bound (inclusive)
  static address* ptr_end()  { return (address*)_vs->high(); } // committed upper bound (exclusive)

public:
  static void initialize(CHeapBitMap* ptrmap, VirtualSpace* vs);
  static void initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap);
  static void mark_pointer(address* ptr_loc);
  static void clear_pointer(address* ptr_loc);
  static void compact(address relocatable_base, address relocatable_end);
  static void compact(size_t max_non_null_offset);

  template <typename T>
  static void mark_pointer(T* ptr_loc) {
    mark_pointer((address*)ptr_loc);
  }

  template <typename T>
  static void set_and_mark_pointer(T* ptr_loc, T ptr_value) {
    *ptr_loc = ptr_value;
    mark_pointer(ptr_loc);
  }

  static CHeapBitMap* ptrmap() {
    return _ptrmap;
  }

  static CHeapBitMap* rw_ptrmap() {
    return _rw_ptrmap;
  }

  static CHeapBitMap* ro_ptrmap() {
    return _ro_ptrmap;
  }

  static void reset_map_and_vs() {
    _ptrmap = nullptr;
    _rw_ptrmap = nullptr;
    _ro_ptrmap = nullptr;
    _vs = nullptr;
  }
};

// SharedDataRelocator is used to shift pointers in the CDS archive.
//
// The CDS archive is basically a contiguous block of memory (divided into several regions)
// that contains multiple objects. The objects may contain direct pointers that point to other objects
// within the archive (e.g., InstanceKlass::_name points to a Symbol in the archive). During dumping, we
// build a bitmap that marks the locations of all these pointers (using ArchivePtrMarker, see comments above).
//
// The contents of the archive assume that it is mapped at the default SharedBaseAddress (e.g., 0x800000000).
// If the archive ends up being mapped at a different address (e.g., 0x810000000), SharedDataRelocator
// is used to shift each marked pointer by a delta (0x10000000 in this example), so that it points to
// the actually mapped location of the target object.
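//
// For example, with a delta of 0x10000000, a marked pointer slot holding the
// value 0x800000100 is patched to hold 0x810000100, the actual mapped address
// of its target. Conceptually, do_bit() performs the following for each marked
// slot p:
//   assert(_valid_old_base <= *p && *p < _valid_old_end, "must be");
//   *p += _delta;  // now (_valid_new_base <= *p && *p < _valid_new_end)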
class SharedDataRelocator: public BitMapClosure {
  // for all (address* p), where (is_marked(p) && _patch_base <= p && p < _patch_end) { *p += delta; }

  // Patch all pointers within this region that are marked.
  address* _patch_base;
  address* _patch_end;

  // Before patching, all pointers must point to this region.
  address _valid_old_base;
  address _valid_old_end;

  // After patching, all pointers must point to this region.
  address _valid_new_base;
  address _valid_new_end;

  // How much to relocate for each pointer.
  intx _delta;

public:
  SharedDataRelocator(address* patch_base, address* patch_end,
                      address valid_old_base, address valid_old_end,
                      address valid_new_base, address valid_new_end, intx delta) :
    _patch_base(patch_base), _patch_end(patch_end),
    _valid_old_base(valid_old_base), _valid_old_end(valid_old_end),
    _valid_new_base(valid_new_base), _valid_new_end(valid_new_end),
    _delta(delta) {
    log_debug(aot, reloc)("SharedDataRelocator::_patch_base = " PTR_FORMAT, p2i(_patch_base));
    log_debug(aot, reloc)("SharedDataRelocator::_patch_end = " PTR_FORMAT, p2i(_patch_end));
    log_debug(aot, reloc)("SharedDataRelocator::_valid_old_base = " PTR_FORMAT, p2i(_valid_old_base));
    log_debug(aot, reloc)("SharedDataRelocator::_valid_old_end = " PTR_FORMAT, p2i(_valid_old_end));
    log_debug(aot, reloc)("SharedDataRelocator::_valid_new_base = " PTR_FORMAT, p2i(_valid_new_base));
    log_debug(aot, reloc)("SharedDataRelocator::_valid_new_end = " PTR_FORMAT, p2i(_valid_new_end));
  }

  bool do_bit(size_t offset);
};

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  uintx _max_delta;
  bool _is_packed;
  ReservedSpace* _rs;
  VirtualSpace* _vs;

  void commit_to(char* newtop);

public:
  DumpRegion(const char* name, uintx max_delta = 0)
    : _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
      _max_delta(max_delta), _is_packed(false),
      _rs(nullptr), _vs(nullptr) {}

  char* expand_top_to(char* newtop);
  char* allocate(size_t num_bytes, size_t alignment = 0);

  void append_intptr_t(intptr_t n, bool need_to_mark = false) NOT_CDS_RETURN;

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  bool is_allocatable() const {
    return !is_packed() && _base != nullptr;
  }
  bool is_empty() const { return _base == _top; }

  void print(size_t total_bytes) const;
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

  void init(ReservedSpace* rs, VirtualSpace* vs);

  void pack(DumpRegion* next = nullptr);

  bool contains(char* p) {
    return base() <= p && p < top();
  }
};

// Closure for serializing initialization data out to a data area to be
// written to the shared file.
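//
// Illustrative sketch (dump_region and _the_table are hypothetical): during
// dumping, each serialized word is appended to the dump region:
//   WriteClosure wc(&dump_region);
//   wc.do_tag(1000);                 // consistency-check marker
//   wc.do_ptr((void**)&_the_table);  // the written slot is also marked for relocation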

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p);

  void do_u4(u4* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_int(int* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_bool(bool* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  char* region_top() {
    return _dump_region->top();
  }

  bool reading() const { return false; }
};

// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.
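//
// Illustrative sketch (buf is hypothetical and points into the mapped
// serialized data): replaying the same do_* calls, in the same order, restores
// the words that were written by the WriteClosure above:
//   intptr_t* buf = serialized_data_start;  // hypothetical
//   ReadClosure rc(&buf, (intptr_t)SharedBaseAddress);
//   rc.do_tag(1000);                 // checks against the tag that was written
//   rc.do_ptr((void**)&_the_table);  // reads the pointer back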

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;
  intptr_t _base_address;
  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array, intptr_t base_address) :
      _ptr_array(ptr_array), _base_address(base_address) {}

  void do_ptr(void** p);
  void do_u4(u4* p);
  void do_int(int* p);
  void do_bool(bool* p);
  void do_tag(int tag);
  bool reading() const { return true; }
  char* region_top() { return nullptr; }
};

class ArchiveUtils {
  template <typename T> static Array<T>* archive_non_ptr_array(GrowableArray<T>* tmp_array);
  template <typename T> static Array<T>* archive_ptr_array(GrowableArray<T>* tmp_array);

public:
  static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
  static void log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) NOT_CDS_RETURN;
  static bool has_aot_initialized_mirror(InstanceKlass* src_ik);

  template <typename T, ENABLE_IF(!std::is_pointer<T>::value)>
  static Array<T>* archive_array(GrowableArray<T>* tmp_array) {
    return archive_non_ptr_array(tmp_array);
  }

  template <typename T, ENABLE_IF(std::is_pointer<T>::value)>
  static Array<T>* archive_array(GrowableArray<T>* tmp_array) {
    return archive_ptr_array(tmp_array);
  }

  // The following functions translate between a u4 offset and an address in the
  // range of the mapped CDS archive (i.e., addresses for which Metaspace::in_aot_cache()
  // returns true). Since the first 16 bytes in this range are dummy data (see
  // ArchiveBuilder::reserve_buffer()), we know that offset 0 never represents a
  // valid object. As a result, an offset of 0 is used to encode a nullptr.
  //
  // Use the "archived_address_or_null" variants if a nullptr may be encoded.
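  //
  // Illustrative round trip (sym is a hypothetical archived Symbol*):
  //   u4 off = ArchiveUtils::archived_address_or_null_to_offset(sym);
  //   Symbol* s = ArchiveUtils::offset_to_archived_address_or_null<Symbol*>(off);
  //   assert(s == sym, "round trip");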

  // offset must represent an object of type T in the mapped shared space. Return
  // a direct pointer to this object.
  template <typename T> static T offset_to_archived_address(u4 offset) {
    assert(offset != 0, "sanity");
    T p = (T)(SharedBaseAddress + offset);
    assert(Metaspace::in_aot_cache(p), "must be");
    return p;
  }

  template <typename T> static T offset_to_archived_address_or_null(u4 offset) {
    if (offset == 0) {
      return nullptr;
    } else {
      return offset_to_archived_address<T>(offset);
    }
  }

  // p must be an archived object. Get its offset from SharedBaseAddress.
  template <typename T> static u4 archived_address_to_offset(T p) {
    uintx pn = (uintx)p;
    uintx base = (uintx)SharedBaseAddress;
    assert(Metaspace::in_aot_cache(p), "must be");
    assert(pn > base, "sanity"); // No valid object is stored at 0 offset from SharedBaseAddress
    uintx offset = pn - base;
    assert(offset <= MAX_SHARED_DELTA, "range check");
    return static_cast<u4>(offset);
  }

  template <typename T> static u4 archived_address_or_null_to_offset(T p) {
    if (p == nullptr) {
      return 0;
    } else {
      return archived_address_to_offset<T>(p);
    }
  }
};

class HeapRootSegments {
private:
  size_t _base_offset;
  size_t _count;
  int _roots_count;
  int _max_size_in_bytes;
  int _max_size_in_elems;

public:
  size_t base_offset() { return _base_offset; }
  size_t count() { return _count; }
  int roots_count() { return _roots_count; }
  int max_size_in_bytes() { return _max_size_in_bytes; }
  int max_size_in_elems() { return _max_size_in_elems; }

  size_t size_in_bytes(size_t seg_idx);
  int size_in_elems(size_t seg_idx);
  size_t segment_offset(size_t seg_idx);

  // Trivial copy assignments are allowed to copy the entire object representation.
  // We also inline this class into the archive header. Therefore, it is important
  // to make sure any gaps in the object representation are initialized to zeroes.
  // This is why the constructors memset before doing field assignments.
  HeapRootSegments() {
    memset(this, 0, sizeof(*this));
  }
  HeapRootSegments(size_t base_offset, int roots_count, int max_size_in_bytes, int max_size_in_elems) {
    memset(this, 0, sizeof(*this));
    _base_offset = base_offset;
    _count = (roots_count + max_size_in_elems - 1) / max_size_in_elems;
    _roots_count = roots_count;
    _max_size_in_bytes = max_size_in_bytes;
    _max_size_in_elems = max_size_in_elems;
  }

  // This class is trivially copyable and assignable.
  HeapRootSegments(const HeapRootSegments&) = default;
  HeapRootSegments& operator=(const HeapRootSegments&) = default;
};

class ArchiveWorkers;

// A task to be worked on by worker threads.
class ArchiveWorkerTask : public CHeapObj<mtInternal> {
  friend class ArchiveWorkers;
private:
  const char* _name;
  int _max_chunks;
  volatile int _chunk;

  void run();

  void configure_max_chunks(int max_chunks);

public:
  ArchiveWorkerTask(const char* name) :
      _name(name), _max_chunks(0), _chunk(0) {}
  const char* name() const { return _name; }
  virtual void work(int chunk, int max_chunks) = 0;
};

class ArchiveWorkerThread : public NamedThread {
  friend class ArchiveWorkers;
private:
  ArchiveWorkers* const _pool;

  void post_run() override;

public:
  ArchiveWorkerThread(ArchiveWorkers* pool);
  const char* type_name() const override { return "Archive Worker Thread"; }
  void run() override;
};

// Special archive workers. The goal for this implementation is to start up fast,
// distribute spiky workloads efficiently, and shut down immediately after use.
// This makes the implementation quite different from the normal GC worker pool.
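//
// Illustrative sketch of typical use (MyTask is hypothetical): subclass
// ArchiveWorkerTask, split the work into max_chunks pieces, and run it:
//   class MyTask : public ArchiveWorkerTask {
//   public:
//     MyTask() : ArchiveWorkerTask("My Task") {}
//     void work(int chunk, int max_chunks) override {
//       // process the chunk-th piece out of max_chunks
//     }
//   };
//   ArchiveWorkers workers;
//   MyTask task;
//   workers.run_task(&task);  // returns when all chunks have been processed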
class ArchiveWorkers : public StackObj {
  friend class ArchiveWorkerThread;
private:
  // Target number of chunks per worker. This should be large enough to even
  // out work imbalance, and small enough to keep bookkeeping overheads low.
  static constexpr int CHUNKS_PER_WORKER = 4;
  static int max_workers();

  Semaphore _end_semaphore;

  int _num_workers;
  int _started_workers;
  int _finish_tokens;

  typedef enum { UNUSED, WORKING, SHUTDOWN } State;
  volatile State _state;

  ArchiveWorkerTask* _task;

  void run_as_worker();
  void start_worker_if_needed();

  void run_task_single(ArchiveWorkerTask* task);
  void run_task_multi(ArchiveWorkerTask* task);

  bool is_parallel();

public:
  ArchiveWorkers();
  ~ArchiveWorkers();
  void run_task(ArchiveWorkerTask* task);
};

#endif // SHARE_CDS_ARCHIVEUTILS_HPP