/*
 * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/classListParser.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bootstrapInfo.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

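// ArchivePtrMarker tracks the location of every native pointer stored inside the
// archive buffer: _ptrmap has one bit per pointer-sized word of the buffer, and a
// set bit means the word at that offset holds a pointer that must be relocated
// when the archive is mapped at a different address than it was dumped at.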
CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
VirtualSpace* ArchivePtrMarker::_vs;

bool ArchivePtrMarker::_compacted;

void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
  assert(_ptrmap == nullptr, "initialize only once");
  _vs = vs;
  _compacted = false;
  _ptrmap = ptrmap;

  // Use this as an initial guesstimate. We should need less space in the
  // archive, but if we're wrong the bitmap will be expanded automatically.
  size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
  // But set it smaller in debug builds so we always test the expansion code.
  // (The default archive is about 12MB.)
  DEBUG_ONLY(estimated_archive_size = 6 * M);

  // We need one bit per pointer in the archive.
  _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
}

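// Remember that *ptr_loc is a pointer into the archive, by setting the bit that
// corresponds to ptr_loc's word offset from the bottom of the archive. For example,
// a pointer stored at ptr_base() + 5 (i.e., 5 words above the bottom) sets bit 5.
// Locations outside [ptr_base(), ptr_end()) and null values are ignored.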
void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot mark anymore");

  if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
    address value = *ptr_loc;
    // We don't want any pointer that points to the very bottom of the archive; otherwise,
    // when MetaspaceShared::default_base_address()==0, we can't distinguish between a
    // pointer to nothing (null) and a pointer to an object that happens to be at the
    // very bottom of the archive.
    assert(value != (address)ptr_base(), "don't point to the bottom of the archive");

    if (value != nullptr) {
      assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
      size_t idx = ptr_loc - ptr_base();
      if (_ptrmap->size() <= idx) {
        _ptrmap->resize((idx + 1) * 2);
      }
      assert(idx < _ptrmap->size(), "must be");
      _ptrmap->set_bit(idx);
      //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
    }
  }
}

void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot clear anymore");

  assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
  assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
  size_t idx = ptr_loc - ptr_base();
  assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
  _ptrmap->clear_bit(idx);
  //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
}

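// Helper closure used by ArchivePtrMarker::compact(): it clears the bits for words
// that turned out to hold null, verifies that every remaining pointer stays within
// [relocatable_base, relocatable_end), and records the highest offset that still
// holds a non-null pointer so the bitmap can be trimmed to that size.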
class ArchivePtrBitmapCleaner: public BitMapClosure {
  CHeapBitMap* _ptrmap;
  address* _ptr_base;
  address  _relocatable_base;
  address  _relocatable_end;
  size_t   _max_non_null_offset;

public:
  ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
    _ptrmap(ptrmap), _ptr_base(ptr_base),
    _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}

  bool do_bit(size_t offset) {
    address* ptr_loc = _ptr_base + offset;
    address  ptr_value = *ptr_loc;
    if (ptr_value != nullptr) {
      assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
      if (_max_non_null_offset < offset) {
        _max_non_null_offset = offset;
      }
    } else {
      _ptrmap->clear_bit(offset);
      DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT  "] -> null @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset));
    }

    return true;
  }

  size_t max_non_null_offset() const { return _max_non_null_offset; }
};

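// Called after all pointers have been marked. Drops the bits for null pointers and
// shrinks the bitmap to the last marked bit; afterwards no more marking or clearing
// is allowed.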
void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
  assert(!_compacted, "cannot compact again");
  ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
  _ptrmap->iterate(&cleaner);
  compact(cleaner.max_non_null_offset());
}

void ArchivePtrMarker::compact(size_t max_non_null_offset) {
  assert(!_compacted, "cannot compact again");
  _ptrmap->resize(max_non_null_offset + 1);
  _compacted = true;
}

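// Move _top up to newtop, committing more memory from the underlying VirtualSpace
// as needed. Aborts dumping if the region cannot grow that far.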
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  commit_to(newtop);
  _top = newtop;

  if (_max_delta > 0) {
    uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
    if (delta > _max_delta) {
      // This is just a sanity check and should not trigger in any real-world usage.
      // It can happen only if you allocate more than 2GB of shared objects, which
      // would require millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  return _top;
}

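// Commit memory up to newtop. We commit in chunks of at least 1MB (preferred_bytes),
// capped by what is still uncommitted in the reserved space, so the region does not
// have to be expanded for every small allocation.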
void DumpRegion::commit_to(char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = _rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  if (!_vs->expand_by(commit, false)) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  const char* which;
  if (_rs->base() == (char*)MetaspaceShared::symbol_rs_base()) {
    which = "symbol";
  } else {
    which = "shared";
  }
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
                 which, commit, _vs->actual_committed_size(), _vs->high());
}


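// Allocate num_bytes from this region. The returned address is aligned to
// 'alignment'; _top itself is then re-aligned to SharedSpaceObjectAlignment
// (see the comment in the body below).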
char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  // We align the starting address of each allocation.
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + num_bytes;
  // Leave _top always SharedSpaceObjectAlignment aligned. But not more -
  //  if we allocate with large alignments, let's not waste the gaps.
  // Ideally we would not need to align _top to anything here, but CDS has
  //  a number of implicit alignment assumptions. Leaving _top unaligned
  //  here would trip up at least ReadClosure (which assumes word alignment)
  //  and DumpAllocStats (which gets confused about counting bytes on 32-bit
  //  platforms if we align to anything less than SharedSpaceObjectAlignment
  //  here).
  newtop = align_up(newtop, SharedSpaceObjectAlignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p); // todo: needed? debug_only?
  return p;
}

char* DumpRegion::allocate(size_t num_bytes) {
  return allocate(num_bytes, SharedSpaceObjectAlignment);
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(ArchiveBuilder::current()->to_requested(_base)));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed.
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

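// Close off this region: align _top up to the core region alignment and make that
// the region's end. If 'next' is given, it takes over the rest of the reserved space.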
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::core_region_alignment());
  _is_packed = true;
  if (next != nullptr) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

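// Write an archived heap reference into the dump region. With compressed oops we
// store the narrowOop encoding; otherwise we store the oop's "requested address",
// i.e., where the object is expected to reside when the archive is mapped.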
void WriteClosure::do_oop(oop* o) {
  if (*o == nullptr) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::can_write(), "sanity");
    intptr_t p;
    if (UseCompressedOops) {
      p = (intptr_t)CompressedOops::encode_not_null(*o);
    } else {
      p = cast_from_oop<intptr_t>(HeapShared::to_requested_address(*o));
    }
    _dump_region->append_intptr_t(p);
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

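// ReadClosure reads back, in the same order, the values that WriteClosure wrote
// at dump time.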
void ReadClosure::do_ptr(void** p) {
  assert(*p == nullptr, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "tag doesn't match (%d, expected %d)", old_tag, tag);
  FileMapInfo::assert_mark(tag == old_tag);
}

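// Restore an archived heap reference. The stored value can be decoded only if the
// archived heap objects are fully available; otherwise the reference is nulled out.
// In the uncompressed case, the stored dump-time address is adjusted by the delta
// between the dump-time and runtime heap mappings.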
void ReadClosure::do_oop(oop *p) {
  if (UseCompressedOops) {
    narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
    if (CompressedOops::is_null(o) || !ArchiveHeapLoader::is_fully_available()) {
      *p = nullptr;
    } else {
      assert(ArchiveHeapLoader::can_use(), "sanity");
      assert(ArchiveHeapLoader::is_fully_available(), "must be");
      *p = ArchiveHeapLoader::decode_from_archive(o);
    }
  } else {
    intptr_t dumptime_oop = nextPtr();
    if (dumptime_oop == 0 || !ArchiveHeapLoader::is_fully_available()) {
      *p = nullptr;
    } else {
      assert(!ArchiveHeapLoader::is_loaded(), "ArchiveHeapLoader::can_load() is not supported for uncompressed oops");
      intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
      *p = cast_to_oop(runtime_oop);
    }
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

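// If class-list logging is enabled (ClassListWriter::is_enabled()), append a
// LAMBDA_PROXY_TAG line for this invokedynamic call site so that a subsequent CDS
// dump can regenerate the lambda proxy class. Only call sites in classes loaded by
// the built-in loaders are supported.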
void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
  if (ClassListWriter::is_enabled()) {
    if (SystemDictionaryShared::is_supported_invokedynamic(bootstrap_specifier)) {
      const constantPoolHandle& pool = bootstrap_specifier->pool();
      if (SystemDictionaryShared::is_builtin_loader(pool->pool_holder()->class_loader_data())) {
        // Currently lambda proxy classes are supported only for the built-in loaders.
        ResourceMark rm(THREAD);
        int pool_index = bootstrap_specifier->bss_index();
        ClassListWriter w;
        w.stream()->print("%s %s", LAMBDA_PROXY_TAG, pool->pool_holder()->name()->as_C_string());
        CDSIndyInfo cii;
        ClassListParser::populate_cds_indy_info(pool, pool_index, &cii, CHECK);
        GrowableArray<const char*>* indy_items = cii.items();
        for (int i = 0; i < indy_items->length(); i++) {
          w.stream()->print(" %s", indy_items->at(i));
        }
        w.stream()->cr();
      }
    }
  }
}