/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->  +----------------+
//                       |                |
//                       |   Constants    |
//                       |                |
//    _insts._start ->   |----------------|
//                       |                |
//                       |     Code       |
//                       |                |
//    _stubs._start ->   |----------------|
//                       |                |
//                       |    Stubs       | (also handlers for deopt/exception)
//                       |                |
//                       +----------------+
//    + _total_size ->   |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  debug_only(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
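  // (end_slop() is the small safety margin kept free between adjacent
  //  sections; initialize_section_size() below applies the same margin
  //  when carving secondary sections out of the instruction space.)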
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  debug_only(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob.  The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
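    // (badAddress is a debug-build poison value; any stale use of these
    //  pointers after the blob is gone should then fail fast.)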
    _total_start = badAddress;
    _consts._start = _consts._end = badAddress;
    _insts._start  = _insts._end  = badAddress;
    _stubs._start  = _stubs._end  = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc; it doesn't matter what it is, since it will be
    // replaced during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted to also work for relocations
  // used for fixup.  Sometimes we want to put relocation information
  // for the next instruction, since it will be patched with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type ||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.
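/// The canonical shape of each such function is:
///
///   for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
///     const CodeSection* cs = code_section(n);
///     if (cs->is_empty())  continue;         // skip trivial sections
///     size_so_far = cs->align_at_start(size_so_far);
///     ...                                    // account for cs->size(), etc.
///   }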

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // Sanity-check that the copy to the destination will preserve the
    // alignment of the instruction start.
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  debug_only(dest->verify_section_allocation();)
}

// Append an oop reference that keeps the class alive.
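// (klass_holder() yields the oop whose reachability keeps the class's
// metadata alive; for ordinary classes this is the class loader.)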
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0))  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_methodCounters()) {
              m = ((MethodCounters*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDOs which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_methodCounters()) {
          m = ((MethodCounters*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}



csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

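  // The target buffer and its size must be HeapWord-aligned: the copy
  // further below prefers Copy::disjoint_words, which moves whole words.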
  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
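  // (total_relocation_size() rounds its dry-run result up to HeapWordSize
  //  the same way, so the padding emitted here always fits.)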
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit  = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest is null, this is just the sizing pass
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print_on(tty);
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different from the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end    = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0))  continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());

  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print_on(tty);
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == nullptr)  return;  // caller must check if blob is null
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them when expanding.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  debug_only(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print_on(tty);
  }
#endif //PRODUCT
}

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend   = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
              "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity, sect_end: " PTR_FORMAT " tend: " PTR_FORMAT " size: %d", p2i(sect->end()), p2i(tend), (int)_total_size);
    guarantee(sect->end() <= sect->limit(), "sanity, sect_end: " PTR_FORMAT " sect_limit: " PTR_FORMAT, p2i(sect->end()), p2i(sect->limit()));
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->head("blob name='%s' total_size='%d'", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->elem("sect index='%d' capacity='%d' size='%d' remaining='%d'",
                 n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->tail("blob");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}

#ifndef PRODUCT
void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
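  // Remarks are collected only when requested (_collect_comments is set,
  // e.g. for disassembly output); otherwise this is a no-op.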
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print_on(outputStream* st, const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  st->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
               name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  st->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
               name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print_on(st);
  }
}

void CodeBuffer::print_on(outputStream* st) {
#if 0
  if (this == nullptr) {  // gcc complains 'nonnull' argument 'this' compared to NULL
    st->print_cr("null CodeBuffer pointer");
    return;
  }
#endif

  st->print_cr("CodeBuffer:%s", name());
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print_on(st, code_section_name(n));
  }
}

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString() {
    os::free((void*)_string);
    _string = nullptr;
  }
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      // Insert 'cell' at the tail of the circular, doubly-linked list.
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      // Insert 'cell' at the tail of the circular, doubly-linked list.
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// ----- AsmRemarks ------------------------------------------------------------
//
// Acts as an interface to a reference-counted mapping [offset -> remark],
// where offset is a byte offset into an instruction stream (CodeBuffer,
// CodeBlob or other memory buffer) and remark is a string (comment).
//
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) {
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  assert(_remarks == nullptr, "Must 'clear()' before deleting!");
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as format strings since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}

// ----- DbgStrings ------------------------------------------------------------
//
// Acts as an interface to a reference-counted collection of (debug) strings
// used in the generated code, and thus requiring a fixed address.
//
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) {
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  assert(_strings == nullptr, "Must 'clear()' before deleting!");
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ?
         str : _strings->insert(dbgstr);
}

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}

// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

#endif // not PRODUCT