/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->       +----------------+
//                    | machine code...|
//    _end ->         |----------------|
//                    |                |
//                    |    (empty)     |
//                    |                |
//                    |                |
//                    +----------------+
//    _limit ->       |                |
//
//    _locs_start ->  +----------------+
//                    |reloc records...|
//                    |----------------|
//    _locs_end ->    |                |
//                    |                |
//                    |    (empty)     |
//                    |                |
//                    |                |
//                    +----------------+
//    _locs_limit ->  |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->  +----------------+
//                       |                |
//                       |   Constants    |
//                       |                |
//    _insts._start ->   |----------------|
//                       |                |
//                       |     Code       |
//                       |                |
//    _stubs._start ->   |----------------|
//                       |                |
//                       |    Stubs       | (also handlers for deopt/exception)
//                       |                |
//                       +----------------+
//    + _total_size ->   |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
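
// Example (illustrative only): a minimal sketch of the accumulate-then-copy
// lifecycle pictured above, assuming a simple stub generator.  'STUB_SIZE'
// and 'emit_my_stub' are hypothetical placeholders, not part of this file.
//
//   ResourceMark rm;
//   CodeBuffer buffer("my_stub", STUB_SIZE, /*locs_size*/ 64);
//   MacroAssembler masm(&buffer);
//   emit_my_stub(&masm);                        // appends into buffer.insts()
//   BufferBlob* blob = BufferBlob::create("my_stub", &buffer);
//   // The blob constructor ends up in copy_code_to() below, which compacts
//   // the sections and repairs their relocations.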

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  debug_only(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  debug_only(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob.  The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}
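
// Worked example (illustrative numbers): initialize_section_size() carves a
// consumer section out of the top of _insts.  With _insts spanning
// [0x1000, 0x2000) and a request for a 0x100-byte stubs section aligned to 8,
// middle = 0x2000 - 0x100 = 0x1F00, so stubs owns [0x1F00, 0x2000) and
// _insts._limit drops to 0x1F00 minus end_slop().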

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}
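
// Example (illustrative): locator() encodes an address as (offset, section),
// so a round trip through the encoding recovers the address:
//
//   int loc = cb->locator(addr);              // e.g. locator(0x40, SECT_INSTS)
//   address back = cb->locator_address(loc);
//   assert(back == addr, "round trip");
//
// locator_pos() and locator_sect() decompose the encoding, as used by
// CodeSection::target() below.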

// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none) return;

  // The assertion below has been adjusted to also work for relocation
  // fixup.  Sometimes we want to record relocation information for the
  // next instruction, since it will be patched with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type ||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
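
// Worked example (illustrative numbers): each relocInfo record carries only a
// small delta from the previous locs_point.  If offset_limit() were 0x80 and
// a record landed 0x150 bytes past the current point, relocate() above would
// first emit filler records of type 'none' (each advancing by the maximum
// delta) and then the real record carrying the remaining offset.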

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}
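
// Worked example (illustrative numbers): expand_locs() grows geometrically.
// A section owning a 16-record buffer that needs room for 40 records is
// reallocated to max(40, 2 * 16) = 40; a shared (non-owned) buffer is instead
// copied into a freshly allocated, owned one of that size.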

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  debug_only(dest->verify_section_allocation();)
}
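
// Worked example (illustrative numbers): compute_final_layout() credits
// boundary padding to the previous non-empty section.  If insts ends at
// offset 0x1F3 and stubs must start 8-byte aligned, stubs is placed at 0x1F8
// and the 5 padding bytes extend the destination insts section's _limit, so
// the final blob stays contiguous: consts | insts | pad | stubs.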

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0))  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_methodCounters()) {
              m = ((MethodCounters*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDOs, which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_methodCounters()) {
          m = ((MethodCounters*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}



csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}
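
// Example (illustrative): copy_relocations_to() is used in two passes.  The
// sizing pass runs with buf == nullptr (see total_relocation_size() above),
// counting bytes without writing anything; the copy pass then runs with the
// real target buffer, and both passes must produce the same byte count.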

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest is null, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print_on(tty);
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
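
// Example (illustrative): copy_code_to() above is the whole install sequence
// in miniature:
//
//   CodeBuffer dest(dest_blob);     // wrap the blob's storage
//   compute_final_layout(&dest);    // compact to consts | insts | stubs
//   relocate_code_to(&dest);        // copy bytes, then fix pc-relative refs
//   ICache::invalidate_range(...);  // make the new code visible to fetch
//
// plus bookkeeping such as publishing the constant-table start and sharing
// the assembly remarks with the blob.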

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different to the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0)) continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());

  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
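
// Worked example (illustrative numbers): growth policy above.  A 64K insts
// section that is not the overflowing section grows by
// 4K + (64K - 4K)/4 = 19K (roughly 25%, plus end_slop()), while the section
// that actually overflowed grows by at least the requested 'amount'.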

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print_on(tty);
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == nullptr)  return;  // caller must check if blob is null
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  cb.set_const_section_alignment(_const_section_alignment);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them after the expansion.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  debug_only(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print_on(tty);
  }
#endif //PRODUCT
}
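
// Example (illustrative): after two expansions the superseded buffers form a
// chain, this->_before_expand -> <second buffer> -> <first buffer> -> null.
// Each link keeps its (zapped) blob so pre-expansion internal addresses stay
// resolvable; ~CodeBuffer() walks the chain and frees every blob.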

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend   = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
              "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity, sect_end: " PTR_FORMAT " tend: " PTR_FORMAT " size: %d", p2i(sect->end()), p2i(tend), (int)_total_size);
    guarantee(sect->end() <= sect->limit(), "sanity, sect_end: " PTR_FORMAT " sect_limit: " PTR_FORMAT, p2i(sect->end()), p2i(sect->limit()));
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->head("blob name='%s' total_size='%d'", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->elem("sect index='%d' capacity='%d' size='%d' remaining='%d'",
                 n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->tail("blob");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}
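
// Example (illustrative): a backend that shares one stub among several
// static calls records each call site and defers emission:
//
//   buffer->shared_stub_to_interp_for(callee, call_offset);  // per call site
//   ...
//   if (!buffer->finalize_stubs()) {
//     return;  // stub allocation failed; caller must bail out
//   }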

#ifndef PRODUCT

void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print_on(outputStream* st, const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  st->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
               name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  st->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
               name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print_on(st);
  }
}

void CodeBuffer::print_on(outputStream* st) {
#if 0
  if (this == nullptr) {  // gcc complains 'nonnull' argument 'this' compared to NULL
    st->print_cr("null CodeBuffer pointer");
    return;
  }
#endif

  st->print_cr("CodeBuffer:%s", name());
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print_on(st, code_section_name(n));
  }
}

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString() {
    os::free((void*)_string);
    _string = nullptr;
  }
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};
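
// Example (illustrative): Cell::push_back() maintains a circular,
// doubly-linked list anchored at the head cell.  Starting from a single cell
// A (A.prev == A.next == A), pushing B gives A.next == B, B.next == A,
// B.prev == A and A.prev == B, so iteration can stop once it wraps back to
// the head (cf. AsmRemarkCollection::next() below).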

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// ----- AsmRemarks ------------------------------------------------------------
//
// Acting as interface to reference counted mapping [offset -> remark], where
// offset is a byte offset into an instruction stream (CodeBuffer, CodeBlob or
// other memory buffer) and remark is a string (comment).
//
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) {
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  assert(_remarks == nullptr, "Must 'clear()' before deleting!");
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as format strings since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}

// ----- DbgStrings ------------------------------------------------------------
//
// Acting as interface to reference counted collection of (debug) strings used
// in the code generated, and thus requiring a fixed address.
//
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) {
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  assert(_strings == nullptr, "Must 'clear()' before deleting!");
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ? str : _strings->insert(dbgstr);
}
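
// Example (illustrative): DbgStrings::insert() interns by content, so two
// insertions of equal strings yield one stable, C-heap-allocated address:
//
//   DbgStrings dbg;                            // hypothetical local instance
//   const char* a = dbg.insert("oop map");
//   const char* b = dbg.insert("oop map");
//   assert(a == b, "same backing storage");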

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}

// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

#endif // not PRODUCT