/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->        +----------------+
//                     | machine code...|
//    _end ->          |----------------|
//                     |                |
//                     |    (empty)     |
//                     |                |
//                     |                |
//                     +----------------+
//    _limit ->        |                |
//
//    _locs_start ->   +----------------+
//                     |reloc records...|
//                     |----------------|
//    _locs_end ->     |                |
//                     |                |
//                     |    (empty)     |
//                     |                |
//                     |                |
//                     +----------------+
//    _locs_limit ->   |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->  +----------------+
//                       |                |
//                       |   Constants    |
//                       |                |
//    _insts._start ->   |----------------|
//                       |                |
//                       |     Code       |
//                       |                |
//    _stubs._start ->   |----------------|
//                       |                |
//                       |     Stubs      | (also handlers for deopt/exception)
//                       |                |
//                       +----------------+
//    + _total_size ->   |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition
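// Illustrative sketch (not part of this file's logic): a typical CodeBuffer
// lifecycle as seen from a stub generator. The name "example_stub" and the
// sizes are invented for the example; the CodeBuffer constructor and the
// BufferBlob::create() overload taking a CodeBuffer are the ones used
// elsewhere in HotSpot.
//
//   void generate_example_stub() {
//     ResourceMark rm;
//     CodeBuffer cb("example_stub", 1024 /* code size */, 64 /* locs size */);
//     MacroAssembler masm(&cb);
//     // ... emit instructions through the assembler ...
//     BufferBlob* blob = BufferBlob::create("example_stub", &cb);
//     // The blob now holds a compacted copy: the empty slop between the
//     // consts, insts, and stubs sections has been squeezed out (see the
//     // diagrams above).
//   }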
// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  debug_only(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  debug_only(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob. The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs()) cs->initialize_locs(1);
}
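// Illustrative sketch of the carving done by initialize_section_size() (all
// numbers invented for the example): carving a 64-byte, 8-byte-aligned
// section out of an insts section spanning [0x1000, 0x2000), assuming an
// end_slop of 4 bytes here:
//
//   middle = 0x2000 - 64        = 0x1FC0   (already 8-aligned, no rounding)
//   _insts._limit = middle - 4  = 0x1FBC   (slop keeps the sections apart)
//   new section                 = [0x1FC0, 0x2000)
//
// The insts section shrinks from the top and the new section reuses the
// freed tail of the same blob; no bytes are copied.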
void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches. It will get checked when bound.
    return branch_pc;
  }
}
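// Illustrative sketch (the standard assembler idiom, shown only to connect it
// to target() above; not part of this file's logic):
//
//   Label done;
//   __ jcc(Assembler::equal, done);  // 'done' unbound: target() records a
//                                    // patch locator and returns branch_pc
//   // ... more code ...
//   __ bind(done);                   // binding resolves all recorded patches
//
// A locator packs (offset within section, section index) into a single int,
// so recorded patch sites stay valid across section moves and buffer
// expansion.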
void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none) return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup. Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated. Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type ||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
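// Illustrative sketch of the record stream emitted above (shapes only; the
// exact relocInfo::offset_limit() and filler offset are encoding-defined):
// if the next relocation lies several times the offset limit past
// locs_point(), the loop above produces something like
//
//   [none +max] [none +max] [call_type +remainder, data...]
//
// i.e. 'none'-type fillers each advance the point by the largest encodable
// offset, and the final record carries the remainder plus the relocation's
// payload (type, format, and any data words added by end->initialize()).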
void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.
csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  debug_only(dest->verify_section_allocation();)
}
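// Illustrative worked example of the layout above (sizes invented; this
// assumes CodeEntryAlignment == 32 and HeapWordSize == 8 purely for the
// arithmetic): consts = 24 bytes, insts = 100 bytes, stubs = 40 bytes:
//
//   consts: [  0,  24)
//   insts:  [ 32, 132)  -- 8 bytes of padding charged to consts (_limit -> 32)
//   stubs:  [136, 176)  -- 4 bytes of padding charged to insts  (_limit -> 136)
//
// total_content_size() walks the same alignment rule, which is why the final
// assert above must come out exact.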
// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0))  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer. This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}
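// Illustrative note: total_relocation_size() uses a sizing idiom that recurs
// in this file -- run the copy routine once with a null buffer to measure,
// then again for real:
//
//   csize_t needed = copy_relocations_to(nullptr, 0, false);  // dry run
//   // ... provide at least align_up(needed, HeapWordSize) bytes at 'buf' ...
//   copy_relocations_to(buf, buf_limit, false);               // actual copy
//
// The dry run takes exactly the same path but never dereferences the buffer,
// so the two passes cannot disagree about the size.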
csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest is null, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
// Move all my code into another code buffer. Consult applicable
// relocs to repair embedded addresses. The layout in the destination
// CodeBuffer differs from the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0)) continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
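// Illustrative worked example of the sizing above (all numbers invented):
// suppose the stubs section requests 'amount' = 2K more space while insts
// holds 8K, consts holds 512 bytes, and stubs holds 1K:
//
//   stubs (the requester):  exp = max(1K, 4K, amount) = 4K -> new_cap ~ 5K + slop
//   insts (secondary):      exp = 4K + (8K - 4K)/4    = 5K -> new_cap ~ 13K + slop
//   consts (secondary):     exp = max(512, 4K)        = 4K -> new_cap ~ 4.5K + slop
//
// An empty secondary section gets exp = 0, so only slop is added and its
// existing capacity is kept if that is already larger.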
void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == nullptr)  return;  // caller must check if blob is null
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them for the expansion.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  debug_only(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend   = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
              "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' total_size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' capacity='%d' size='%d' remaining='%d'/>",
                     n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->print_cr("</blob>");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}

#ifndef PRODUCT
void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}
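// Illustrative sketch (standard assembler usage, shown only to connect it to
// block_comment() above; not part of this file's logic):
//
//   __ block_comment("fast path");  // stored in _asm_remarks, keyed by the
//                                   // current offset into the insts section
//
// When the code is later decoded (see CodeBuffer::decode() above), the
// disassembler interleaves each remark with the instructions, printed with
// the " ;; " prefix used by AsmRemarks::print() below.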
// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString() {
    os::free((void*)_string);
    _string = nullptr;
  }
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};
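// Illustrative note on the Cell lists above (both collections share the same
// shape): _remarks/_strings points at the head of a circular, doubly-linked
// list, which makes push_back() O(1) via head->prev:
//
//   single cell A:        A.prev == A.next == A
//   after A.push_back(B): A.next == B, B.next == A, A.prev == B, B.prev == A
//
// Iteration therefore stops when the cursor wraps around to the head again,
// as in AsmRemarkCollection::next() below.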
// ----- AsmRemarks ------------------------------------------------------------
//
// Acting as an interface to a reference-counted mapping [offset -> remark],
// where offset is a byte offset into an instruction stream (CodeBuffer,
// CodeBlob or other memory buffer) and remark is a string (comment).
//
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) {
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  assert(_remarks == nullptr, "Must 'clear()' before deleting!");
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as format strings since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}

// ----- DbgStrings ------------------------------------------------------------
//
// Acting as an interface to a reference-counted collection of (debug) strings
// used in the generated code, which therefore require a fixed address.
//
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) {
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  assert(_strings == nullptr, "Must 'clear()' before deleting!");
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ? str : _strings->insert(dbgstr);
}

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}
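// Illustrative sketch of the sharing protocol above (not part of the build):
//
//   AsmRemarks a;    // fresh collection, _ref_cnt == 1
//   AsmRemarks b;
//   b.share(a);      // b drops its own empty collection; a's _ref_cnt -> 2
//   a.clear();       // _ref_cnt -> 1, nothing is freed yet
//   b.clear();       // _ref_cnt -> 0, cells deleted, collection freed
//
// This is how a CodeBuffer and the CodeBlob it is copied into can refer to a
// single remark/string collection without a deep copy (cf. the use_remarks()
// and use_strings() calls in copy_code_to() above).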
// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

#endif // not PRODUCT