/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |

//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->             +----------------+
//                                  |                |
//                                  |   Constants    |
//                                  |                |
//    _insts._start ->              |----------------|
//                                  |                |
//                                  |     Code       |
//                                  |                |
//    _stubs._start ->              |----------------|
//                                  |                |
//                                  |    Stubs       | (also handlers for deopt/exception)
//                                  |                |
//                                  +----------------+
//    + _total_size ->              |                |

// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

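// For orientation, a typical client lifecycle (an illustrative sketch only;
// the client code below is an assumption, not part of this file): allocate a
// CodeBuffer, emit into its sections through an assembler, then install the
// result into a CodeBlob, which performs the compaction described above.
//
//   CodeBuffer cb("example_stub", /*code_size*/ 1024, /*locs_size*/ 64);
//   MacroAssembler masm(&cb);
//   // ... emit instructions and relocations through masm ...
//   BufferBlob* blob = BufferBlob::create("example_stub", &cb);  // copy + compact
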
typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  DEBUG_ONLY(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  DEBUG_ONLY(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob.  The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}
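
// Schematically, the carve-out above splits the tail of _insts (sketch only):
//
//   _insts._start            middle - slop   middle      (old) _insts._limit
//        |                         |            |                  |
//        [ ...remaining insts... ] [   slop   ] [ new section cs   ]
//
// so _insts keeps [start, middle - slop) and cs receives [middle, limit).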

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS:            return "consts";
  case SECT_INSTS:             return "insts";
  case SECT_STUBS:             return "stubs";
  default:                     return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // We need to return a pc; it does not matter which one, since the
    // branch will be patched during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either, because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}
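
// For context (a hedged note, not asserted by this file): assembler front
// ends typically reach target() through something like
// AbstractAssembler::target(Label&), passing the current pc() as branch_pc,
// so an unbound forward branch records a patch location here and is fixed
// up when the Label is finally bound.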

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted to also work for relocations
  // used in fixups.  Sometimes we want to record relocation information
  // for the next instruction, since it will be patched with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // No space for relocation information was provided, so the code
    // cannot be relocated.  Make sure that relocate() is only called
    // with rtypes that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
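
// Worked example with illustrative numbers (the actual limits live in
// relocInfo): each record encodes only a small offset from the previous
// locs_point.  If offset_limit() allowed at most 0x10000 bytes and a reloc
// were emitted 0x25000 bytes past the previous one, the loop above would
// first write two maximal filler records and the real record would then
// carry the remaining 0x5000-byte offset.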

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant:  locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start    = locs_start;
    _locs_end      = locs_start + old_count;
    _locs_limit    = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    DEBUG_ONLY(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  DEBUG_ONLY(dest->verify_section_allocation();)
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0)) continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_methodCounters()) {
              m = ((MethodCounters*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_methodCounters()) {
          m = ((MethodCounters*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }

  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}



csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest is null, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}
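
// The null-destination branch above makes this a two-pass idiom: size first,
// then copy.  A minimal sketch of the pattern (assuming a CodeBuffer 'cb'
// and a blob allocated with room for the reported size):
//
//   csize_t reloc_size = cb.copy_relocations_to(nullptr);  // sizing pass
//   // ... allocate 'blob' with at least reloc_size bytes of reloc space ...
//   cb.copy_relocations_to(blob);                          // actual copy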

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print_on(tty);
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer differs from that of the source CodeBuffer: the
// destination CodeBuffer gets the final layout (consts, insts, stubs
// in order of ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0)) continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());

  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;       // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
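      // Worked example (illustration): a 36K insts section arrives here with
      // exp = 36K (a 100% increase), which the next line rescales to
      // 4K + ((36K - 4K) >> 2) = 12K, i.e. roughly a one-third increase.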
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print_on(tty);
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == nullptr)  return;  // caller must check if blob is null
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  cb.set_const_section_alignment(_const_section_alignment);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them for the expansion.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  DEBUG_ONLY(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  DEBUG_ONLY(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print_on(tty);
  }
#endif //PRODUCT
}

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == nullptr) return;  // ignore not fully initialized buffer
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend   = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
           "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity, sect_end: " PTR_FORMAT " tend: " PTR_FORMAT " size: %d", p2i(sect->end()), p2i(tend), (int)_total_size);
    guarantee(sect->end() <= sect->limit(), "sanity, sect_end: " PTR_FORMAT " sect_limit: " PTR_FORMAT, p2i(sect->end()), p2i(sect->limit()));
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->head("blob name='%s' total_size='%d'", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->elem("sect index='%d' capacity='%d' size='%d' remaining='%d'",
                 n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->tail("blob");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}

#ifndef PRODUCT
void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
  if (insts()->scratch_emit()) {
    return;
  }
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  if (insts()->scratch_emit()) {
    return str;
  }
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print_on(outputStream* st, const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  st->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  st->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print_on(st);
  }
}

void CodeBuffer::print_on(outputStream* st) {
  st->print_cr("CodeBuffer:%s", name());
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print_on(st, code_section_name(n));
  }
}

CHeapString::~CHeapString() {
  os::free((void*)_string);
  _string = nullptr;
}

// ----- AsmRemarks ------------------------------------------------------------
//
// Acting as interface to reference counted mapping [offset -> remark], where
// offset is a byte offset into an instruction stream (CodeBuffer, CodeBlob or
// other memory buffer) and remark is a string (comment).
//
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) {
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  if (_remarks != nullptr) {
    clear();
  }
  assert(_remarks == nullptr, "Must 'clear()' before deleting!");
}

void AsmRemarks::init(AsmRemarks& asm_remarks) {
  asm_remarks._remarks = new AsmRemarkCollection();
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(_remarks == nullptr || is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks != nullptr && _remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as format strings since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}

// ----- DbgStrings ------------------------------------------------------------
//
// Acting as interface to reference counted collection of (debug) strings used
// in the code generated, and thus requiring a fixed address.
//
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) {
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  if (_strings != nullptr) {
    clear();
  }
  assert(_strings == nullptr, "Must 'clear()' before deleting!");
}

void DbgStrings::init(DbgStrings& dbg_strings) {
  dbg_strings._strings = new DbgStringCollection();
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ? str : _strings->insert(dbgstr);
}

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(_strings == nullptr || is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings != nullptr && _strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}

// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}
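
// Note on the structure above: the cells form a circular doubly-linked list
// anchored at _remarks (push_back(), defined elsewhere, is assumed to link a
// new cell just before the anchor).  lookup()/next() below exploit the
// circularity to iterate all cells and stop once they wrap back to _remarks.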

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0; // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0; // i.e. _ref_cnt == 0
}

#endif // not PRODUCT