/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.
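//
// As an illustration of the pointers above (accessors per codeBuffer.hpp):
// size() == _end - _start, remaining() == _limit - _end, and
// capacity() == _limit - _start, so a section is full exactly when
// remaining() == 0.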

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->             +----------------+
//                                  |                |
//                                  |   Constants    |
//                                  |                |
//    _insts._start ->              |----------------|
//                                  |                |
//                                  |     Code       |
//                                  |                |
//    _stubs._start ->              |----------------|
//                                  |                |
//                                  |    Stubs       | (also handlers for deopt/exception)
//                                  |                |
//                                  +----------------+
//    + _total_size ->              |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
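//
// A minimal usage sketch (simplified and illustrative; real examples live in
// the stub generators and the compiler back ends):
//
//   CodeBuffer cb("example_stub", code_size, locs_size); // backed by a BufferBlob
//   MacroAssembler masm(&cb);                            // emits into cb's insts section
//   ... emit code through the assembler ...
//   BufferBlob* blob = BufferBlob::create("example_stub", &cb); // compact final copy
//
// The names and sizes above are placeholders, not taken from this file.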

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  debug_only(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  debug_only(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob.  The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
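    // (The expression above equals align_up(start, align) - start; it relies
    // on align being a power of two.)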
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS:            return "consts";
  case SECT_INSTS:             return "insts";
  case SECT_STUBS:             return "stubs";
  default:                     return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}
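// Note: a locator packs a section-relative offset together with the section
// index into a single int (see locator(), locator_pos() and locator_sect()
// in codeBuffer.hpp); -1 above means "address not in any section".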


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // We need to return some pc; it does not matter which, since it will be
    // replaced during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either, because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}
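// Illustrative call site (hypothetical, not from this file): after emitting a
// runtime call, an assembler could record its relocation with
//   code_section()->relocate(insn_pc, relocInfo::runtime_call_type);
// which takes the simple-spec default case above.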

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none               ||
           rtype == relocInfo::runtime_call_type  ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type  ||
           rtype == relocInfo::external_word_type ||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;
}
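// Note: the a-priori floor above means, e.g., that a 1024-byte section always
// receives at least 1024/16 == 64 relocInfo slots, even if the caller asked
// for fewer (numbers illustrative).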

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant:  locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start    = locs_start;
    _locs_end      = locs_start + old_count;
    _locs_limit    = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}
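// Worked example (all sizes hypothetical): with consts = 100 bytes, insts =
// 300 bytes at a 32-byte CodeEntryAlignment, and stubs = 40 bytes at an
// 8-byte HeapWordSize, the running total is 0+100 = 100, aligned up to 128
// before insts (128+300 = 428), then aligned up to 432 before stubs, giving
// a total_content_size() of 432+40 = 472.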

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // Sanity check: copying must preserve the alignment of the instruction start.
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  debug_only(dest->verify_section_allocation();)
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0)) continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}
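// Illustration of the filler scheme above (numbers hypothetical): if a
// filler reloc's maximum addr_offset() were 1024 bytes and a section's relocs
// begin 2500 bytes past the current relocation point, the loop emits fillers
// advancing by 1024, 1024, and finally a shrunken filler covering 452 bytes.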

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // If dest is null, this is just the sizing pass.
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer differs from that of the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0)) continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;       // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
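// Worked example (sizes hypothetical): a 16K insts section that is not the
// requesting section grows by exp = 4K + ((16K - 4K) >> 2) = 7K plus
// end_slop(), while a non-empty 2K stubs section gets the 4K minimum
// increase; empty secondary sections are not grown at all.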

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  if (blob() == nullptr)  return;  // caller must check if blob is null

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  cb.set_const_section_alignment(_const_section_alignment);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them for the expansion.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  debug_only(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend   = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
              "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' total_size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' capacity='%d' size='%d' remaining='%d'/>",
                     n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->print_cr("</blob>");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}

#ifndef PRODUCT
void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString() {
    os::free((void*)_string);
    _string = nullptr;
  }
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
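    // Insert 'cell' at the tail of the circular doubly-linked list whose
    // head is 'this' (i.e. just before the head).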
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (which
  // do not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// ----- AsmRemarks ------------------------------------------------------------
//
// Acts as an interface to a reference-counted mapping [offset -> remark],
// where offset is a byte offset into an instruction stream (CodeBuffer,
// CodeBlob or other memory buffer) and remark is a string (comment).
//
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) {
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  assert(_remarks == nullptr, "Must 'clear()' before deleting!");
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as a format string, since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}
// ----- DbgStrings ------------------------------------------------------------
//
// Acts as an interface to a reference-counted collection of (debug) strings
// used in the generated code, which therefore require a fixed address.
//
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) {
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  assert(_strings == nullptr, "Must 'clear()' before deleting!");
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ? str : _strings->insert(dbgstr);
}

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}

// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0; // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0; // i.e. _ref_cnt == 0
}

#endif // not PRODUCT