/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->         +----------------+
//                      | machine code...|
//    _end ->           |----------------|
//                      |                |
//                      |    (empty)     |
//                      |                |
//                      |                |
//                      +----------------+
//    _limit ->         |                |
//
//    _locs_start ->    +----------------+
//                      |reloc records...|
//                      |----------------|
//    _locs_end ->      |                |
//                      |                |
//                      |    (empty)     |
//                      |                |
//                      |                |
//                      +----------------+
//    _locs_limit ->    |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _consts._start ->  +----------------+
//                       |                |
//                       |   Constants    |
//                       |                |
//    _insts._start ->   |----------------|
//                       |                |
//                       |     Code       |
//                       |                |
//    _stubs._start ->   |----------------|
//                       |                |
//                       |    Stubs       | (also handlers for deopt/exception)
//                       |                |
//                       +----------------+
//    + _total_size ->   |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
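//
// Illustrative lifecycle (a hedged sketch; emission goes through a platform
// assembler, for which "MacroAssembler" stands in here):
//
//   CodeBuffer cb("my_stub", code_size, locs_size);  // backed by a BufferBlob
//   MacroAssembler masm(&cb);                        // emits into cb.insts()
//   ... emit instructions, constants and stubs ...
//   BufferBlob* blob = BufferBlob::create("my_stub", &cb);  // compacting copy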

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  DEBUG_ONLY(verify_section_allocation();)
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
  int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;

  assert(blob() == nullptr, "only once");
  set_blob(BufferBlob::create(_name, total_size));
  if (blob() == nullptr) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  DEBUG_ONLY(verify_section_allocation();)
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob, and
  // it's not permanent, then free the BufferBlob. The rest of the memory
  // will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
  if (_overflow_arena != nullptr) {
    // free any overflow storage
    delete _overflow_arena;
  }
  if (_shared_trampoline_requests != nullptr) {
    delete _shared_trampoline_requests;
  }

  NOT_PRODUCT(clear_strings());
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != nullptr) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start   = badAddress;
    _consts._start = _consts._end = badAddress;
    _insts._start  = _insts._end  = badAddress;
    _stubs._start  = _stubs._end  = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != nullptr) {
    BufferBlob::free(_blob);
    set_blob(nullptr);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return nullptr;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return nullptr;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}


bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

#ifndef PRODUCT
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != nullptr && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}
#endif // !PRODUCT

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == nullptr) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return null or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches. It will get checked when bound.
    return branch_pc;
  }
}
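
// Illustrative flow (a hedged sketch, assuming an assembler-style client):
//
//   Label L;
//   __ jmp(L);   // L unbound: target() records a patch at the branch pc
//                //   and returns branch_pc as a placeholder
//   __ bind(L);  // binding resolves every recorded patch
//   __ jmp(L);   // L bound: target() returns the label's real address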

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}
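
// Typical use (a hedged sketch): after emitting a call at 'pc', a client
// records the relocation either by type,
//
//   cs->relocate(pc, relocInfo::runtime_call_type);
//
// or with a concrete spec that carries data, e.g.
//
//   cs->relocate(pc, oop_Relocation::spec(oop_index));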

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none) return;

  // The assertion below has been adjusted to also work for fixup
  // relocations: sometimes we want to record relocation information
  // for the next instruction, since it will be patched with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated. Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type ||
           rtype == relocInfo::barrier_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = relocInfo::filler_info();
    offset -= relocInfo::filler_info().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
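
// Worked example (hedged; the exact limits live in relocInfo.hpp): if the
// gap between the previous locs_point and 'at' does not fit into a single
// relocInfo offset field, the loop above first emits filler relocs of type
// 'none', each advancing the point by the maximal addr_offset, and only the
// small remainder is carried by the real relocInfo written at the end.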

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == nullptr, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == nullptr, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == nullptr) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}

int CodeSection::alignment() const {
  if (_index == CodeBuffer::SECT_CONSTS) {
    // CodeBuffer controls the alignment of the constants section
    return _outer->_const_section_alignment;
  }
  if (_index == CodeBuffer::SECT_INSTS) {
    return (int) CodeEntryAlignment;
  }
  if (_index == CodeBuffer::SECT_STUBS) {
    // CodeBuffer installer expects sections to be HeapWordSize aligned
    return HeapWordSize;
  }
  ShouldNotReachHere();
  return 0;
}

/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");
  assert(!_finalize_stubs, "non-finalized stubs");

  {
    // Sanity check: the copy must preserve the alignment of the instruction start.
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = nullptr;
  CodeSection*       prev_dest_cs = nullptr;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != nullptr) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    DEBUG_ONLY(dest_cs->_start = nullptr);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  DEBUG_ONLY(dest->verify_section_allocation();)
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != nullptr && !oops->contains(cl)) {
    oops->append(cl);
  }
}
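
// Illustrative resolution chain (hedged restatement of the code below): an
// immediate MethodData* found in a metadata reloc resolves as
//   MethodData -> Method -> method_holder() (a Klass) -> klass_holder() oop,
// and recording that holder oop is what keeps the class loader alive.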

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty() || (cs->locs_count() == 0))  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_methodCounters()) {
              m = ((MethodCounters*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDOs that don't
              // have a backpointer. This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_methodCounters()) {
          m = ((MethodCounters*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}

csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

int CodeBuffer::total_skipped_instructions_size() const {
  int total_skipped_size = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      total_skipped_size += cur_cs->_skipped_instructions_size;
    }
  }
  return total_skipped_size;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(nullptr);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = relocInfo::filler_info();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != nullptr) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != nullptr && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != nullptr) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = nullptr;
  csize_t buf_offset = 0;
  csize_t buf_limit  = 0;

  if (dest != nullptr) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest is null, this is just the sizing pass
  buf_offset = copy_relocations_to(buf, buf_limit);

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print_on(tty);
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // Share assembly remarks and debug strings with the blob.
  NOT_PRODUCT(dest_blob->use_remarks(_asm_remarks));
  NOT_PRODUCT(dest_blob->use_strings(_dbg_strings));

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
// Move all my code into another code buffer. Consult applicable
// relocs to repair embedded addresses. The layout in the destination
// CodeBuffer differs from the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = nullptr;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == nullptr) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    CodeSection* dest_cs = dest->code_section(n);
    if (dest_cs->is_empty() || (dest_cs->locs_count() == 0)) continue;  // skip trivial section
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == nullptr && dest_filled != nullptr) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
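
// Worked example (hedged arithmetic): an insts section of 36K that is not
// the requesting section grows by 4K + ((36K - 4K) >> 2) = 12K, i.e. 25%
// beyond the 4K floor, instead of the 100% used for the requesting section.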

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print_on(tty);
  }

  if (StressCodeBuffers && blob() != nullptr) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  if (blob() == nullptr)  return;  // caller must check if blob is null

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  cb.set_const_section_alignment(_const_section_alignment);
  if (cb.blob() == nullptr) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == nullptr) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Some internal addresses (_last_insn, _last_label) are used during code
  // emission; adjust them for the expansion.
  adjust_internal_address(insts_begin(), cb.insts_begin());

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(nullptr);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  DEBUG_ONLY(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal);)

  // Make certain that the new sections are all snugly inside the new blob.
  DEBUG_ONLY(verify_section_allocation();)

#ifndef PRODUCT
  _decode_begin = nullptr;  // sanity
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print_on(tty);
  }
#endif //PRODUCT
}

void CodeBuffer::adjust_internal_address(address from, address to) {
  if (_last_insn != nullptr) {
    _last_insn += to - from;
  }
  if (_last_label != nullptr) {
    _last_label += to - from;
  }
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == nullptr, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == nullptr)  return;  // ignore not fully initialized buffer
  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
  address tend = tstart + _total_size;
  if (_blob != nullptr) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty()) {
      continue;
    }
    guarantee(_blob == nullptr || is_aligned(sect->start(), sect->alignment()),
              "start is aligned");
    for (int m = n + 1; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect) {
        continue;
      }
      guarantee(other->disjoint(sect), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity, sect_end: " PTR_FORMAT " tend: " PTR_FORMAT " size: %d", p2i(sect->end()), p2i(tend), (int)_total_size);
    guarantee(sect->end() <= sect->limit(), "sanity, sect_end: " PTR_FORMAT " sect_limit: " PTR_FORMAT, p2i(sect->end()), p2i(sect->limit()));
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != nullptr) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->head("blob name='%s' total_size='%d'", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->elem("sect index='%d' capacity='%d' size='%d' remaining='%d'",
                 n, sect->capacity(), sect->size(), sect->remaining());
    }
    xtty->tail("blob");
  }
}

bool CodeBuffer::finalize_stubs() {
  if (_finalize_stubs && !pd_finalize_stubs()) {
    // stub allocation failure
    return false;
  }
  _finalize_stubs = false;
  return true;
}

void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
  if (_shared_stub_to_interp_requests == nullptr) {
    _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
  }
  SharedStubToInterpRequest request(callee, call_offset);
  _shared_stub_to_interp_requests->push(request);
  _finalize_stubs = true;
}

#ifndef PRODUCT
void CodeBuffer::block_comment(ptrdiff_t offset, const char* comment) {
  if (insts()->scratch_emit()) {
    return;
  }
  if (_collect_comments) {
    const char* str = _asm_remarks.insert(offset, comment);
    postcond(str != comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  if (insts()->scratch_emit()) {
    return str;
  }
  const char* tmp = _dbg_strings.insert(str);
  postcond(tmp != str);
  return tmp;
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty NOT_PRODUCT(COMMA &asm_remarks()));
  _decode_begin = insts_end();
}

void CodeSection::print_on(outputStream* st, const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  st->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)",
               name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity());
  st->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
               name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations && (locs_size != 0)) {
    RelocIterator iter(this);
    iter.print_on(st);
  }
}

void CodeBuffer::print_on(outputStream* st) {
  st->print_cr("CodeBuffer:%s", name());
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print_on(st, code_section_name(n));
  }
}

CHeapString::~CHeapString() {
  os::free((void*)_string);
  _string = nullptr;
}

// ----- AsmRemarks ------------------------------------------------------------
//
// Acting as interface to reference counted mapping [offset -> remark], where
// offset is a byte offset into an instruction stream (CodeBuffer, CodeBlob or
// other memory buffer) and remark is a string (comment).
//
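// Illustrative use (a hedged sketch):
//
//   AsmRemarks remarks;
//   remarks.insert(0x10, "spill for rax");  // offset into the instruction stream
//   remarks.print(0x10, tty);               // emits ";; spill for rax"
//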
AsmRemarks::AsmRemarks() {
  init();
  assert(_remarks != nullptr, "Allocation failure!");
}

AsmRemarks::~AsmRemarks() {
  if (_remarks != nullptr) {
    clear();
  }
  assert(_remarks == nullptr, "must be");
}

void AsmRemarks::init() {
  _remarks = new AsmRemarkCollection();
}

const char* AsmRemarks::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  return _remarks->insert(offset, remstr);
}

bool AsmRemarks::is_empty() const {
  return _remarks->is_empty();
}

void AsmRemarks::share(const AsmRemarks &src) {
  precond(_remarks == nullptr || is_empty());
  clear();
  _remarks = src._remarks->reuse();
}

void AsmRemarks::clear() {
  if (_remarks != nullptr && _remarks->clear() == 0) {
    delete _remarks;
  }
  _remarks = nullptr;
}

uint AsmRemarks::print(uint offset, outputStream* strm) const {
  uint count = 0;
  const char* prefix = " ;; ";
  const char* remstr = _remarks->lookup(offset);
  while (remstr != nullptr) {
    strm->bol();
    strm->print("%s", prefix);
    // Don't interpret as format strings since it could contain '%'.
    strm->print_raw(remstr);
    // Advance to next line iff string didn't contain a cr() at the end.
    strm->bol();
    remstr = _remarks->next(offset);
    count++;
  }
  return count;
}

// ----- DbgStrings ------------------------------------------------------------
//
// Acting as interface to reference counted collection of (debug) strings used
// in the code generated, and thus requiring a fixed address.
//
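// Illustrative use (a hedged sketch):
//
//   DbgStrings dbg;
//   const char* stable = dbg.insert("unexpected null");
//   // 'stable' has a fixed address and may be embedded in generated code;
//   // inserting an equal string again returns the same address.
//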
DbgStrings::DbgStrings() {
  init();
  assert(_strings != nullptr, "Allocation failure!");
}

DbgStrings::~DbgStrings() {
  if (_strings != nullptr) {
    clear();
  }
  assert(_strings == nullptr, "must be");
}

void DbgStrings::init() {
  _strings = new DbgStringCollection();
}

const char* DbgStrings::insert(const char* dbgstr) {
  const char* str = _strings->lookup(dbgstr);
  return str != nullptr ? str : _strings->insert(dbgstr);
}

bool DbgStrings::is_empty() const {
  return _strings->is_empty();
}

void DbgStrings::share(const DbgStrings &src) {
  precond(_strings == nullptr || is_empty());
  clear();
  _strings = src._strings->reuse();
}

void DbgStrings::clear() {
  if (_strings != nullptr && _strings->clear() == 0) {
    delete _strings;
  }
  _strings = nullptr;
}

// ----- AsmRemarkCollection ---------------------------------------------------

const char* AsmRemarkCollection::insert(uint offset, const char* remstr) {
  precond(remstr != nullptr);
  Cell* cell = new Cell { remstr, offset };
  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _remarks = cell;
  } else {
    _remarks->push_back(cell);
  }
  return cell->string();
}

const char* AsmRemarkCollection::lookup(uint offset) const {
  _next = _remarks;
  return next(offset);
}

const char* AsmRemarkCollection::next(uint offset) const {
  if (_next != nullptr) {
    Cell* i = _next;
    do {
      if (i->offset == offset) {
        _next = i->next == _remarks ? nullptr : i->next;
        return i->string();
      }
      i = i->next;
    } while (i != _remarks);
    _next = nullptr;
  }
  return nullptr;
}

uint AsmRemarkCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _remarks;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _remarks);

    log_debug(codestrings)("Clear %u asm-remark%s.", count, count == 1 ? "" : "s");
    _remarks = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

// ----- DbgStringCollection ---------------------------------------------------

const char* DbgStringCollection::insert(const char* dbgstr) {
  precond(dbgstr != nullptr);
  Cell* cell = new Cell { dbgstr };

  if (is_empty()) {
    cell->prev = cell;
    cell->next = cell;
    _strings = cell;
  } else {
    _strings->push_back(cell);
  }
  return cell->string();
}

const char* DbgStringCollection::lookup(const char* dbgstr) const {
  precond(dbgstr != nullptr);
  if (_strings != nullptr) {
    Cell* i = _strings;
    do {
      if (strcmp(i->string(), dbgstr) == 0) {
        return i->string();
      }
      i = i->next;
    } while (i != _strings);
  }
  return nullptr;
}

uint DbgStringCollection::clear() {
  precond(_ref_cnt > 0);
  if (--_ref_cnt > 0) {
    return _ref_cnt;
  }
  if (!is_empty()) {
    uint count = 0;
    Cell* i = _strings;
    do {
      Cell* next = i->next;
      delete i;
      i = next;
      count++;
    } while (i != _strings);

    log_debug(codestrings)("Clear %u dbg-string%s.", count, count == 1 ? "" : "s");
    _strings = nullptr;
  }
  return 0;  // i.e. _ref_cnt == 0
}

#endif // not PRODUCT