/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizableHashTable.hpp"

// Store a value of type T into the code stream. memcpy keeps the store safe
// even when the destination address is not naturally aligned for T.
template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
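
// Usage sketch (illustrative only; real offsets come from the assembler, the
// values below are assumptions):
//
//   CodeOffsets offsets;                                   // Frame_Complete starts at frame_never_safe
//   offsets.set_value(CodeOffsets::Verified_Entry, 16);    // record the verified entry point
//   if (offsets.value(CodeOffsets::Deopt) == -1) {         // -1: no deopt handler recorded yet
//     ...
//   }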

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
  friend class AOTCodeReader;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;           // my section number (SECT_INSTS, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note:  _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = nullptr;
    _mark          = nullptr;
    _end           = nullptr;
    _limit         = nullptr;
    _locs_start    = nullptr;
    _locs_end      = nullptr;
    _locs_limit    = nullptr;
    _locs_point    = nullptr;
    _locs_own      = false;
    _scratch_emit  = false;
    _skipped_instructions_size = 0;
    DEBUG_ONLY(_index = -1);
    DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start         = start;
    _mark          = nullptr;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != nullptr, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t      index() const         { return _index; }
  bool        is_allocated() const  { return _start != nullptr; }
  bool        is_empty() const      { return _start == _end; }
  bool        has_locs() const      { return _locs_end != nullptr; }

  // Mark scratch buffer.
  void        set_scratch_emit()    { _scratch_emit = true; }
  void        clear_scratch_emit()  { _scratch_emit = false; }
  bool        scratch_emit()        { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = nullptr; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  void set_skipped(int size) {
    _skipped_instructions_size = size;
  }

  int get_skipped() {
    return _skipped_instructions_size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    set_end(curr);
  }

  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3)  {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    *((uint8_t*)  curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4)  {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    *((uint8_t*)  curr++) = x3;
    *((uint8_t*)  curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x)  { emit_native(x); }
  void emit_float(jfloat  x)   { emit_native(x); }
  void emit_double(jdouble x)  { emit_native(x); }
  void emit_address(address x) { emit_native(x); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t) align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print_on(outputStream* st, const char* name);
#endif //PRODUCT
};
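
// Emission sketch (illustrative only; assemblers normally drive a CodeSection
// indirectly through a CodeBuffer, and 'cb' and the byte values below are
// assumptions, not taken from a real call site):
//
//   CodeSection* cs = cb.insts();                 // 'cb' is an initialized CodeBuffer
//   cs->set_mark();                               // remember where this instruction starts
//   cs->emit_int8(0xE8);                          // append bytes; end() advances with each emit
//   cs->emit_int32(0u);
//   cs->relocate(cs->mark(), relocInfo::runtime_call_type);  // attach a relocation at the mark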


#ifndef PRODUCT

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString();
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
 ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_remarks != nullptr) {
      Cell* tmp = _remarks;
      do {
        if (!function(tmp->offset, tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _remarks);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
 ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_strings != nullptr) {
      Cell* tmp = _strings;
      do {
        if (!function(tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _strings);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
 ~AsmRemarks();

  void init();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

  template<typename Function>
  inline bool iterate(Function function) const { return _remarks->iterate(function); }

private:
  AsmRemarkCollection* _remarks;
};

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
 ~DbgStrings();

  void init();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

  template<typename Function>
  bool iterate(Function function) const { return _strings->iterate(function); }

private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT


#ifdef ASSERT
#include "utilities/copy.hpp"

class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
 ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
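//
// Usage sketch (illustrative; names and sizes below are assumptions, not taken
// from real call sites):
//
//   // Variant (1): wrap pre-allocated memory; no relocations are recorded.
//   CodeBuffer stub_cb(stub_start, stub_size);
//
//   // Variant (2): memory allocated by the CodeBuffer itself (constructors 3/4 below).
//   CodeBuffer cb("sample_blob", 8 * K, 256);
//   MacroAssembler masm(&cb);          // an assembler then emits into cb.insts()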

class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;
  friend class AOTCodeReader;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableHashTable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;

  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.
  address      _last_label;     // address of the last bound label; it is also the start of the current basic block.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;     // used to collect requests for shared trampolines
  bool         _finalize_stubs; // Indicate if we need to finalize stubs to make CodeBuffer final.

  int          _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks   _asm_remarks;
  DbgStrings   _dbg_strings;
  bool         _collect_comments; // Indicate if we need to collect block comments at all.
  address      _decode_begin;     // start address for decode
  address      decode_begin();
#endif

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _total_start     = nullptr;
    _total_size      = 0;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _last_label      = nullptr;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin    = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;
  csize_t copy_relocations_to(address buf, csize_t buf_limit) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // adjust some internal address during expand
  void adjust_internal_address(address from, address to);

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    DEBUG_ONLY(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(const CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts() { return &_insts; }
  CodeSection* stubs() { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return null at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const {
    if (locator < 0)  return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }
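  // Example (illustrative): with sect_bits == 2, locator(0x40, SECT_INSTS) packs to
  // (0x40 << 2) | 1 == 0x101; locator_pos(0x101) == 0x40 and locator_sect(0x101) == SECT_INSTS
  // recover the position and section.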

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const                  { return _name; }
  CodeBuffer* before_expand() const         { return _before_expand; }
  BufferBlob* blob() const                  { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();                       // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const      { return _insts.start();      }
  address       insts_end() const        { return _insts.end();        }
  void      set_insts_end(address end)   {        _insts.set_end(end); }
  address       insts_mark() const       { return _insts.mark();       }
  void      set_insts_mark()             {        _insts.set_mark();   }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const                { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const             { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
                                           return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const        { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size)            { initialize_section_size(&_consts,  size); }
  void initialize_stubs_size(csize_t size)             { initialize_section_size(&_stubs,   size); }
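  // Example (illustrative sizes, not from a real call site): carve stubs off
  // first, then consts, since each call takes its bytes from the tail of the
  // remaining insts section:
  //
  //   CodeBuffer cb("sample_blob", 8 * K, 256);
  //   cb.initialize_stubs_size(1 * K);
  //   cb.initialize_consts_size(512);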
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(nullptr); }

  address last_label() const { return _last_label; }
  void set_last_label(address a) { _last_label = a; }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final. It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    print_on(outputStream* st);
#endif
  // Directly disassemble code buffer.
  void    decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};

// A Java method can contain calls to Java methods that are statically bound.
// Such calls need stubs to the interpreter; calls bound to the same Java method
// can share one stub to the interpreter.
// A SharedStubToInterpRequest is a request for such a shared stub.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
      _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
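
// Usage sketch (illustrative): reserve space before emitting a long instruction
// sequence; a 'true' return means the underlying buffer was reallocated, so any
// raw addresses previously taken into the section are stale:
//
//   if (cs->maybe_expand_to_ensure_remaining(256)) {
//     // re-derive cached addresses from cs->start() / cs->end()
//   }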

#endif // SHARE_ASM_CODEBUFFER_HPP