/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizeableResourceHash.hpp"

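// Store a value of type T at p via memcpy.  Going through memcpy keeps the
// store well-defined even when p is not aligned for T, which is common when
// writing into a byte-oriented code stream.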
template <typename T>
static inline void put_native(address p, T x) {
    memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
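
// A minimal usage sketch (values are illustrative only):
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Verified_Entry, 16);
//   if (offsets.value(CodeOffsets::Frame_Complete) == CodeOffsets::frame_never_safe) {
//     // forte stack walking must treat this code as suspect
//   }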

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note:  _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = nullptr;
    _mark          = nullptr;
    _end           = nullptr;
    _limit         = nullptr;
    _locs_start    = nullptr;
    _locs_end      = nullptr;
    _locs_limit    = nullptr;
    _locs_point    = nullptr;
    _locs_own      = false;
    _scratch_emit  = false;
    _skipped_instructions_size = 0;
    debug_only(_index = -1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start         = start;
    _mark          = nullptr;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != nullptr, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t      index() const         { return _index; }
  bool        is_allocated() const  { return _start != nullptr; }
  bool        is_empty() const      { return _start == _end; }
  bool        has_locs() const      { return _locs_end != nullptr; }

  // Mark scratch buffer.
  void        set_scratch_emit()    { _scratch_emit = true; }
  void        clear_scratch_emit()  { _scratch_emit = false; }
  bool        scratch_emit()        { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = nullptr; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    set_end(curr);
  }

  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3)  {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    *((uint8_t*)  curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4)  {
    address curr = end();
    *((uint8_t*)  curr++) = x1;
    *((uint8_t*)  curr++) = x2;
    *((uint8_t*)  curr++) = x3;
    *((uint8_t*)  curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x)  { emit_native(x); }
  void emit_float(jfloat  x)   { emit_native(x); }
  void emit_double(jdouble x)  { emit_native(x); }
  void emit_address(address x) { emit_native(x); }
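
  // A minimal emission sketch (illustrative only; real emitters normally go
  // through an Assembler/MacroAssembler rather than calling these directly):
  //
  //   CodeSection* cs = cb.insts();   // 'cb' is some CodeBuffer
  //   cs->emit_int8(0x90);            // one byte
  //   cs->emit_int32(0x12345678u);    // four bytes, native byte order
  //   cs->emit_address(entry_point);  // pointer-sized value ('entry_point' is hypothetical)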

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t) align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print(const char* name);
#endif //PRODUCT
};


#ifndef PRODUCT

class AsmRemarkCollection;
class DbgStringCollection;

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
 ~AsmRemarks();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

private:
  AsmRemarkCollection* _remarks;
};

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
 ~DbgStrings();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT


#ifdef ASSERT
#include "utilities/copy.hpp"

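// Debug-only helper: on destruction, poison-fills the covered memory with
// badResourceValue so stale uses of a dead CodeBuffer are easier to catch.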
class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
 ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
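//
// A typical usage sketch for variant (2) (names and sizes are illustrative):
//
//   CodeBuffer cb("my_stub", code_size, locs_size);
//   MacroAssembler masm(&cb);
//   // ... emit instructions through masm into cb's insts section ...
//   // the contents are later copied into a CodeBlob/nmethod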

class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableResourceHashtable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  // Size of code without stubs generated at the end of instructions section
  csize_t      _main_code_size;

  OopRecorder* _oop_recorder;

  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;     // used to collect requests for shared trampolines
  bool         _finalize_stubs; // Indicate if we need to finalize stubs to make CodeBuffer final.

  int          _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks   _asm_remarks;
  DbgStrings   _dbg_strings;
  bool         _collect_comments; // Indicate if we need to collect block comments at all.
  address      _decode_begin;     // start address for decode
  address      decode_begin();
#endif

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _main_code_size  = 0;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin    = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    debug_only(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts() { return &_insts; }
  CodeSection* stubs() { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const {
    if (locator < 0)  return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }
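
  // For example, with sect_bits == 2, locator(0x40, SECT_INSTS) encodes to
  // (0x40 << 2) | 1 == 0x101; locator_pos() and locator_sect() recover the
  // offset 0x40 and section index 1 (SECT_INSTS) from it.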

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const                  { return _name; }
  void set_name(const char* name)           { _name = name; }
  CodeBuffer* before_expand() const         { return _before_expand; }
  BufferBlob* blob() const                  { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();                       // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const      { return _insts.start();      }
  address       insts_end() const        { return _insts.end();        }
  void      set_insts_end(address end)   {        _insts.set_end(end); }
  address       insts_mark() const       { return _insts.mark();       }
  void      set_insts_mark()             {        _insts.set_mark();   }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const                { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const             { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
                                           return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const        { return _insts.remaining(); }

  // size of code without stubs in instruction section
  csize_t main_code_size() const         { return _main_code_size; }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size)            { initialize_section_size(&_consts,  size); }
  void initialize_stubs_size(csize_t size)             { initialize_section_size(&_stubs,   size); }
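  //
  // For example, for a buffer needing 512 stub bytes and 128 const bytes
  // (sizes are illustrative), follow the reverse section order noted above:
  //   cb.initialize_stubs_size(512);   // last section first ...
  //   cb.initialize_consts_size(128);  // ... then the first section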
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(nullptr); }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final. It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    print();
#endif
  // Directly disassemble code buffer.
  void    decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};

// A Java method can contain calls to other Java methods that are statically
// bound.  Such calls need stubs to the interpreter; calls bound to the same
// Java method can share one such stub.
// A SharedStubToInterpRequest is a request for a shared stub to the interpreter.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
      _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
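
// A typical guard before emitting into a section (illustrative only):
//
//   if (cs->maybe_expand_to_ensure_remaining(4 * sizeof(uint32_t))) {
//     // the underlying blob was reallocated; raw pointers previously taken
//     // into this section are now stale and must be re-derived
//   }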

#endif // SHARE_ASM_CODEBUFFER_HPP