/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizableHashTable.hpp"
template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Inline_Entry,
                 Verified_Inline_Entry,
                 Verified_Inline_Entry_RO,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];
  void check(int e) const { assert(0 <= e && e < max_Entries, "must be"); }

 public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Inline_Entry  ] = 0;
    _values[Verified_Inline_Entry] = -1;
    _values[Verified_Inline_Entry_RO] = -1;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) const { check(e); return _values[e]; }
  void set_value(Entries e, int val) { check(e); _values[e] = val; }
};
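
// Example (illustrative only, not part of the interface): recording and
// querying entry-point offsets.
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Verified_Entry, 16);
//   int ve = offsets.value(CodeOffsets::Verified_Entry);  // 16
//   int fc = offsets.value(CodeOffsets::Frame_Complete);  // frame_never_safe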

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
  friend class AOTCodeReader;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;           // my section number (SECT_INSTS, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start        = nullptr;
    _mark         = nullptr;
    _end          = nullptr;
    _limit        = nullptr;
    _locs_start   = nullptr;
    _locs_end     = nullptr;
    _locs_limit   = nullptr;
    _locs_point   = nullptr;
    _locs_own     = false;
    _scratch_emit = false;
    _skipped_instructions_size = 0;
    DEBUG_ONLY(_index = -1);
    DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start      = start;
    _mark       = nullptr;
    _end        = start;

    _limit      = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != nullptr, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t      index() const         { return _index; }
  bool        is_allocated() const  { return _start != nullptr; }
  bool        is_empty() const      { return _start == _end; }
  bool        has_locs() const      { return _locs_end != nullptr; }

  // Mark scratch buffer.
  void        set_scratch_emit()    { _scratch_emit = true; }
  void        clear_scratch_emit()  { _scratch_emit = false; }
  bool        scratch_emit()        { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section? (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void set_end(address pc)  { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
                              _mark = pc; }
  void set_mark()           { _mark = _end; }
  void clear_mark()         { _mark = nullptr; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*)curr++) = x1;
    set_end(curr);
  }

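  // Emit a value of type T in native byte order; delegates to put_native
  // above, so emission at unaligned positions is safe.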
  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*)curr++) = x1;
    *((uint8_t*)curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) {
    address curr = end();
    *((uint8_t*)curr++) = x1;
    *((uint8_t*)curr++) = x2;
    *((uint8_t*)curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) {
    address curr = end();
    *((uint8_t*)curr++) = x1;
    *((uint8_t*)curr++) = x2;
    *((uint8_t*)curr++) = x3;
    *((uint8_t*)curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x)  { emit_native(x); }
  void emit_float(jfloat x)    { emit_native(x); }
  void emit_double(jdouble x)  { emit_native(x); }
  void emit_address(address x) { emit_native(x); }

  // Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t)align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print_on(outputStream* st, const char* name);
#endif //PRODUCT
};
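
// Example (illustrative sketch, assuming the section has enough capacity):
// emitting through the insts section of an enclosing CodeBuffer 'cb'.
//
//   CodeSection* cs = cb.insts();  // main instruction section
//   cs->set_mark();                // remember the start of this instruction
//   cs->emit_int8(0x90);           // one byte (an x86 'nop', for instance)
//   cs->emit_int32(0u);            // four more bytes, in native byte order
//   // cs->end() has advanced by five bytes; cs->mark() still points at the nop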


#ifndef PRODUCT

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString();
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_remarks != nullptr) {
      Cell* tmp = _remarks;
      do {
        if (!function(tmp->offset, tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _remarks);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (which
  // do not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_strings != nullptr) {
      Cell* tmp = _strings;
      do {
        if (!function(tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _strings);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
  ~AsmRemarks();

  void init();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

  template<typename Function>
  inline bool iterate(Function function) const { return _remarks->iterate(function); }

 private:
  AsmRemarkCollection* _remarks;
};
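
// Example (illustrative sketch): printing every remark via the lambda-enabled
// iterate() API above; returning false from the lambda stops iteration early.
//
//   asm_remarks.iterate([&](uint offset, const char* str) {
//     tty->print_cr("  %u: %s", offset, str);
//     return true;  // keep going
//   });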

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
  ~DbgStrings();

  void init();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

  template<typename Function>
  bool iterate(Function function) const { return _strings->iterate(function); }

 private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT


#ifdef ASSERT
#include "utilities/copy.hpp"

class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
  ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;

// A CodeBuffer describes a memory space into which assembly
// code is generated. This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.). This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated. This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections). When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
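//
// Example (illustrative sketch of the two variants; the names and sizes are
// hypothetical):
//
//   // (1) wrap pre-allocated memory, e.g. for interpreter or stub generation:
//   CodeBuffer fixed(code_start, code_size);
//
//   // (2) let the CodeBuffer allocate BufferBlob memory for code and
//   //     relocation info itself, e.g. on the way to an nmethod:
//   CodeBuffer growable("illustrative buffer", 4 * K, 1 * K);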

class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;
  friend class AOTCodeReader;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally. This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections. The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data: Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableHashTable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,  // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;         // constants, jump tables
  CodeSection  _insts;          // instructions (the main section)
  CodeSection  _stubs;          // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;

  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.
  address      _last_label;     // address of the last bound label; also the start of the current basic block.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;    // used to collect requests for shared trampolines
  bool         _finalize_stubs; // Indicate if we need to finalize stubs to make CodeBuffer final.

  int          _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks   _asm_remarks;
  DbgStrings   _dbg_strings;
  bool         _collect_comments; // Indicate if we need to collect block comments at all.
  address      _decode_begin;     // start address for decode
  address decode_begin();
#endif

  void initialize_misc(const char* name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _total_start     = nullptr;
    _total_size      = 0;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _last_label      = nullptr;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;
  csize_t copy_relocations_to(address buf, csize_t buf_limit) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // adjust some internal address during expand
  void adjust_internal_address(address from, address to);

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    DEBUG_ONLY(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  //     info but with lazy initialization. The name must be something
  //     informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  //     info. The name must be something informative and code_size must
  //     include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3. Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method. It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts; }
  CodeSection* stubs()  { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return null at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const {
    if (locator < 0) return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }
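
  // Example: with sect_bits == 2, locator(0x40, SECT_INSTS) packs to
  // (0x40 << 2) | 1 == 0x101; locator_pos(0x101) == 0x40 and
  // locator_sect(0x101) == SECT_INSTS recover the two components.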

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void set_blob(BufferBlob* blob);
  void free_blob();                 // Free the blob, if we own one.

  // Properties relative to the insts section:
  address insts_begin() const        { return _insts.start(); }
  address insts_end() const          { return _insts.end(); }
  void    set_insts_end(address end) { _insts.set_end(end); }
  address insts_mark() const         { return _insts.mark(); }
  void    set_insts_mark()           { _insts.set_mark(); }

  // is there anything in the buffer other than the insts section?
  bool    is_pure() const { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const { assert(is_pure(), "no non-code");
                                    return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const { return _insts.remaining(); }

  // is a given address in the insts section? (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr) ? 0 : recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr) ? 0 : recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note: Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const       { return _last_insn; }
  void set_last_insn(address a)   { _last_insn = a; }
  void clear_last_insn()          { set_last_insn(nullptr); }

  address last_label() const      { return _last_label; }
  void set_last_label(address a)  { _last_label = a; }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final. It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }
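
  // Example (illustrative): a compiler emitting 32-byte vector constants would
  // raise the default 8-byte alignment right after constructing the buffer,
  // e.g. set_const_section_alignment(32) (see initialize_misc above).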

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void decode();
  void print_on(outputStream* st);
#endif
  // Directly disassemble code buffer.
  void decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};

// A Java method can contain calls to other Java methods that can be statically
// bound. Such calls require stubs to the interpreter, and calls that share the
// same target method can share a single stub.
// A SharedStubToInterpRequest is a request for such a shared stub.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) :
      _shared_method(method), _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
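
// Example (illustrative sketch): ensuring space before a fixed-size emit. A
// true return means the BufferBlob was reallocated and the code rewritten
// (see CodeBuffer::expand), so raw addresses cached by the caller are stale;
// the CodeSection object itself remains valid.
//
//   cs->maybe_expand_to_ensure_remaining(2 * BytesPerWord);
//   cs->emit_address(target);  // 'target' is a hypothetical address variable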

#endif // SHARE_ASM_CODEBUFFER_HPP