/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizableHashTable.hpp"
// Copy a value of type T into the code stream at p via memcpy, so the
// store is safe even when p is not aligned for T.
template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];

 public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
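
// Illustrative use (a sketch; 'current_offset' is a hypothetical variable):
// a code generator records entry points as section offsets while emitting,
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Verified_Entry, current_offset);
// and the recorded offsets are read back via value() when the final
// CodeBlob is assembled.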

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
  friend class AOTCodeReader;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;         // first byte of contents (instructions)
  address     _mark;          // user mark, usually an instruction beginning
  address     _end;           // current end address
  address     _limit;         // last possible (allocated) end address
  relocInfo*  _locs_start;    // first byte of relocation information
  relocInfo*  _locs_end;      // first byte after relocation information
  relocInfo*  _locs_limit;    // first byte after relocation information buf
  address     _locs_point;    // last relocated position (grows upward)
  bool        _locs_own;      // did I allocate the locs myself?
  bool        _scratch_emit;  // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;         // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;         // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start        = nullptr;
    _mark         = nullptr;
    _end          = nullptr;
    _limit        = nullptr;
    _locs_start   = nullptr;
    _locs_end     = nullptr;
    _locs_limit   = nullptr;
    _locs_point   = nullptr;
    _locs_own     = false;
    _scratch_emit = false;
    _skipped_instructions_size = 0;
    DEBUG_ONLY(_index = -1);
    DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start = start;
    _mark = nullptr;
    _end = start;

    _limit = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start = cs->_start;
    _mark = cs->_mark;
    _end = cs->_end;
    _limit = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address start() const { return _start; }
  address mark() const { return _mark; }
  address end() const { return _end; }
  address limit() const { return _limit; }
  csize_t size() const { return (csize_t)(_end - _start); }
  csize_t mark_off() const { assert(_mark != nullptr, "not an offset");
                             return (csize_t)(_mark - _start); }
  csize_t capacity() const { return (csize_t)(_limit - _start); }
  csize_t remaining() const { return (csize_t)(_limit - _end); }

  relocInfo* locs_start() const { return _locs_start; }
  relocInfo* locs_end() const { return _locs_end; }
  int locs_count() const { return (int)(_locs_end - _locs_start); }
  relocInfo* locs_limit() const { return _locs_limit; }
  address locs_point() const { return _locs_point; }
  csize_t locs_point_off() const { return (csize_t)(_locs_point - _start); }
  csize_t locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t index() const { return _index; }
  bool is_allocated() const { return _start != nullptr; }
  bool is_empty() const { return _start == _end; }
  bool has_locs() const { return _locs_end != nullptr; }

  // Mark scratch buffer.
  void set_scratch_emit() { _scratch_emit = true; }
  void clear_scratch_emit() { _scratch_emit = false; }
  bool scratch_emit() { return _scratch_emit; }

  CodeBuffer* outer() const { return _outer; }

  // is a given address in this section? (2nd version is end-inclusive)
  bool contains(address pc) const { return pc >= _start && pc < _end; }
  bool contains2(address pc) const { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const { return pc >= _start && pc < _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void set_end(address pc) { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
                              _mark = pc; }
  void set_mark() { _mark = _end; }
  void clear_mark() { _mark = nullptr; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    set_end(curr);
  }

  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    *((uint8_t*) curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x) { emit_native(x); }
  void emit_float(jfloat x) { emit_native(x); }
  void emit_double(jdouble x) { emit_native(x); }
  void emit_address(address x) { emit_native(x); }
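
  // Illustrative emission sequence (a sketch; assumes the section was
  // initialized with sufficient remaining capacity, 'cb' is hypothetical):
  //   CodeSection* cs = cb->insts();
  //   cs->emit_int8(0x90);           // advances _end by one byte
  //   cs->emit_int32(0x12345678u);   // four bytes, native byte order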

  // Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t) align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print_on(outputStream* st, const char* name);
#endif //PRODUCT
};


#ifndef PRODUCT

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString();
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_remarks != nullptr) {
      Cell* tmp = _remarks;
      do {
        if (!function(tmp->offset, tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _remarks);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_strings != nullptr) {
      Cell* tmp = _strings;
      do {
        if (!function(tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _strings);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
  ~AsmRemarks();

  void init();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

  template<typename Function>
  inline bool iterate(Function function) const { return _remarks->iterate(function); }

 private:
  AsmRemarkCollection* _remarks;
};

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
  ~DbgStrings();

  void init();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

  template<typename Function>
  bool iterate(Function function) const { return _strings->iterate(function); }

 private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT


#ifdef ASSERT
#include "utilities/copy.hpp"

class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
  ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;

// A CodeBuffer describes a memory space into which assembly
// code is generated. This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.). This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated. This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections). When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
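//
// A typical lifecycle for variant (2), shown as an illustrative sketch
// (the name and sizes below are hypothetical, and the assembler wiring is
// assumed, not defined in this header):
//   CodeBuffer cb("my_stub", code_size, locs_size); // allocates blob space
//   MacroAssembler masm(&cb);
//   // ... emit instructions and relocations through the assembler ...
//   // finally, contents are copied into a CodeBlob via copy_code_and_locs_to().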

class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;
  friend class AOTCodeReader;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally. This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections. The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data: Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableHashTable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,  // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char* _name;

  CodeSection _consts;          // constants, jump tables
  CodeSection _insts;           // instructions (the main section)
  CodeSection _stubs;           // stubs (call site support), deopt, exception handling

  CodeBuffer* _before_expand;   // dead buffer, from before the last expansion

  BufferBlob* _blob;            // optional buffer in CodeCache for generated code
  address     _total_start;     // first address of combined memory buffer
  csize_t     _total_size;      // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;

  OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
  Arena*      _overflow_arena;

  address     _last_insn;       // used to merge consecutive memory barriers, loads or stores.
  address     _last_label;      // the address of the last bound label; also the start of the current basic block.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;     // used to collect requests for shared trampolines
  bool                        _finalize_stubs;                 // Indicate if we need to finalize stubs to make CodeBuffer final.

  int         _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks  _asm_remarks;
  DbgStrings  _dbg_strings;
  bool        _collect_comments; // Indicate if we need to collect block comments at all.
  address     _decode_begin;     // start address for decode
  address     decode_begin();
#endif

  void initialize_misc(const char* name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _total_start     = nullptr;
    _total_size      = 0;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _last_label      = nullptr;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;
  csize_t copy_relocations_to(address buf, csize_t buf_limit) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // adjust some internal address during expand
  void adjust_internal_address(address from, address to);

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    DEBUG_ONLY(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization. The name must be something
  // informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info. The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3. Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method. It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts;  }
  CodeSection* stubs()  { return &_stubs;  }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return null at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const { // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator & sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int locator(address addr) const;
  address locator_address(int locator) const {
    if (locator < 0) return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }
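
  // Illustrative round trip (a sketch; the offset value is hypothetical):
  //   int loc = locator(0x40, SECT_INSTS);       // pack offset 0x40, section 1
  //   assert(locator_pos(loc) == 0x40, "");      // unpack the offset
  //   assert(locator_sect(loc) == SECT_INSTS, ""); // unpack the section index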

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void set_blob(BufferBlob* blob);
  void free_blob();  // Free the blob, if we own one.

  // Properties relative to the insts section:
  address insts_begin() const     { return _insts.start(); }
  address insts_end() const       { return _insts.end();   }
  void set_insts_end(address end) { _insts.set_end(end);   }
  address insts_mark() const      { return _insts.mark();  }
  void set_insts_mark()           { _insts.set_mark();     }

  // is there anything in the buffer other than the current section?
  bool is_pure() const { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const { assert(is_pure(), "no non-code");
                                    return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const { return _insts.remaining(); }

  // is a given address in the insts section? (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc);  }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note: Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(nullptr); }

  address last_label() const { return _last_label; }
  void set_last_label(address a) { _last_label = a; }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final. It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void decode();
  void print_on(outputStream* st);
#endif
  // Directly disassemble code buffer.
  void decode(address start, address end);

  // The following header contains architecture-specific implementations
  #include CPU_HEADER(codeBuffer)

};

// A Java method can contain calls to other Java methods that are statically
// bound. Such calls need stubs to the interpreter, and calls that share the
// same target method can share a single stub.
// A SharedStubToInterpRequest is a request for such a shared stub.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
      _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
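
// Illustrative guard before a bulk emission (a sketch; the byte count is
// hypothetical):
//   if (cs->maybe_expand_to_ensure_remaining(256)) {
//     // The buffer may have been reallocated; any cached addresses into
//     // the section are now stale.
//   }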

#endif // SHARE_ASM_CODEBUFFER_HPP