1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_ASM_CODEBUFFER_HPP
26 #define SHARE_ASM_CODEBUFFER_HPP
27
28 #include "code/oopRecorder.hpp"
29 #include "code/relocInfo.hpp"
30 #include "compiler/compiler_globals.hpp"
31 #include "runtime/os.hpp"
32 #include "utilities/align.hpp"
33 #include "utilities/debug.hpp"
34 #include "utilities/growableArray.hpp"
35 #include "utilities/linkedlist.hpp"
36 #include "utilities/macros.hpp"
37 #include "utilities/resizableHashTable.hpp"
38
// Copy a value of type T to a possibly unaligned code address. Routing the
// store through memcpy keeps the write well-defined on strict-alignment
// platforms.
template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}
43
44 class PhaseCFG;
45 class Compile;
46 class BufferBlob;
47 class CodeBuffer;
48 class Label;
49 class ciMethod;
50 class SharedStubToInterpRequest;
51
52 class CodeOffsets: public StackObj {
53 public:
54 enum Entries { Entry,
55 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
57 OSR_Entry,
58 Exceptions, // Offset where exception handler lives
59 Deopt, // Offset where deopt handler lives
60 UnwindHandler, // Offset to default unwind handler
61 max_Entries };
62
  // Special value to mark codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.
65
66 enum { frame_never_safe = -1 };
67
68 private:
69 int _values[max_Entries];
70
71 public:
72 CodeOffsets() {
73 _values[Entry ] = 0;
74 _values[Verified_Entry] = 0;
75 _values[Frame_Complete] = frame_never_safe;
76 _values[OSR_Entry ] = 0;
77 _values[Exceptions ] = -1;
78 _values[Deopt ] = -1;
79 _values[UnwindHandler ] = -1;
80 }
81
82 int value(Entries e) { return _values[e]; }
83 void set_value(Entries e, int val) { _values[e] = val; }
84 };
85
86 // This class represents a stream of code and associated relocations.
87 // There are a few in each CodeBuffer.
88 // They are filled concurrently, and concatenated at the end.
89 class CodeSection {
90 friend class CodeBuffer;
91 friend class AOTCodeReader;
92 public:
93 typedef int csize_t; // code size type; would be size_t except for history
94
95 private:
96 address _start; // first byte of contents (instructions)
97 address _mark; // user mark, usually an instruction beginning
98 address _end; // current end address
99 address _limit; // last possible (allocated) end address
100 relocInfo* _locs_start; // first byte of relocation information
101 relocInfo* _locs_end; // first byte after relocation information
102 relocInfo* _locs_limit; // first byte after relocation information buf
103 address _locs_point; // last relocated position (grows upward)
104 bool _locs_own; // did I allocate the locs myself?
105 bool _scratch_emit; // Buffer is used for scratch emit, don't relocate.
106 int _skipped_instructions_size;
107 int8_t _index; // my section number (SECT_INST, etc.)
108 CodeBuffer* _outer; // enclosing CodeBuffer
109
110 // (Note: _locs_point used to be called _last_reloc_offset.)
111
112 CodeSection() {
113 _start = nullptr;
114 _mark = nullptr;
115 _end = nullptr;
116 _limit = nullptr;
117 _locs_start = nullptr;
118 _locs_end = nullptr;
119 _locs_limit = nullptr;
120 _locs_point = nullptr;
121 _locs_own = false;
122 _scratch_emit = false;
123 _skipped_instructions_size = 0;
124 DEBUG_ONLY(_index = -1);
125 DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
126 }
127
128 void initialize_outer(CodeBuffer* outer, int8_t index) {
129 _outer = outer;
130 _index = index;
131 }
132
133 void initialize(address start, csize_t size = 0) {
134 assert(_start == nullptr, "only one init step, please");
135 _start = start;
136 _mark = nullptr;
137 _end = start;
138
139 _limit = start + size;
140 _locs_point = start;
141 }
142
143 void initialize_locs(int locs_capacity);
144 void expand_locs(int new_capacity);
145 void initialize_locs_from(const CodeSection* source_cs);
146
147 // helper for CodeBuffer::expand()
148 void take_over_code_from(CodeSection* cs) {
149 _start = cs->_start;
150 _mark = cs->_mark;
151 _end = cs->_end;
152 _limit = cs->_limit;
153 _locs_point = cs->_locs_point;
154 _skipped_instructions_size = cs->_skipped_instructions_size;
155 }
156
157 public:
158 address start() const { return _start; }
159 address mark() const { return _mark; }
160 address end() const { return _end; }
161 address limit() const { return _limit; }
162 csize_t size() const { return (csize_t)(_end - _start); }
163 csize_t mark_off() const { assert(_mark != nullptr, "not an offset");
164 return (csize_t)(_mark - _start); }
165 csize_t capacity() const { return (csize_t)(_limit - _start); }
166 csize_t remaining() const { return (csize_t)(_limit - _end); }
167
168 relocInfo* locs_start() const { return _locs_start; }
169 relocInfo* locs_end() const { return _locs_end; }
170 int locs_count() const { return (int)(_locs_end - _locs_start); }
171 relocInfo* locs_limit() const { return _locs_limit; }
172 address locs_point() const { return _locs_point; }
173 csize_t locs_point_off() const{ return (csize_t)(_locs_point - _start); }
174 csize_t locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
175
176 int8_t index() const { return _index; }
177 bool is_allocated() const { return _start != nullptr; }
178 bool is_empty() const { return _start == _end; }
179 bool has_locs() const { return _locs_end != nullptr; }
180
181 // Mark scratch buffer.
182 void set_scratch_emit() { _scratch_emit = true; }
183 void clear_scratch_emit() { _scratch_emit = false; }
184 bool scratch_emit() { return _scratch_emit; }
185
186 CodeBuffer* outer() const { return _outer; }
187
188 // is a given address in this section? (2nd version is end-inclusive)
189 bool contains(address pc) const { return pc >= _start && pc < _end; }
190 bool contains2(address pc) const { return pc >= _start && pc <= _end; }
191 bool allocates(address pc) const { return pc >= _start && pc < _limit; }
192 bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }
193
194 // checks if two CodeSections are disjoint
195 //
196 // limit is an exclusive address and can be the start of another
197 // section.
198 bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }
199
200 void set_end(address pc) { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
201 void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
202 _mark = pc; }
203 void set_mark() { _mark = _end; }
204 void clear_mark() { _mark = nullptr; }
205
206 void set_locs_end(relocInfo* p) {
207 assert(p <= locs_limit(), "locs data fits in allocated buffer");
208 _locs_end = p;
209 }
210 void set_locs_point(address pc) {
211 assert(pc >= locs_point(), "relocation addr may not decrease");
212 assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
213 _locs_point = pc;
214 }
215
216 void register_skipped(int size) {
217 _skipped_instructions_size += size;
218 }
219
220 // Code emission
221 void emit_int8(uint8_t x1) {
222 address curr = end();
223 *((uint8_t*) curr++) = x1;
224 set_end(curr);
225 }
226
227 template <typename T>
228 void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }
229
230 void emit_int16(uint16_t x) { emit_native(x); }
231 void emit_int16(uint8_t x1, uint8_t x2) {
232 address curr = end();
233 *((uint8_t*) curr++) = x1;
234 *((uint8_t*) curr++) = x2;
235 set_end(curr);
236 }
237
238 void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) {
239 address curr = end();
240 *((uint8_t*) curr++) = x1;
241 *((uint8_t*) curr++) = x2;
242 *((uint8_t*) curr++) = x3;
243 set_end(curr);
244 }
245
246 void emit_int32(uint32_t x) { emit_native(x); }
247 void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) {
248 address curr = end();
249 *((uint8_t*) curr++) = x1;
250 *((uint8_t*) curr++) = x2;
251 *((uint8_t*) curr++) = x3;
252 *((uint8_t*) curr++) = x4;
253 set_end(curr);
254 }
255
256 void emit_int64(uint64_t x) { emit_native(x); }
257 void emit_float(jfloat x) { emit_native(x); }
258 void emit_double(jdouble x) { emit_native(x); }
259 void emit_address(address x) { emit_native(x); }
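
  // Illustrative use of the raw emitters (a sketch, not code from this file):
  // an x86-64 "mov eax, imm32" is one opcode byte followed by a 4-byte
  // immediate in native byte order, so with space remaining one could write:
  //
  //   cs->emit_int8(0xB8);          // opcode: mov eax, imm32
  //   cs->emit_int32(0x12345678);   // 32-bit immediate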
260
261 // Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
262 void initialize_shared_locs(relocInfo* buf, int length);
263
264 // Manage labels and their addresses.
265 address target(Label& L, address branch_pc);
266
267 // Emit a relocation.
268 void relocate(address at, RelocationHolder const& rspec, int format = 0);
269 void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);
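
  // For example (illustrative), a call to a runtime entry emitted at the
  // current mark would typically be recorded as:
  //
  //   cs->relocate(cs->mark(), relocInfo::runtime_call_type);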
270
271 int alignment() const;
272
273 // Slop between sections, used only when allocating temporary BufferBlob buffers.
274 static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }
275
276 csize_t align_at_start(csize_t off) const {
277 return (csize_t) align_up(off, alignment());
278 }
279
280 // Ensure there's enough space left in the current section.
281 // Return true if there was an expansion.
282 bool maybe_expand_to_ensure_remaining(csize_t amount);
283
284 #ifndef PRODUCT
285 void decode();
286 void print_on(outputStream* st, const char* name);
287 #endif //PRODUCT
288 };
289
290
291 #ifndef PRODUCT
292
293 // ----- CHeapString -----------------------------------------------------------
294
295 class CHeapString : public CHeapObj<mtCode> {
296 public:
297 CHeapString(const char* str) : _string(os::strdup(str)) {}
298 ~CHeapString();
299 const char* string() const { return _string; }
300
301 private:
302 const char* _string;
303 };
304
305 // ----- AsmRemarkCollection ---------------------------------------------------
306
307 class AsmRemarkCollection : public CHeapObj<mtCode> {
308 public:
309 AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
310 ~AsmRemarkCollection() {
311 assert(is_empty(), "Must 'clear()' before deleting!");
312 assert(_ref_cnt == 0, "No uses must remain when deleting!");
313 }
314 AsmRemarkCollection* reuse() {
315 precond(_ref_cnt > 0);
316 return _ref_cnt++, this;
317 }
318
319 const char* insert(uint offset, const char* remark);
320 const char* lookup(uint offset) const;
321 const char* next(uint offset) const;
322
323 bool is_empty() const { return _remarks == nullptr; }
324 uint clear();
325
326 template<typename Function>
327 bool iterate(Function function) const { // lambda enabled API
328 if (_remarks != nullptr) {
329 Cell* tmp = _remarks;
330 do {
        if (!function(tmp->offset, tmp->string())) {
332 return false;
333 }
334 tmp = tmp->next;
335 } while (tmp != _remarks);
336 }
337 return true;
338 }
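
  // Example (illustrative): print all remarks. The callback returning true
  // continues the iteration; returning false stops it early.
  //
  //   remarks.iterate([&](uint offset, const char* str) {
  //     st->print_cr("%u: %s", offset, str);
  //     return true;
  //   });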
339
340 private:
341 struct Cell : CHeapString {
342 Cell(const char* remark, uint offset) :
343 CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
344 void push_back(Cell* cell) {
345 Cell* head = this;
346 Cell* tail = prev;
347 tail->next = cell;
348 cell->next = head;
349 cell->prev = tail;
350 prev = cell;
351 }
352 uint offset;
353 Cell* prev;
354 Cell* next;
355 };
356 uint _ref_cnt;
357 Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (which
  // do not change the state of the list per se), supporting a simplistic
  // iteration scheme.
361 mutable Cell* _next;
362 };
363
364 // ----- DbgStringCollection ---------------------------------------------------
365
366 class DbgStringCollection : public CHeapObj<mtCode> {
367 public:
368 DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
369 ~DbgStringCollection() {
370 assert(is_empty(), "Must 'clear()' before deleting!");
371 assert(_ref_cnt == 0, "No uses must remain when deleting!");
372 }
373 DbgStringCollection* reuse() {
374 precond(_ref_cnt > 0);
375 return _ref_cnt++, this;
376 }
377
378 const char* insert(const char* str);
379 const char* lookup(const char* str) const;
380
381 bool is_empty() const { return _strings == nullptr; }
382 uint clear();
383
384 template<typename Function>
385 bool iterate(Function function) const { // lambda enabled API
386 if (_strings != nullptr) {
387 Cell* tmp = _strings;
388 do {
389 if (!function(tmp->string())) {
390 return false;
391 }
392 tmp = tmp->next;
393 } while (tmp != _strings);
394 }
395 return true;
396 }
397
398 private:
399 struct Cell : CHeapString {
400 Cell(const char* dbgstr) :
401 CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
402 void push_back(Cell* cell) {
403 Cell* head = this;
404 Cell* tail = prev;
405 tail->next = cell;
406 cell->next = head;
407 cell->prev = tail;
408 prev = cell;
409 }
410 Cell* prev;
411 Cell* next;
412 };
413 uint _ref_cnt;
414 Cell* _strings;
415 };
416
417 // The assumption made here is that most code remarks (or comments) added to
418 // the generated assembly code are unique, i.e. there is very little gain in
419 // trying to share the strings between the different offsets tracked in a
420 // buffer (or blob).
421
422 class AsmRemarks {
423 public:
424 AsmRemarks();
425 ~AsmRemarks();
426
427 void init();
428
429 const char* insert(uint offset, const char* remstr);
430
431 bool is_empty() const;
432
433 void share(const AsmRemarks &src);
434 void clear();
  // Clear the collection reference when storing AOT code;
  // a new one will be created during AOT code load.
437 void clear_ref() { _remarks = nullptr; }
438 uint print(uint offset, outputStream* strm = tty) const;
439
440 // For testing purposes only.
441 const AsmRemarkCollection* ref() const { return _remarks; }
442
443 template<typename Function>
444 inline bool iterate(Function function) const { return _remarks->iterate(function); }
445
446 private:
447 AsmRemarkCollection* _remarks;
448 };
449
450 // The assumption made here is that the number of debug strings (with a fixed
451 // address requirement) is a rather small set per compilation unit.
452
453 class DbgStrings {
454 public:
455 DbgStrings();
456 ~DbgStrings();
457
458 void init();
459
460 const char* insert(const char* dbgstr);
461
462 bool is_empty() const;
463
464 void share(const DbgStrings &src);
465 void clear();
  // Clear the collection reference when storing AOT code;
  // a new one will be created during AOT code load.
468 void clear_ref() { _strings = nullptr; }
469
470 // For testing purposes only.
471 const DbgStringCollection* ref() const { return _strings; }
472
473 template<typename Function>
474 bool iterate(Function function) const { return _strings->iterate(function); }
475
476 private:
477 DbgStringCollection* _strings;
478 };
479 #endif // not PRODUCT
480
481
482 #ifdef ASSERT
483 #include "utilities/copy.hpp"
484
485 class Scrubber {
486 public:
487 Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
488 ~Scrubber() {
489 Copy::fill_to_bytes(_addr, _size, badResourceValue);
490 }
491 private:
492 void* _addr;
493 size_t _size;
494 };
495 #endif // ASSERT
496
497 typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;
498
499 // A CodeBuffer describes a memory space into which assembly
500 // code is generated. This memory space usually occupies the
501 // interior of a single BufferBlob, but in some cases it may be
502 // an arbitrary span of memory, even outside the code cache.
503 //
504 // A code buffer comes in two variants:
505 //
506 // (1) A CodeBuffer referring to an already allocated piece of memory:
507 // This is used to direct 'static' code generation (e.g. for interpreter
508 // or stubroutine generation, etc.). This code comes with NO relocation
509 // information.
510 //
511 // (2) A CodeBuffer referring to a piece of memory allocated when the
512 // CodeBuffer is allocated. This is used for nmethod generation.
513 //
514 // The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
516 // Sections can grow (at the expense of a reallocation of the BufferBlob
517 // and recopying of all active sections). When the buffered code is finally
518 // written to an nmethod (or other CodeBlob), the contents (code, data,
519 // and relocations) of the sections are padded to an alignment and concatenated.
520 // Instructions and data in one section can contain relocatable references to
521 // addresses in a sibling section.
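//
// A minimal usage sketch (illustrative only; names are placeholders):
//
//   // Variant (1): wrap a caller-owned memory range; no relocation info.
//   CodeBuffer cb1(code_start, code_size);
//
//   // Variant (2): name the buffer, then let it allocate a BufferBlob
//   // large enough for both code and relocation info.
//   CodeBuffer cb2("my stub");
//   cb2.initialize(code_size, locs_size);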
522
523 class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
524 friend class CodeSection;
525 friend class StubCodeGenerator;
526 friend class AOTCodeReader;
527
528 private:
529 // CodeBuffers must be allocated on the stack except for a single
530 // special case during expansion which is handled internally. This
531 // is done to guarantee proper cleanup of resources.
532 void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
533 void operator delete(void* p) { ShouldNotCallThis(); }
534
535 public:
536 typedef int csize_t; // code size type; would be size_t except for history
537 enum : int8_t {
538 // Here is the list of all possible sections. The order reflects
539 // the final layout.
540 SECT_FIRST = 0,
541 SECT_CONSTS = SECT_FIRST, // Non-instruction data: Floats, jump tables, etc.
542 SECT_INSTS, // Executable instructions.
543 SECT_STUBS, // Outbound trampolines for supporting call sites.
544 SECT_LIMIT, SECT_NONE = -1
545 };
546
547 typedef LinkedListImpl<int> Offsets;
548 typedef ResizeableHashTable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;
549
550 private:
551 enum {
552 sect_bits = 2, // assert (SECT_LIMIT <= (1<<sect_bits))
553 sect_mask = (1<<sect_bits)-1
554 };
555
556 const char* _name;
557
558 CodeSection _consts; // constants, jump tables
559 CodeSection _insts; // instructions (the main section)
560 CodeSection _stubs; // stubs (call site support), deopt, exception handling
561
562 CodeBuffer* _before_expand; // dead buffer, from before the last expansion
563
564 BufferBlob* _blob; // optional buffer in CodeCache for generated code
565 address _total_start; // first address of combined memory buffer
566 csize_t _total_size; // size in bytes of combined memory buffer
567
568 OopRecorder* _oop_recorder;
569
570 OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
571 Arena* _overflow_arena;
572
573 address _last_insn; // used to merge consecutive memory barriers, loads or stores.
574 address _last_label; // record last bind label address, it's also the start of current bb.
575
  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
577 SharedTrampolineRequests* _shared_trampoline_requests; // used to collect requests for shared trampolines
578 bool _finalize_stubs; // Indicate if we need to finalize stubs to make CodeBuffer final.
579
580 int _const_section_alignment;
581
582 #ifndef PRODUCT
583 AsmRemarks _asm_remarks;
584 DbgStrings _dbg_strings;
585 bool _collect_comments; // Indicate if we need to collect block comments at all.
586 address _decode_begin; // start address for decode
587 address decode_begin();
588 #endif
589
  void initialize_misc(const char* name) {
591 // all pointers other than code_start/end and those inside the sections
592 assert(name != nullptr, "must have a name");
593 _name = name;
594 _before_expand = nullptr;
595 _blob = nullptr;
596 _total_start = nullptr;
597 _total_size = 0;
598 _oop_recorder = nullptr;
599 _overflow_arena = nullptr;
600 _last_insn = nullptr;
601 _last_label = nullptr;
602 _finalize_stubs = false;
603 _shared_stub_to_interp_requests = nullptr;
604 _shared_trampoline_requests = nullptr;
605
606 _consts.initialize_outer(this, SECT_CONSTS);
607 _insts.initialize_outer(this, SECT_INSTS);
608 _stubs.initialize_outer(this, SECT_STUBS);
609
610 // Default is to align on 8 bytes. A compiler can change this
611 // if larger alignment (e.g., 32-byte vector masks) is required.
612 _const_section_alignment = (int) sizeof(jdouble);
613
614 #ifndef PRODUCT
615 _decode_begin = nullptr;
616 // Collect block comments, but restrict collection to cases where a disassembly is output.
617 _collect_comments = ( PrintAssembly
618 || PrintStubCode
619 || PrintMethodHandleStubs
620 || PrintInterpreter
621 || PrintSignatureHandlers
622 || UnlockDiagnosticVMOptions
623 );
624 #endif
625 }
626
627 void initialize(address code_start, csize_t code_size) {
628 _total_start = code_start;
629 _total_size = code_size;
630 // Initialize the main section:
631 _insts.initialize(code_start, code_size);
632 assert(!_stubs.is_allocated(), "no garbage here");
633 assert(!_consts.is_allocated(), "no garbage here");
634 _oop_recorder = &_default_oop_recorder;
635 }
636
637 void initialize_section_size(CodeSection* cs, csize_t size);
638
639 // helper for CodeBuffer::expand()
640 void take_over_code_from(CodeBuffer* cs);
641
642 // ensure sections are disjoint, ordered, and contained in the blob
643 void verify_section_allocation();
644
645 // copies combined relocations to the blob, returns bytes copied
646 // (if target is null, it is a dry run only, just for sizing)
647 csize_t copy_relocations_to(CodeBlob* blob) const;
648 csize_t copy_relocations_to(address buf, csize_t buf_limit) const;
649
650 // copies combined code to the blob (assumes relocs are already in there)
651 void copy_code_to(CodeBlob* blob);
652
653 // moves code sections to new buffer (assumes relocs are already in there)
654 void relocate_code_to(CodeBuffer* cb) const;
655
656 // adjust some internal address during expand
657 void adjust_internal_address(address from, address to);
658
659 // set up a model of the final layout of my contents
660 void compute_final_layout(CodeBuffer* dest) const;
661
662 // Expand the given section so at least 'amount' is remaining.
663 // Creates a new, larger BufferBlob, and rewrites the code & relocs.
664 void expand(CodeSection* which_cs, csize_t amount);
665
666 // Helper for expand.
667 csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);
668
669 public:
670 // (1) code buffer referring to pre-allocated instruction memory
671 CodeBuffer(address code_start, csize_t code_size)
672 DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
673 {
674 assert(code_start != nullptr, "sanity");
675 initialize_misc("static buffer");
676 initialize(code_start, code_size);
677 DEBUG_ONLY(verify_section_allocation();)
678 }
679
680 // (2) CodeBuffer referring to pre-allocated CodeBlob.
681 CodeBuffer(CodeBlob* blob);
682
683 // (3) code buffer allocating codeBlob memory for code & relocation
684 // info but with lazy initialization. The name must be something
685 // informative.
686 CodeBuffer(const char* name)
687 DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
688 {
689 initialize_misc(name);
690 }
691
692 // (4) code buffer allocating codeBlob memory for code & relocation
693 // info. The name must be something informative and code_size must
694 // include both code and stubs sizes.
695 CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
696 DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
697 {
698 initialize_misc(name);
699 initialize(code_size, locs_size);
700 }
701
702 ~CodeBuffer();
703
704 // Initialize a CodeBuffer constructed using constructor 3. Using
705 // constructor 4 is equivalent to calling constructor 3 and then
706 // calling this method. It's been factored out for convenience of
707 // construction.
708 void initialize(csize_t code_size, csize_t locs_size);
709
710 CodeSection* consts() { return &_consts; }
711 CodeSection* insts() { return &_insts; }
712 CodeSection* stubs() { return &_stubs; }
713
714 const CodeSection* insts() const { return &_insts; }
715
716 // present sections in order; return null at end; consts is #0, etc.
717 CodeSection* code_section(int n) {
718 // This makes the slightly questionable but portable assumption
719 // that the various members (_consts, _insts, _stubs, etc.) are
720 // adjacent in the layout of CodeBuffer.
721 CodeSection* cs = &_consts + n;
722 assert(cs->index() == n || !cs->is_allocated(), "sanity");
723 return cs;
724 }
725 const CodeSection* code_section(int n) const { // yucky const stuff
726 return ((CodeBuffer*)this)->code_section(n);
727 }
728 static const char* code_section_name(int n);
729 int section_index_of(address addr) const;
730 bool contains(address addr) const {
731 // handy for debugging
732 return section_index_of(addr) > SECT_NONE;
733 }
734
735 // A stable mapping between 'locators' (small ints) and addresses.
736 static int locator_pos(int locator) { return locator >> sect_bits; }
737 static int locator_sect(int locator) { return locator & sect_mask; }
738 static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
739 int locator(address addr) const;
740 address locator_address(int locator) const {
741 if (locator < 0) return nullptr;
742 address start = code_section(locator_sect(locator))->start();
743 return start + locator_pos(locator);
744 }
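
  // For example, with sect_bits == 2, an offset of 0x40 in SECT_INSTS (== 1)
  // encodes as locator(0x40, SECT_INSTS) == (0x40 << 2) | 1 == 0x101.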
745
746 // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
747 bool is_backward_branch(Label& L);
748
749 // Properties
750 const char* name() const { return _name; }
751 CodeBuffer* before_expand() const { return _before_expand; }
752 BufferBlob* blob() const { return _blob; }
753 void set_blob(BufferBlob* blob);
754 void free_blob(); // Free the blob, if we own one.
755
756 // Properties relative to the insts section:
757 address insts_begin() const { return _insts.start(); }
758 address insts_end() const { return _insts.end(); }
759 void set_insts_end(address end) { _insts.set_end(end); }
760 address insts_mark() const { return _insts.mark(); }
761 void set_insts_mark() { _insts.set_mark(); }
762
763 // is there anything in the buffer other than the current section?
764 bool is_pure() const { return insts_size() == total_content_size(); }
765
766 // size in bytes of output so far in the insts sections
767 csize_t insts_size() const { return _insts.size(); }
768
769 // same as insts_size(), except that it asserts there is no non-code here
770 csize_t pure_insts_size() const { assert(is_pure(), "no non-code");
771 return insts_size(); }
772 // capacity in bytes of the insts sections
773 csize_t insts_capacity() const { return _insts.capacity(); }
774
775 // number of bytes remaining in the insts section
776 csize_t insts_remaining() const { return _insts.remaining(); }
777
778 // is a given address in the insts section? (2nd version is end-inclusive)
779 bool insts_contains(address pc) const { return _insts.contains(pc); }
780 bool insts_contains2(address pc) const { return _insts.contains2(pc); }
781
782 // Record any extra oops required to keep embedded metadata alive
783 void finalize_oop_references(const methodHandle& method);
784
785 // Allocated size in all sections, when aligned and concatenated
786 // (this is the eventual state of the content in its final
787 // CodeBlob).
788 csize_t total_content_size() const;
789
790 // Combined offset (relative to start of first section) of given
791 // section, as eventually found in the final CodeBlob.
792 csize_t total_offset_of(const CodeSection* cs) const;
793
794 // allocated size of all relocation data, including index, rounded up
795 csize_t total_relocation_size() const;
796
797 int total_skipped_instructions_size() const;
798
799 // allocated size of any and all recorded oops
800 csize_t total_oop_size() const {
801 OopRecorder* recorder = oop_recorder();
802 return (recorder == nullptr)? 0: recorder->oop_size();
803 }
804
805 // allocated size of any and all recorded metadata
806 csize_t total_metadata_size() const {
807 OopRecorder* recorder = oop_recorder();
808 return (recorder == nullptr)? 0: recorder->metadata_size();
809 }
810
811 // Configuration functions, called immediately after the CB is constructed.
812 // The section sizes are subtracted from the original insts section.
813 // Note: Call them in reverse section order, because each steals from insts.
814 void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
815 void initialize_stubs_size(csize_t size) { initialize_section_size(&_stubs, size); }
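
  // For example (illustrative), carving both optional sections out of a
  // freshly constructed buffer, stubs before consts as noted above:
  //
  //   cb.initialize_stubs_size(stubs_size);
  //   cb.initialize_consts_size(consts_size);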
816 // Override default oop recorder.
817 void initialize_oop_recorder(OopRecorder* r);
818
819 OopRecorder* oop_recorder() const { return _oop_recorder; }
820
821 address last_insn() const { return _last_insn; }
822 void set_last_insn(address a) { _last_insn = a; }
823 void clear_last_insn() { set_last_insn(nullptr); }
824
825 address last_label() const { return _last_label; }
826 void set_last_label(address a) { _last_label = a; }
827
828 #ifndef PRODUCT
829 AsmRemarks &asm_remarks() { return _asm_remarks; }
830 DbgStrings &dbg_strings() { return _dbg_strings; }
831
832 void clear_strings() {
833 _asm_remarks.clear();
834 _dbg_strings.clear();
835 }
836 #endif
837
838 // Code generation
839 void relocate(address at, RelocationHolder const& rspec, int format = 0) {
840 _insts.relocate(at, rspec, format);
841 }
842 void relocate(address at, relocInfo::relocType rtype, int format = 0) {
843 _insts.relocate(at, rtype, format);
844 }
845
846 // Management of overflow storage for binding of Labels.
847 GrowableArray<int>* create_patch_overflow();
848
849 // NMethod generation
850 void copy_code_and_locs_to(CodeBlob* blob) {
851 assert(blob != nullptr, "sane");
852 copy_relocations_to(blob);
853 copy_code_to(blob);
854 }
855 void copy_values_to(nmethod* nm) {
856 if (!oop_recorder()->is_unused()) {
857 oop_recorder()->copy_values_to(nm);
858 }
859 }
860
861 void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
862 const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);
863
864 // Log a little info about section usage in the CodeBuffer
865 void log_section_sizes(const char* name);
866
867 // Make a set of stubs final. It can create/optimize stubs.
868 bool finalize_stubs();
869
870 // Request for a shared stub to the interpreter
871 void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);
872
873 void set_const_section_alignment(int align) {
874 _const_section_alignment = align_up(align, HeapWordSize);
875 }
876
877 #ifndef PRODUCT
878 public:
879 // Printing / Decoding
880 // decodes from decode_begin() to code_end() and sets decode_begin to end
881 void decode();
882 void print_on(outputStream* st);
883 #endif
884 // Directly disassemble code buffer.
885 void decode(address start, address end);
886
887 // The following header contains architecture-specific implementations
888 #include CPU_HEADER(codeBuffer)
889
890 };
891
// A Java method can contain calls to Java methods that can be statically bound.
// Such calls need stubs to the interpreter. Calls that share the same callee
// can share a stub to the interpreter.
// A SharedStubToInterpRequest is a request for such a shared stub.
896 class SharedStubToInterpRequest : public ResourceObj {
897 private:
898 ciMethod* _shared_method;
899 CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer
900
901 public:
902 SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
903 _call_offset(call_offset) {}
904
905 ciMethod* shared_method() const { return _shared_method; }
906 CodeBuffer::csize_t call_offset() const { return _call_offset; }
907 };
908
909 inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
910 if (remaining() < amount) { _outer->expand(this, amount); return true; }
911 return false;
912 }
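
// Typical use (illustrative): ensure space before a fixed-size emit. If the
// section expanded, the code was copied into a new, larger BufferBlob, so any
// cached raw addresses into the old buffer are stale and must be refreshed.
//
//   if (cs->maybe_expand_to_ensure_remaining(instr_size)) {
//     // re-read cs->start()/cs->end(); the underlying memory has moved
//   }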
913
914 #endif // SHARE_ASM_CODEBUFFER_HPP