/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizeableResourceHash.hpp"

template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Inline_Entry,
                 Verified_Inline_Entry,
                 Verified_Inline_Entry_RO,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // Special value marking code blobs where profile (forte) stack walking is
  // always dangerous and suspect.
  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];
  void check(int e) const { assert(0 <= e && e < max_Entries, "must be"); }

 public:
  CodeOffsets() {
    _values[Entry                   ] = 0;
    _values[Verified_Entry          ] = 0;
    _values[Inline_Entry            ] = 0;
    _values[Verified_Inline_Entry   ] = -1;
    _values[Verified_Inline_Entry_RO] = -1;
    _values[Frame_Complete          ] = frame_never_safe;
    _values[OSR_Entry               ] = 0;
    _values[Exceptions              ] = -1;
    _values[Deopt                   ] = -1;
    _values[DeoptMH                 ] = -1;
    _values[UnwindHandler           ] = -1;
  }

  int  value(Entries e) const        { check(e); return _values[e]; }
  void set_value(Entries e, int val) { check(e); _values[e] = val; }
};
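
// For illustration only (not part of this interface): a code generator
// typically records entry points as section-relative offsets and reads them
// back when the blob is created. A minimal sketch, assuming a hypothetical
// `masm` with an `offset()` accessor:
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Entry, masm->offset());
//   // ... emit the verified entry point ...
//   offsets.set_value(CodeOffsets::Verified_Entry, masm->offset());
//   assert(offsets.value(CodeOffsets::Frame_Complete) == CodeOffsets::frame_never_safe,
//          "stays unset until the frame is actually built");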
// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
  friend class AOTCodeReader;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = nullptr;
    _mark          = nullptr;
    _end           = nullptr;
    _limit         = nullptr;
    _locs_start    = nullptr;
    _locs_end      = nullptr;
    _locs_limit    = nullptr;
    _locs_point    = nullptr;
    _locs_own      = false;
    _scratch_emit  = false;
    _skipped_instructions_size = 0;
    DEBUG_ONLY(_index = -1);
    DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start         = start;
    _mark          = nullptr;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != nullptr, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t      index() const         { return _index; }
  bool        is_allocated() const  { return _start != nullptr; }
  bool        is_empty() const      { return _start == _end; }
  bool        has_locs() const      { return _locs_end != nullptr; }
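
  // For illustration only (an invariant sketch, not part of the interface):
  // the section addresses always satisfy _start <= _end <= _limit, so the
  // accessors above are related by
  //
  //   size() + remaining() == capacity()
  //
  // e.g. a hypothetical sanity check over some section `cs` could be:
  //
  //   assert(cs->size() + cs->remaining() == cs->capacity(), "section layout");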
  // Mark scratch buffer.
  void set_scratch_emit()   { _scratch_emit = true; }
  void clear_scratch_emit() { _scratch_emit = false; }
  bool scratch_emit()       { return _scratch_emit; }

  CodeBuffer* outer() const { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void    set_end(address pc)  { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void    set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
                                 _mark = pc; }
  void    set_mark()           { _mark = _end; }
  void    clear_mark()         { _mark = nullptr; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    set_end(curr);
  }

  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    *((uint8_t*) curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x)  { emit_native(x); }
  void emit_float(jfloat x)    { emit_native(x); }
  void emit_double(jdouble x)  { emit_native(x); }
  void emit_address(address x) { emit_native(x); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;
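
  // For illustration only: emitting raw bytes and multi-byte immediates into
  // a section. A hedged sketch; `cb` and the byte values are hypothetical,
  // and a real caller would first ensure enough space remains (see
  // maybe_expand_to_ensure_remaining() below):
  //
  //   CodeSection* cs = cb.insts();
  //   cs->maybe_expand_to_ensure_remaining(8);
  //   cs->emit_int8(0x90);           // one opcode byte
  //   cs->emit_int32(0xdeadbeefu);   // a 32-bit immediate, native byte order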
  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t) align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print_on(outputStream* st, const char* name);
#endif //PRODUCT
};


#ifndef PRODUCT

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString();
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda-enabled API
    if (_remarks != nullptr) {
      Cell* tmp = _remarks;
      do {
        if (!function(tmp->offset, tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _remarks);
    }
    return true;
  }
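
  // For illustration only, a hedged usage sketch: iterate() accepts any
  // callable taking (uint, const char*) and returning bool; returning false
  // stops the traversal early. E.g., with a hypothetical stream `st`:
  //
  //   remarks->iterate([&](uint offset, const char* str) {
  //     st->print_cr("%u: %s", offset, str);
  //     return true; // keep going
  //   });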
 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda-enabled API
    if (_strings != nullptr) {
      Cell* tmp = _strings;
      do {
        if (!function(tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _strings);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
  ~AsmRemarks();

  void init();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

  template<typename Function>
  inline bool iterate(Function function) const { return _remarks->iterate(function); }

 private:
  AsmRemarkCollection* _remarks;
};

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
  ~DbgStrings();

  void init();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

  template<typename Function>
  bool iterate(Function function) const { return _strings->iterate(function); }

 private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT
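
// For illustration only (non-PRODUCT builds): remarks are keyed by section
// offset and shared by reference counting. A hedged sketch of typical use,
// with hypothetical offsets and strings:
//
//   AsmRemarks remarks;
//   remarks.insert(0x10, "spill slot reload");
//   remarks.iterate([&](uint offset, const char* str) {
//     tty->print_cr("  @%u: %s", offset, str);
//     return true;
//   });
//   remarks.clear();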

#ifdef ASSERT
#include "utilities/copy.hpp"

class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
  ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
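
// For illustration only, a hedged sketch of the two variants (names and
// sizes are hypothetical):
//
//   // (1) static generation into pre-allocated memory, no relocations:
//   CodeBuffer static_cb(stub_memory, stub_memory_size);
//
//   // (2) buffer that allocates its own BufferBlob, with relocation info:
//   CodeBuffer nmethod_cb("my_compile", code_size, locs_size);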
class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;
  friend class AOTCodeReader;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableResourceHashtable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;            // constants, jump tables
  CodeSection  _insts;             // instructions (the main section)
  CodeSection  _stubs;             // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;     // dead buffer, from before the last expansion

  BufferBlob*  _blob;              // optional buffer in CodeCache for generated code
  address      _total_start;       // first address of combined memory buffer
  csize_t      _total_size;        // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;

  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;         // used to merge consecutive memory barriers, loads or stores.
  address      _last_label;        // record last bind label address, it's also the start of current bb.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;     // used to collect requests for shared trampolines
  bool         _finalize_stubs;    // Indicate if we need to finalize stubs to make CodeBuffer final.

  int          _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks   _asm_remarks;
  DbgStrings   _dbg_strings;
  bool         _collect_comments;  // Indicate if we need to collect block comments at all.
  address      _decode_begin;      // start address for decode
  address      decode_begin();
#endif

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _total_start     = nullptr;
    _total_size      = 0;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _last_label      = nullptr;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin    = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cb);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // adjust some internal address during expand
  void adjust_internal_address(address from, address to);

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);
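
  // For illustration only, a hedged sketch of how expansion is driven (the
  // emit loop and `worst_case_len` are hypothetical): callers never invoke
  // expand() directly; they go through the section, e.g.
  //
  //   while (has_more_instructions()) {
  //     cb.insts()->maybe_expand_to_ensure_remaining(worst_case_len);
  //     emit_one_instruction(cb.insts());
  //   }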
  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    DEBUG_ONLY(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  //     info but with lazy initialization.  The name must be something
  //     informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  //     info.  The name must be something informative and code_size must
  //     include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts; }
  CodeSection* stubs()  { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return null at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const {
    if (locator < 0)  return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }
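
  // For illustration only: a locator packs a section-relative position and a
  // section index into one int, so (with a hypothetical offset `off`) it
  // should round-trip as
  //
  //   int loc = CodeBuffer::locator(off, CodeBuffer::SECT_INSTS);
  //   assert(CodeBuffer::locator_pos(loc)  == off,                    "round-trip");
  //   assert(CodeBuffer::locator_sect(loc) == CodeBuffer::SECT_INSTS, "round-trip");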
  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();               // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const    { return _insts.start();      }
  address       insts_end() const      { return _insts.end();        }
  void      set_insts_end(address end) {        _insts.set_end(end); }
  address       insts_mark() const     { return _insts.mark();       }
  void      set_insts_mark()           {        _insts.set_mark();   }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const              { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const           { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const      { assert(is_pure(), "no non-code");
                                         return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const       { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const      { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note: Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
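
  // For illustration only, a hedged sketch honoring the reverse-order note
  // above (the names and sizes are hypothetical):
  //
  //   CodeBuffer cb("my_blob", total_size, locs_size);
  //   cb.initialize_stubs_size(stubs_size);   // SECT_STUBS first (reverse order)
  //   cb.initialize_consts_size(consts_size); // then SECT_CONSTS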
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(nullptr); }

  address last_label() const { return _last_label; }
  void set_last_label(address a) { _last_label = a; }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final.  It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    print_on(outputStream* st);
#endif
  // Directly disassemble code buffer.
  void    decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};

// A Java method can have calls of Java methods which can be statically bound.
// Calls of Java methods need stubs to the interpreter. Calls sharing the same
// Java method can share a stub to the interpreter.
// A SharedStubToInterpRequest is a request for a shared stub to the interpreter.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) :
      _shared_method(method), _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}

#endif // SHARE_ASM_CODEBUFFER_HPP