/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "compiler/compiler_globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/resizeableResourceHash.hpp"
#include "utilities/macros.hpp"

template <typename T>
static inline void put_native(address p, T x) {
  memcpy((void*)p, &x, sizeof x);
}

class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class ciMethod;
class SharedStubToInterpRequest;

class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];

 public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
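
// Illustrative sketch (not part of this interface): a code generator typically
// fills in a CodeOffsets instance as it emits code and reads the values back
// when the CodeBlob is created. The 'masm' object and its offset() method are
// hypothetical names used only for this example.
//
//   CodeOffsets offsets;                               // entries default to 0 or -1
//   offsets.set_value(CodeOffsets::Verified_Entry, masm->offset());
//   offsets.set_value(CodeOffsets::Frame_Complete, masm->offset());
//   ...
//   int deopt = offsets.value(CodeOffsets::Deopt);     // still -1 if never recorded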

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
  friend class AOTCodeReader;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  int         _skipped_instructions_size;
  int8_t      _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start        = nullptr;
    _mark         = nullptr;
    _end          = nullptr;
    _limit        = nullptr;
    _locs_start   = nullptr;
    _locs_end     = nullptr;
    _locs_limit   = nullptr;
    _locs_point   = nullptr;
    _locs_own     = false;
    _scratch_emit = false;
    _skipped_instructions_size = 0;
    DEBUG_ONLY(_index = -1);
    DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int8_t index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == nullptr, "only one init step, please");
    _start      = start;
    _mark       = nullptr;
    _end        = start;

    _limit      = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
    _skipped_instructions_size = cs->_skipped_instructions_size;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != nullptr, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }

  int8_t      index() const         { return _index; }
  bool        is_allocated() const  { return _start != nullptr; }
  bool        is_empty() const      { return _start == _end; }
  bool        has_locs() const      { return _locs_end != nullptr; }
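
  // Worked example for the accessors above (addresses are hypothetical): with
  // _start == 0x1000, _end == 0x1040 and _limit == 0x1100, size() is 0x40,
  // capacity() is 0x100 and remaining() is 0xC0; code emission advances _end
  // toward _limit, never past it.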

  // Mark scratch buffer.
  void set_scratch_emit()   { _scratch_emit = true; }
  void clear_scratch_emit() { _scratch_emit = false; }
  bool scratch_emit()       { return _scratch_emit; }

  CodeBuffer* outer() const { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  // checks if two CodeSections are disjoint
  //
  // limit is an exclusive address and can be the start of another
  // section.
  bool disjoint(CodeSection* cs) const { return cs->_limit <= _start || cs->_start >= _limit; }

  void set_end(address pc)  { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
                              _mark = pc; }
  void set_mark()           { _mark = _end; }
  void clear_mark()         { _mark = nullptr; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc), "relocation addr " INTPTR_FORMAT " must be in this section from " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(_start), p2i(_limit));
    _locs_point = pc;
  }

  void register_skipped(int size) {
    _skipped_instructions_size += size;
  }

  // Code emission
  void emit_int8(uint8_t x1) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    set_end(curr);
  }

  template <typename T>
  void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); }

  void emit_int16(uint16_t x) { emit_native(x); }
  void emit_int16(uint8_t x1, uint8_t x2) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    set_end(curr);
  }

  void emit_int24(uint8_t x1, uint8_t x2, uint8_t x3) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    set_end(curr);
  }

  void emit_int32(uint32_t x) { emit_native(x); }
  void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) {
    address curr = end();
    *((uint8_t*) curr++) = x1;
    *((uint8_t*) curr++) = x2;
    *((uint8_t*) curr++) = x3;
    *((uint8_t*) curr++) = x4;
    set_end(curr);
  }

  void emit_int64(uint64_t x)  { emit_native(x); }
  void emit_float(jfloat x)    { emit_native(x); }
  void emit_double(jdouble x)  { emit_native(x); }
  void emit_address(address x) { emit_native(x); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  int alignment() const;
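
  // Illustrative emission sketch (hypothetical names; assumes 'cs' is an
  // initialized section of some CodeBuffer with enough remaining space):
  //   cs->set_mark();                   // remember where this instruction starts
  //   cs->emit_int32(insn_bits);        // append four bytes, advancing end()
  //   cs->relocate(cs->mark(), rspec);  // attach relocation info at the mark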

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const {
    return (csize_t) align_up(off, alignment());
  }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print_on(outputStream* st, const char* name);
#endif //PRODUCT
};


#ifndef PRODUCT

// ----- CHeapString -----------------------------------------------------------

class CHeapString : public CHeapObj<mtCode> {
 public:
  CHeapString(const char* str) : _string(os::strdup(str)) {}
  ~CHeapString();
  const char* string() const { return _string; }

 private:
  const char* _string;
};

// ----- AsmRemarkCollection ---------------------------------------------------

class AsmRemarkCollection : public CHeapObj<mtCode> {
 public:
  AsmRemarkCollection() : _ref_cnt(1), _remarks(nullptr), _next(nullptr) {}
  ~AsmRemarkCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  AsmRemarkCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(uint offset, const char* remark);
  const char* lookup(uint offset) const;
  const char* next(uint offset) const;

  bool is_empty() const { return _remarks == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_remarks != nullptr) {
      Cell* tmp = _remarks;
      do {
        if (!function(tmp->offset, tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _remarks);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* remark, uint offset) :
        CHeapString(remark), offset(offset), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    uint offset;
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _remarks;
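  // Note: the Cells above form a circular, doubly-linked list; _remarks points
  // at the head and push_back() appends a new Cell just before it.
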
  // Using a 'mutable' iteration pointer to allow 'const' on lookup/next (that
  // does not change the state of the list per se), supporting a simplistic
  // iteration scheme.
  mutable Cell* _next;
};

// ----- DbgStringCollection ---------------------------------------------------

class DbgStringCollection : public CHeapObj<mtCode> {
 public:
  DbgStringCollection() : _ref_cnt(1), _strings(nullptr) {}
  ~DbgStringCollection() {
    assert(is_empty(), "Must 'clear()' before deleting!");
    assert(_ref_cnt == 0, "No uses must remain when deleting!");
  }
  DbgStringCollection* reuse() {
    precond(_ref_cnt > 0);
    return _ref_cnt++, this;
  }

  const char* insert(const char* str);
  const char* lookup(const char* str) const;

  bool is_empty() const { return _strings == nullptr; }
  uint clear();

  template<typename Function>
  bool iterate(Function function) const { // lambda enabled API
    if (_strings != nullptr) {
      Cell* tmp = _strings;
      do {
        if (!function(tmp->string())) {
          return false;
        }
        tmp = tmp->next;
      } while (tmp != _strings);
    }
    return true;
  }

 private:
  struct Cell : CHeapString {
    Cell(const char* dbgstr) :
        CHeapString(dbgstr), prev(nullptr), next(nullptr) {}
    void push_back(Cell* cell) {
      Cell* head = this;
      Cell* tail = prev;
      tail->next = cell;
      cell->next = head;
      cell->prev = tail;
      prev = cell;
    }
    Cell* prev;
    Cell* next;
  };
  uint  _ref_cnt;
  Cell* _strings;
};

// The assumption made here is that most code remarks (or comments) added to
// the generated assembly code are unique, i.e. there is very little gain in
// trying to share the strings between the different offsets tracked in a
// buffer (or blob).

class AsmRemarks {
 public:
  AsmRemarks();
  ~AsmRemarks();

  const char* insert(uint offset, const char* remstr);

  bool is_empty() const;

  void share(const AsmRemarks &src);
  void clear();
  uint print(uint offset, outputStream* strm = tty) const;

  // For testing purposes only.
  const AsmRemarkCollection* ref() const { return _remarks; }

  template<typename Function>
  inline bool iterate(Function function) const { return _remarks->iterate(function); }

 private:
  AsmRemarkCollection* _remarks;
};

// The assumption made here is that the number of debug strings (with a fixed
// address requirement) is a rather small set per compilation unit.

class DbgStrings {
 public:
  DbgStrings();
  ~DbgStrings();

  const char* insert(const char* dbgstr);

  bool is_empty() const;

  void share(const DbgStrings &src);
  void clear();

  // For testing purposes only.
  const DbgStringCollection* ref() const { return _strings; }

  template<typename Function>
  bool iterate(Function function) const { return _strings->iterate(function); }

 private:
  DbgStringCollection* _strings;
};
#endif // not PRODUCT


#ifdef ASSERT
#include "utilities/copy.hpp"

class Scrubber {
 public:
  Scrubber(void* addr, size_t size) : _addr(addr), _size(size) {}
  ~Scrubber() {
    Copy::fill_to_bytes(_addr, _size, badResourceValue);
  }
 private:
  void*  _addr;
  size_t _size;
};
#endif // ASSERT

typedef GrowableArray<SharedStubToInterpRequest> SharedStubToInterpRequests;
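
// Illustrative sketch (hypothetical, debug-only usage): the lambda-based
// iterate() API of AsmRemarks above can be driven like this, where 'cb' is
// some CodeBuffer whose remarks have been collected:
//   cb.asm_remarks().iterate([&](uint offset, const char* str) {
//     tty->print_cr("%u: %s", offset, str);
//     return true;              // returning false stops the iteration early
//   });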

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.

class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
  friend class CodeSection;
  friend class StubCodeGenerator;
  friend class AOTCodeReader;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return resource_allocate_bytes(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum : int8_t {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

  typedef LinkedListImpl<int> Offsets;
  typedef ResizeableResourceHashtable<address, Offsets, AnyObj::C_HEAP, mtCompiler> SharedTrampolineRequests;

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;         // constants, jump tables
  CodeSection  _insts;          // instructions (the main section)
  CodeSection  _stubs;          // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;

  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;      // used to merge consecutive memory barriers, loads or stores.
  address      _last_label;     // address of the last bound label; it is also the start of the current basic block.

  SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
  SharedTrampolineRequests*   _shared_trampoline_requests;     // used to collect requests for shared trampolines
  bool         _finalize_stubs; // Indicate if we need to finalize stubs to make CodeBuffer final.

  int          _const_section_alignment;

#ifndef PRODUCT
  AsmRemarks   _asm_remarks;
  DbgStrings   _dbg_strings;
  bool         _collect_comments; // Indicate if we need to collect block comments at all.
  address      _decode_begin;     // start address for decode
  address decode_begin();
#endif

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != nullptr, "must have a name");
    _name            = name;
    _before_expand   = nullptr;
    _blob            = nullptr;
    _total_start     = nullptr;
    _total_size      = 0;
    _oop_recorder    = nullptr;
    _overflow_arena  = nullptr;
    _last_insn       = nullptr;
    _last_label      = nullptr;
    _finalize_stubs  = false;
    _shared_stub_to_interp_requests = nullptr;
    _shared_trampoline_requests = nullptr;

    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);

    // Default is to align on 8 bytes. A compiler can change this
    // if larger alignment (e.g., 32-byte vector masks) is required.
    _const_section_alignment = (int) sizeof(jdouble);

#ifndef PRODUCT
    _decode_begin    = nullptr;
    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
#endif
  }

  void initialize(address code_start, csize_t code_size) {
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // adjust some internal address during expand
  void adjust_internal_address(address from, address to);

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    assert(code_start != nullptr, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    DEBUG_ONLY(verify_section_allocation();)
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  //     info but with lazy initialization.  The name must be something
  //     informative.
  CodeBuffer(const char* name)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  //     info.  The name must be something informative and code_size must
  //     include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size)
    DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
  {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts;  }
  CodeSection* stubs()  { return &_stubs;  }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return null at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const {
    if (locator < 0)  return nullptr;
    address start = code_section(locator_sect(locator))->start();
    return start + locator_pos(locator);
  }

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void set_blob(BufferBlob* blob);
  void free_blob();                 // Free the blob, if we own one.
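
  // Worked example for the locator mapping above (values hypothetical): with
  // sect_bits == 2, locator(0x40, SECT_INSTS) encodes to (0x40 << 2) | 1 == 0x101;
  // locator_pos() and locator_sect() recover 0x40 and SECT_INSTS, and
  // locator_address() adds that position to the start of the insts section.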

  // Properties relative to the insts section:
  address insts_begin() const           { return _insts.start(); }
  address insts_end() const             { return _insts.end(); }
  void    set_insts_end(address end)    { _insts.set_end(end); }
  address insts_mark() const            { return _insts.mark(); }
  void    set_insts_mark()              { _insts.set_mark(); }

  // is there anything in the buffer other than the insts section?
  bool    is_pure() const               { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const            { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const       { assert(is_pure(), "no non-code");
                                          return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const        { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const       { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  int total_skipped_instructions_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == nullptr)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
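
  // Illustrative configuration sketch (sizes and names hypothetical), following
  // the note above that each call steals space from the insts section and should
  // be made in reverse section order:
  //   CodeBuffer cb(name, total_code_size, locs_size);
  //   cb.initialize_stubs_size(stub_size);
  //   cb.initialize_consts_size(const_size);
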
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(nullptr); }

  address last_label() const { return _last_label; }
  void set_last_label(address a) { _last_label = a; }

#ifndef PRODUCT
  AsmRemarks &asm_remarks() { return _asm_remarks; }
  DbgStrings &dbg_strings() { return _dbg_strings; }

  void clear_strings() {
    _asm_remarks.clear();
    _dbg_strings.clear();
  }
#endif

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != nullptr, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

  // Make a set of stubs final. It can create/optimize stubs.
  bool finalize_stubs();

  // Request for a shared stub to the interpreter
  void shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset);

  void set_const_section_alignment(int align) {
    _const_section_alignment = align_up(align, HeapWordSize);
  }

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void decode();
  void print_on(outputStream* st);
#endif
  // Directly disassemble code buffer.
  void decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};

// A Java method can contain calls to Java methods that can be statically bound.
// Calls to Java methods need stubs to the interpreter. Calls sharing the same Java method
// can share a stub to the interpreter.
// A SharedStubToInterpRequest is a request for a shared stub to the interpreter.
class SharedStubToInterpRequest : public ResourceObj {
 private:
  ciMethod* _shared_method;
  CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer

 public:
  SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
      _call_offset(call_offset) {}

  ciMethod* shared_method() const { return _shared_method; }
  CodeBuffer::csize_t call_offset() const { return _call_offset; }
};

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}

#endif // SHARE_ASM_CODEBUFFER_HPP