/*
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/fieldLayoutBuilder.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"

LayoutRawBlock::LayoutRawBlock(Kind kind, int size) :
  _next_block(nullptr),
  _prev_block(nullptr),
  _inline_klass(nullptr),
  _kind(kind),
  _offset(-1),
  _alignment(1),
  _size(size),
  _field_index(-1),
  _is_reference(false) {
  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
         "Otherwise, should use the constructor with a field index argument");
  assert(size > 0, "Sanity check");
}

LayoutRawBlock::LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference) :
  _next_block(nullptr),
  _prev_block(nullptr),
  _inline_klass(nullptr),
  _kind(kind),
  _offset(-1),
  _alignment(alignment),
  _size(size),
  _field_index(index),
  _is_reference(is_reference) {
  assert(kind == REGULAR || kind == FLAT || kind == INHERITED,
         "Other kinds do not have a field index");
  assert(size > 0, "Sanity check");
  assert(alignment > 0, "Sanity check");
}

bool LayoutRawBlock::fit(int size, int alignment) {
  int adjustment = 0;
  if ((_offset % alignment) != 0) {
    adjustment = alignment - (_offset % alignment);
  }
  return _size >= size + adjustment;
}
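
// Illustrative example (hypothetical values, not taken from a real layout):
// an EMPTY block with _offset == 20 and _size == 12, asked to fit a field of
// size 8 and alignment 8, first needs an adjustment of 8 - (20 % 8) == 4
// bytes to reach offset 24; it fits only because _size (12) >= 8 + 4.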

FieldGroup::FieldGroup(int contended_group) :
  _next(nullptr),
  _small_primitive_fields(nullptr),
  _big_primitive_fields(nullptr),
  _oop_fields(nullptr),
  _contended_group(contended_group),  // -1 means no contended group, 0 means default contended group
  _oop_count(0) {}

void FieldGroup::add_primitive_field(int idx, BasicType type) {
  int size = type2aelembytes(type);
  LayoutRawBlock* block = new LayoutRawBlock(idx, LayoutRawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
  if (size >= oopSize) {
    add_to_big_primitive_list(block);
  } else {
    add_to_small_primitive_list(block);
  }
}

void FieldGroup::add_oop_field(int idx) {
  int size = type2aelembytes(T_OBJECT);
  LayoutRawBlock* block = new LayoutRawBlock(idx, LayoutRawBlock::REGULAR, size, size /* alignment == size for oops */, true);
  if (_oop_fields == nullptr) {
    _oop_fields = new GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
  }
  _oop_fields->append(block);
  _oop_count++;
}

void FieldGroup::add_flat_field(int idx, InlineKlass* vk) {
  LayoutRawBlock* block = new LayoutRawBlock(idx, LayoutRawBlock::FLAT, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
  block->set_inline_klass(vk);
  if (block->size() >= oopSize) {
    add_to_big_primitive_list(block);
  } else {
    add_to_small_primitive_list(block);
  }
}

void FieldGroup::sort_by_size() {
  if (_small_primitive_fields != nullptr) {
    _small_primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
  }
  if (_big_primitive_fields != nullptr) {
    _big_primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
  }
}

void FieldGroup::add_to_small_primitive_list(LayoutRawBlock* block) {
  if (_small_primitive_fields == nullptr) {
    _small_primitive_fields = new GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
  }
  _small_primitive_fields->append(block);
}

void FieldGroup::add_to_big_primitive_list(LayoutRawBlock* block) {
  if (_big_primitive_fields == nullptr) {
    _big_primitive_fields = new GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
  }
  _big_primitive_fields->append(block);
}

FieldLayout::FieldLayout(GrowableArray<FieldInfo>* field_info, ConstantPool* cp) :
  _field_info(field_info),
  _cp(cp),
  _blocks(nullptr),
  _start(_blocks),
  _last(_blocks) {}

void FieldLayout::initialize_static_layout() {
  _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero because,
  // during bootstrapping, the size of java.lang.Class is not yet known when the layout
  // of static fields is computed. Field offsets are fixed later, when the size is known
  // (see java_lang_Class::fixup_mirror()).
  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
    _blocks->set_offset(0);
  }
}

void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == nullptr) {
    _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    bool has_fields = reconstruct_layout(super_klass);
    fill_holes(super_klass);
    if ((UseEmptySlotsInSupers && !super_klass->has_contended_annotations()) || !has_fields) {
      // Setting _start to _blocks instead of _last allows this class
      // to allocate fields in empty slots of its super classes' layouts
      _start = _blocks;
    } else {
      _start = _last;    // append fields at the end of the reconstructed layout
    }
  }
}

LayoutRawBlock* FieldLayout::first_field_block() {
  LayoutRawBlock* block = _blocks;
  while (block != nullptr
         && block->kind() != LayoutRawBlock::INHERITED
         && block->kind() != LayoutRawBlock::REGULAR
         && block->kind() != LayoutRawBlock::FLAT) {
    block = block->next_block();
  }
  return block;
}

// Insert a set of fields into a layout.
// For each field, search for an empty slot able to fit the field
// (satisfying both size and alignment requirements); if none is found,
// add the field at the end of the layout.
// Fields cannot be inserted before the block specified in the "start" argument.
void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == nullptr) return;
  if (start == nullptr) start = this->_start;
  bool last_search_success = false;
  int last_size = 0;
  int last_alignment = 0;
  for (int i = 0; i < list->length(); i++) {
    LayoutRawBlock* b = list->at(i);
    LayoutRawBlock* cursor = nullptr;
    LayoutRawBlock* candidate = nullptr;
    // If start is the last block, just append the field
    if (start == last_block()) {
      candidate = last_block();
    }
    // Before iterating over the layout to find an empty slot fitting the field's requirements,
    // check if the previous field had the same requirements and if the search for a fitting slot
    // was successful. If the requirements were the same but the search failed, a new search will
    // fail the same way, so just append the field at the end of the layout.
    else if (b->size() == last_size && b->alignment() == last_alignment && !last_search_success) {
      candidate = last_block();
    } else {
      // Iterate over the layout to find an empty slot fitting the field's requirements
      last_size = b->size();
      last_alignment = b->alignment();
      cursor = last_block()->prev_block();
      assert(cursor != nullptr, "Sanity check");
      last_search_success = true;

      while (cursor != start) {
        if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) {
          if (candidate == nullptr || cursor->size() < candidate->size()) {
            candidate = cursor;
          }
        }
        cursor = cursor->prev_block();
      }
      if (candidate == nullptr) {
        candidate = last_block();
        last_search_success = false;
      }
      assert(candidate != nullptr, "Candidate must not be null");
      assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
      assert(candidate->fit(b->size(), b->alignment()), "Candidate must be able to store the block");
    }
    insert_field_block(candidate, b);
  }
}
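
// Illustrative walk-through (hypothetical layout, for documentation only):
// given blocks [RESERVED 0-11][EMPTY @12 size 4][REGULAR @16 size 8][EMPTY @24 ...],
// adding a 4-byte field aligned on 4 scans backward from the last block,
// selects the 4-byte EMPTY slot at offset 12 as the smallest fitting slot,
// and places the field there instead of appending it at the end.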

// Used for classes with hard coded field offsets: insert a field at the specified offset.
void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
  assert(block != nullptr, "Sanity check");
  block->set_offset(offset);
  if (start == nullptr) {
    start = this->_start;
  }
  LayoutRawBlock* slot = start;
  while (slot != nullptr) {
    if ((slot->offset() <= block->offset() && (slot->offset() + slot->size()) > block->offset()) ||
        slot == _last) {
      assert(slot->kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot");
      assert(slot->offset() + slot->size() >= block->offset() + block->size(), "Matching slot must be big enough");
      if (slot->offset() < block->offset()) {
        int adjustment = block->offset() - slot->offset();
        LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
        insert(slot, adj);
      }
      insert(slot, block);
      if (slot->size() == 0) {
        remove(slot);
      }
      _field_info->adr_at(block->field_index())->set_offset(block->offset());
      return;
    }
    slot = slot->next_block();
  }
  fatal("Should have found a matching slot above, corrupted layout or invalid offset");
}

// The allocation logic uses a best fit strategy: the set of fields is allocated
// in the first empty slot big enough to contain the whole set (including padding
// to fit alignment constraints).
void FieldLayout::add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == nullptr) return;
  if (start == nullptr) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks would naturally be well aligned (no need for adjustment)
  int size = 0;
  for (int i = 0; i < list->length(); i++) {
    size += list->at(i)->size();
  }

  LayoutRawBlock* candidate = nullptr;
  if (start == last_block()) {
    candidate = last_block();
  } else {
    LayoutRawBlock* first = list->at(0);
    candidate = last_block()->prev_block();
    while (candidate->kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) {
      if (candidate == start) {
        candidate = last_block();
        break;
      }
      candidate = candidate->prev_block();
    }
    assert(candidate != nullptr, "Candidate must not be null");
    assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
    assert(candidate->fit(size, first->alignment()), "Candidate must be able to store the whole contiguous block");
  }

  for (int i = 0; i < list->length(); i++) {
    LayoutRawBlock* b = list->at(i);
    insert_field_block(candidate, b);
    assert((candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}
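
// Illustrative example (hypothetical numbers, assuming 4-byte compressed oops):
// a group of three 4-byte oop fields needs a single 12-byte slot aligned on 4;
// if no such EMPTY slot exists before the end of the layout, the whole set is
// appended at the end, keeping the three oops contiguous for the oop map.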

LayoutRawBlock* FieldLayout::insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  if (slot->size() == 0) {
    remove(slot);
  }
  _field_info->adr_at(block->field_index())->set_offset(block->offset());
  return block;
}

bool FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  bool has_instance_fields = false;
  GrowableArray<LayoutRawBlock*>* all_fields = new GrowableArray<LayoutRawBlock*>(32);
  while (ik != nullptr) {
    for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
      BasicType type = Signature::basic_type(fs.signature());
      // Static fields are not part of instance layouts, skip them
      if (fs.access_flags().is_static()) continue;
      has_instance_fields = true;
      LayoutRawBlock* block;
      if (fs.field_flags().is_null_free_inline_type()) {
        InlineKlass* vk = InlineKlass::cast(ik->get_inline_type_field_klass(fs.index()));
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, vk->get_exact_size_in_bytes(),
                                   vk->get_alignment(), false);
      } else {
        int size = type2aelembytes(type);
        // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, size, size, false);
      }
      block->set_offset(fs.offset());
      all_fields->append(block);
    }
    ik = ik->super() == nullptr ? nullptr : InstanceKlass::cast(ik->super());
  }
  all_fields->sort(LayoutRawBlock::compare_offset);
  _blocks = new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
  _blocks->set_offset(0);
  _last = _blocks;
  for (int i = 0; i < all_fields->length(); i++) {
    LayoutRawBlock* b = all_fields->at(i);
    _last->set_next_block(b);
    b->set_prev_block(_last);
    _last = b;
  }
  _start = _blocks;
  return has_instance_fields;
}

// Called during the reconstruction of a layout, after fields from super
// classes have been inserted. It fills unused slots between inserted fields
// with EMPTY blocks, so that the regular field insertion methods can work.
// This method handles classes with @Contended annotations differently
// by inserting PADDING blocks instead of EMPTY blocks, to prevent subclasses'
// fields from interfering with contended fields/classes.
void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != nullptr, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  LayoutRawBlock::Kind filling_type = super_klass->has_contended_annotations() ? LayoutRawBlock::PADDING : LayoutRawBlock::EMPTY;
  LayoutRawBlock* b = _blocks;
  while (b->next_block() != nullptr) {
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      LayoutRawBlock* empty = new LayoutRawBlock(filling_type, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == nullptr, "Invariant at this point");
  assert(b->kind() != LayoutRawBlock::EMPTY, "Sanity check");
  // If the super class has a @Contended annotation, a padding block is
  // inserted at the end to ensure that fields from the subclasses won't share
  // the cache line of the last field of the contended class
  if (super_klass->has_contended_annotations() && ContendedPaddingWidth > 0) {
    LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
    p->set_offset(b->offset() + b->size());
    b->set_next_block(p);
    p->set_prev_block(b);
    b = p;
  }
  if (!UseEmptySlotsInSupers) {
    // Add an empty slot to align fields of the subclass on a heapOopSize boundary,
    // in order to emulate the behavior of the previous algorithm
    int align = (b->offset() + b->size()) % heapOopSize;
    if (align != 0) {
      int sz = heapOopSize - align;
      LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::EMPTY, sz);
      p->set_offset(b->offset() + b->size());
      b->set_next_block(p);
      p->set_prev_block(b);
      b = p;
    }
  }
  LayoutRawBlock* last = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  last->set_offset(b->offset() + b->size());
  assert(last->offset() > 0, "Sanity check");
  b->set_next_block(last);
  last->set_prev_block(b);
  _last = last;
}
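
// Sketch of the result (hypothetical offsets, assuming a 12-byte header and
// no @Contended annotation on the super class):
//   before: [RESERVED 0-11][INHERITED int @12][INHERITED long @24]
//   after:  [RESERVED 0-11][INHERITED int @12][EMPTY @16 size 8]
//           [INHERITED long @24][EMPTY @32 size INT_MAX]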

LayoutRawBlock* FieldLayout::insert(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  assert((slot->size() - block->size()) < slot->size(), "underflow checking");
  assert(slot->size() - block->size() >= 0, "no negative size allowed");
  slot->set_size(slot->size() - block->size());
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != nullptr) {
    block->prev_block()->set_next_block(block);
  }
  if (_blocks == slot) {
    _blocks = block;
  }
  return block;
}
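
// Illustrative example (hypothetical values): inserting a 4-byte block into
// an EMPTY slot at offset 16 of size 12 places the block at offset 16 and
// shrinks the slot to offset 20, size 8; the shrunk slot stays in the list,
// so it can still receive further fields.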

void FieldLayout::remove(LayoutRawBlock* block) {
  assert(block != nullptr, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    _blocks = block->next_block();
    if (_blocks != nullptr) {
      _blocks->set_prev_block(nullptr);
    }
  } else {
    assert(block->prev_block() != nullptr, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}

void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlass* super) {
  ResourceMark rm;
  LayoutRawBlock* b = _blocks;
  while (b != _last) {
    switch (b->kind()) {
      case LayoutRawBlock::REGULAR: {
        FieldInfo* fi = _field_info->adr_at(b->field_index());
        output->print_cr(" @%d \"%s\" %s %d/%d %s",
                         b->offset(),
                         fi->name(_cp)->as_C_string(),
                         fi->signature(_cp)->as_C_string(),
                         b->size(),
                         b->alignment(),
                         "REGULAR");
        break;
      }
      case LayoutRawBlock::FLAT: {
        FieldInfo* fi = _field_info->adr_at(b->field_index());
        output->print_cr(" @%d \"%s\" %s %d/%d %s",
                         b->offset(),
                         fi->name(_cp)->as_C_string(),
                         fi->signature(_cp)->as_C_string(),
                         b->size(),
                         b->alignment(),
                         "FLAT");
        break;
      }
      case LayoutRawBlock::RESERVED: {
        output->print_cr(" @%d %d/- %s",
                         b->offset(),
                         b->size(),
                         "RESERVED");
        break;
      }
      case LayoutRawBlock::INHERITED: {
        assert(!is_static, "Static fields are not inherited in layouts");
        assert(super != nullptr, "super klass must be provided to retrieve inherited fields info");
        bool found = false;
        const InstanceKlass* ik = super;
        while (!found && ik != nullptr) {
          for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
            if (fs.offset() == b->offset()) {
              output->print_cr(" @%d \"%s\" %s %d/%d %s",
                               b->offset(),
                               fs.name()->as_C_string(),
                               fs.signature()->as_C_string(),
                               b->size(),
                               b->size(), // so far, alignment constraint == size, will change with Valhalla
                               "INHERITED");
              found = true;
              break;
            }
          }
          ik = ik->java_super();
        }
        break;
      }
      case LayoutRawBlock::EMPTY:
        output->print_cr(" @%d %d/1 %s",
                         b->offset(),
                         b->size(),
                         "EMPTY");
        break;
      case LayoutRawBlock::PADDING:
        output->print_cr(" @%d %d/1 %s",
                         b->offset(),
                         b->size(),
                         "PADDING");
        break;
    }
    b = b->next_block();
  }
}

FieldLayoutBuilder::FieldLayoutBuilder(const Symbol* classname, const InstanceKlass* super_klass, ConstantPool* constant_pool,
                                       GrowableArray<FieldInfo>* field_info, bool is_contended, bool is_inline_type,
                                       FieldLayoutInfo* info, Array<InlineKlass*>* inline_type_field_klasses) :
  _classname(classname),
  _super_klass(super_klass),
  _constant_pool(constant_pool),
  _field_info(field_info),
  _info(info),
  _inline_type_field_klasses(inline_type_field_klasses),
  _root_group(nullptr),
  _contended_groups(GrowableArray<FieldGroup*>(8)),
  _static_fields(nullptr),
  _layout(nullptr),
  _static_layout(nullptr),
  _nonstatic_oopmap_count(0),
  _alignment(-1),
  _first_field_offset(-1),
  _exact_size_in_bytes(-1),
  _atomic_field_count(0),
  _fields_size_sum(0),
  _has_nonstatic_fields(false),
  _has_inline_type_fields(false),
  _is_contended(is_contended),
  _is_inline_type(is_inline_type),
  _has_flattening_information(is_inline_type),
  _has_nonatomic_values(false),
  _nullable_atomic_flat_candidate(false) {}

FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
  assert(g > 0, "must only be called for named contended groups");
  FieldGroup* fg = nullptr;
  for (int i = 0; i < _contended_groups.length(); i++) {
    fg = _contended_groups.at(i);
    if (fg->contended_group() == g) return fg;
  }
  fg = new FieldGroup(g);
  _contended_groups.append(fg);
  return fg;
}

void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_field_info, _constant_pool);
  const InstanceKlass* super_klass = _super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != nullptr) {
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_field_info, _constant_pool);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
}

// Field sorting for regular (non-inline) classes:
//   - fields are split into static and non-static fields
//   - non-static fields are also sorted according to their contention group
//     (support of the @Contended annotation)
//   - the @Contended annotation is ignored for static fields
//   - field flattening decisions are taken in this method
void FieldLayoutBuilder::regular_field_sorting(TRAPS) {
  int idx = 0;
  for (GrowableArrayIterator<FieldInfo> it = _field_info->begin(); it != _field_info->end(); ++it, ++idx) {
    FieldGroup* group = nullptr;
    FieldInfo fieldinfo = *it;
    if (fieldinfo.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      _atomic_field_count++;  // we might decrement this
      if (fieldinfo.field_flags().is_contended()) {
        int g = fieldinfo.contended_group();
        if (g == 0) {
          // default (anonymous) contended group: each field gets its own group
          group = new FieldGroup(g);
          _contended_groups.append(group);
        } else {
          group = get_or_create_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != nullptr, "invariant");
    BasicType type = Signature::basic_type(fieldinfo.signature(_constant_pool));
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(idx, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (!fieldinfo.field_flags().is_null_free_inline_type()) {
        if (group != _static_fields) _nonstatic_oopmap_count++;
        group->add_oop_field(idx);
      } else {
        assert(type != T_ARRAY, "null free ptr to array not supported");
        _has_inline_type_fields = true;
        if (group == _static_fields) {
          // static fields are never flat
          group->add_oop_field(idx);
        } else {
          // The check below is performed for non-static fields; it should also be performed
          // for static fields, but at this stage it is not guaranteed that the klass of the
          // static field has been loaded, so the test for static fields is delayed until the
          // linking phase
          Klass* klass = _inline_type_field_klasses->at(idx);
          assert(klass != nullptr, "Sanity check");
          InlineKlass* vk = InlineKlass::cast(klass);
          assert(vk->is_implicitly_constructible(), "must be, should have been checked in post_process_parsed_stream()");
          _has_flattening_information = true;
          // Flattening decision to be taken here
          // This code assumes all verifications have already been performed
          // (field's type has been loaded and it is an inline klass)
          bool too_big_to_flatten = (InlineFieldMaxFlatSize >= 0 &&
                                    (vk->size_helper() * HeapWordSize) > InlineFieldMaxFlatSize);
          bool too_atomic_to_flatten = vk->must_be_atomic() || AlwaysAtomicAccesses;
          bool too_volatile_to_flatten = fieldinfo.access_flags().is_volatile();
          if (vk->is_naturally_atomic()) {
            too_atomic_to_flatten = false;
            //too_volatile_to_flatten = false; //FIXME
            // Currently, volatile fields are never flat, this could change in the future
          }
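          // Illustrative example (hypothetical type and flag values): with
          // -XX:InlineFieldMaxFlatSize=128, a 16-byte non-volatile inline type
          // that does not require atomic access is flattened; declaring the
          // field volatile, or running with -XX:InlineFieldMaxFlatSize=8,
          // would keep it as a regular (non-flat) oop field instead.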
          if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
            group->add_flat_field(idx, vk);
            _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
            _field_info->adr_at(idx)->field_flags_addr()->update_flat(true);
            if (!vk->is_atomic()) {  // flat and non-atomic: take note
              _has_nonatomic_values = true;
              _atomic_field_count--;  // every other field is atomic but this one
            }
          } else {
            _nonstatic_oopmap_count++;
            group->add_oop_field(idx);
          }
        }
      }
      break;
    default:
      fatal("Unexpected BasicType");
    }
  }
  _root_group->sort_by_size();
  _static_fields->sort_by_size();
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      _contended_groups.at(i)->sort_by_size();
    }
  }
}

/* Field sorting for inline classes:
 *   - because inline classes are immutable, the @Contended annotation is ignored
 *     when computing their layout (with only read operations, there's no false
 *     sharing issue)
 *   - this method also records the alignment of the field with the most
 *     constraining alignment; this value is then used as the alignment
 *     constraint when flattening this inline type into another container
 *   - field flattening decisions are taken in this method (those decisions are
 *     currently only based on the size of the fields to be flattened, the size
 *     of the resulting instance is not considered)
 */
void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
  assert(_is_inline_type, "Should only be used for inline classes");
  int alignment = 1;
  for (GrowableArrayIterator<FieldInfo> it = _field_info->begin(); it != _field_info->end(); ++it) {
    FieldGroup* group = nullptr;
    FieldInfo fieldinfo = *it;
    int field_alignment = 1;
    if (fieldinfo.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      _atomic_field_count++;  // we might decrement this
      group = _root_group;
    }
    assert(group != nullptr, "invariant");
    BasicType type = Signature::basic_type(fieldinfo.signature(_constant_pool));
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      if (group != _static_fields) {
        field_alignment = type2aelembytes(type); // alignment == size for primitive types
      }
      group->add_primitive_field(fieldinfo.index(), type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (!fieldinfo.field_flags().is_null_free_inline_type()) {
        if (group != _static_fields) {
          _nonstatic_oopmap_count++;
          field_alignment = type2aelembytes(type); // alignment == size for oops
        }
        group->add_oop_field(fieldinfo.index());
      } else {
        assert(type != T_ARRAY, "null free ptr to array not supported");
        _has_inline_type_fields = true;
        if (group == _static_fields) {
          // static fields are never flat
          group->add_oop_field(fieldinfo.index());
        } else {
          // The check below is performed for non-static fields; it should also be performed
          // for static fields, but at this stage it is not guaranteed that the klass of the
          // static field has been loaded, so the test for static fields is delayed until the
          // linking phase
          Klass* klass = _inline_type_field_klasses->at(fieldinfo.index());
          assert(klass != nullptr, "Sanity check");
          InlineKlass* vk = InlineKlass::cast(klass);
          assert(vk->is_implicitly_constructible(), "must be, should have been checked in post_process_parsed_stream()");
          // Flattening decision to be taken here
          // This code assumes all verifications have already been performed
          // (field's type has been loaded and it is an inline klass)
          bool too_big_to_flatten = (InlineFieldMaxFlatSize >= 0 &&
                                    (vk->size_helper() * HeapWordSize) > InlineFieldMaxFlatSize);
          bool too_atomic_to_flatten = vk->must_be_atomic() || AlwaysAtomicAccesses;
          bool too_volatile_to_flatten = fieldinfo.access_flags().is_volatile();
          if (vk->is_naturally_atomic()) {
            too_atomic_to_flatten = false;
            //too_volatile_to_flatten = false; //FIXME
            // Currently, volatile fields are never flat, this could change in the future
          }
          if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
            group->add_flat_field(fieldinfo.index(), vk);
            _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
            field_alignment = vk->get_alignment();
            _field_info->adr_at(fieldinfo.index())->field_flags_addr()->update_flat(true);
            if (!vk->is_atomic()) {  // flat and non-atomic: take note
              _has_nonatomic_values = true;
              _atomic_field_count--;  // every other field is atomic but this one
            }
          } else {
            _nonstatic_oopmap_count++;
            field_alignment = type2aelembytes(T_OBJECT);
            group->add_oop_field(fieldinfo.index());
          }
        }
      }
      break;
    default:
      fatal("Unexpected BasicType");
    }
    if (!fieldinfo.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
  }
  _alignment = alignment;
  if (!_has_nonstatic_fields) {
    // There are a number of fixes required throughout the type system and JIT
    Exceptions::fthrow(THREAD_AND_LOCATION,
                       vmSymbols::java_lang_ClassFormatError(),
                       "Value Types do not support zero instance size yet");
    return;
  }
}

void FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) {
  if (ContendedPaddingWidth > 0) {
    LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(slot, padding);
  }
}

/* Computation of regular classes layout is an evolution of the previous default layout
 * (FieldAllocationStyle 1):
 *   - primitive fields (both primitive types and flat inline types) are allocated
 *     first, from the biggest to the smallest
 *   - then oop fields are allocated (to increase the chance of having contiguous oops
 *     and a simpler oopmap)
 */
void FieldLayoutBuilder::compute_regular_layout(TRAPS) {
  bool need_tail_padding = false;
  prologue();
  regular_field_sorting(CHECK);
  if (_is_contended) {
    _layout->set_start(_layout->last_block());
    // Insertion is currently easy because the current strategy doesn't try to fill holes
    // in super classes' layouts; the _start block is consequently the last block
    insert_contended_padding(_layout->start());
    need_tail_padding = true;
  }
  _layout->add(_root_group->big_primitive_fields());
  _layout->add(_root_group->small_primitive_fields());
  _layout->add(_root_group->oop_fields());

  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      FieldGroup* cg = _contended_groups.at(i);
      LayoutRawBlock* start = _layout->last_block();
      insert_contended_padding(start);
      _layout->add(cg->big_primitive_fields());
      _layout->add(cg->small_primitive_fields(), start);
      _layout->add(cg->oop_fields(), start);
      need_tail_padding = true;
    }
  }

  if (need_tail_padding) {
    insert_contended_padding(_layout->last_block());
  }
  // Warning: InstanceMirrorKlass expects static oops to be allocated first
  _static_layout->add_contiguously(_static_fields->oop_fields());
  _static_layout->add(_static_fields->big_primitive_fields());
  _static_layout->add(_static_fields->small_primitive_fields());

  epilogue();
}
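
// Resulting shape for a hypothetical class { byte b; long l; Object o; },
// assuming a 12-byte header and 4-byte compressed oops (illustrative only):
// the long goes first at @16 (leaving a 4-byte hole at @12), the byte is
// then best-fit into that hole at @12, and the oop does not fit in the
// remaining 3 bytes so it is appended at @24, giving:
//   [RESERVED 0-11]["b" @12][EMPTY @13 size 3]["l" @16]["o" @24]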

/* Computation of inline classes layout has a slightly different strategy than for
 * regular classes. Regular classes have their oop fields allocated at the end
 * of the layout to improve GC performance. Unfortunately, this strategy
 * increases the number of empty slots inside an instance. Because the purpose
 * of inline classes is to be embedded into other containers, it is critical
 * to keep their size as small as possible. For this reason, the allocation
 * strategy is:
 *   - big primitive fields (primitive types and flat inline types at least
 *     as big as an oop) are allocated first (from the biggest to the smallest)
 *   - then oop fields
 *   - then small primitive fields (from the biggest to the smallest)
 */
void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
  prologue();
  inline_class_field_sorting(CHECK);
  // Inline types are not polymorphic, so they cannot inherit fields.
  // As a consequence, at this stage, the layout must be composed of a RESERVED
  // block, followed by an EMPTY block.
  assert(_layout->start()->kind() == LayoutRawBlock::RESERVED, "Unexpected");
  assert(_layout->start()->next_block()->kind() == LayoutRawBlock::EMPTY, "Unexpected");
  LayoutRawBlock* first_empty = _layout->start()->next_block();
  if (first_empty->offset() % _alignment != 0) {
    LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, _alignment - (first_empty->offset() % _alignment));
    _layout->insert(first_empty, padding);
    _layout->set_start(padding->next_block());
  }

  _layout->add(_root_group->big_primitive_fields());
  _layout->add(_root_group->oop_fields());
  _layout->add(_root_group->small_primitive_fields());

  LayoutRawBlock* first_field = _layout->first_field_block();
  if (first_field != nullptr) {
    _first_field_offset = first_field->offset();
    _exact_size_in_bytes = _layout->last_block()->offset() - first_field->offset();
  } else {
    // special case for empty value types
    _first_field_offset = _layout->blocks()->size();
    _exact_size_in_bytes = 0;
  }

  // Warning: InstanceMirrorKlass expects static oops to be allocated first
  _static_layout->add_contiguously(_static_fields->oop_fields());
  _static_layout->add(_static_fields->big_primitive_fields());
  _static_layout->add(_static_fields->small_primitive_fields());

  epilogue();
}
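
// Illustrative example (hypothetical numbers): for an inline class with an
// int and a byte fields, a 12-byte header gives _first_field_offset == 12
// (int @12, byte @16) and a last (EMPTY) block starting at 17, so
// _exact_size_in_bytes == 17 - 12 == 5; a container flattening this type
// only needs those 5 bytes, plus whatever its own alignment requires.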

void FieldLayoutBuilder::add_flat_field_oopmap(OopMapBlocksBuilder* nonstatic_oop_maps,
                                               InlineKlass* vklass, int offset) {
  int diff = offset - vklass->first_field_offset();
  const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
  const OopMapBlock* last_map = map + vklass->nonstatic_oop_map_count();
  while (map < last_map) {
    nonstatic_oop_maps->add(map->offset() + diff, map->count());
    map++;
  }
}
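
// Illustrative example (hypothetical offsets): if a flat field of type V is
// placed at offset 32 in the holder and V's own first field starts at offset
// 12, with one of V's oop map entries at offset 16, that entry is
// re-registered in the holder at 16 + (32 - 12) == 36, i.e. shifted by the
// embedding offset.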

void FieldLayoutBuilder::register_embedded_oops_from_list(OopMapBlocksBuilder* nonstatic_oop_maps, GrowableArray<LayoutRawBlock*>* list) {
  if (list != nullptr) {
    for (int i = 0; i < list->length(); i++) {
      LayoutRawBlock* f = list->at(i);
      if (f->kind() == LayoutRawBlock::FLAT) {
        InlineKlass* vk = f->inline_klass();
        assert(vk != nullptr, "Should have been initialized");
        if (vk->contains_oops()) {
          add_flat_field_oopmap(nonstatic_oop_maps, vk, f->offset());
        }
      }
    }
  }
}

void FieldLayoutBuilder::register_embedded_oops(OopMapBlocksBuilder* nonstatic_oop_maps, FieldGroup* group) {
  if (group->oop_fields() != nullptr) {
    for (int i = 0; i < group->oop_fields()->length(); i++) {
      LayoutRawBlock* b = group->oop_fields()->at(i);
      nonstatic_oop_maps->add(b->offset(), 1);
    }
  }
  register_embedded_oops_from_list(nonstatic_oop_maps, group->big_primitive_fields());
  register_embedded_oops_from_list(nonstatic_oop_maps, group->small_primitive_fields());
}

void FieldLayoutBuilder::epilogue() {
  // Computing oopmaps
  int super_oop_map_count = (_super_klass == nullptr) ? 0 : _super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;
  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count);
  if (super_oop_map_count > 0) {
    nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
                                                    _super_klass->nonstatic_oop_map_count());
  }
  register_embedded_oops(nonstatic_oop_maps, _root_group);
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      FieldGroup* cg = _contended_groups.at(i);
      if (cg->oop_count() > 0) {
        assert(cg->oop_fields() != nullptr && cg->oop_fields()->at(0) != nullptr, "oop_count > 0 but no oop fields found");
        register_embedded_oops(nonstatic_oop_maps, cg);
      }
    }
  }
  nonstatic_oop_maps->compact();

  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->_instance_size = align_object_size(instance_end / wordSize);
  _info->_static_field_size = static_fields_size;
  _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->_has_nonstatic_fields = _has_nonstatic_fields;
  _info->_has_inline_fields = _has_inline_type_fields;

  // An inline type is naturally atomic if it has just one field, and
  // that field is simple enough.
  _info->_is_naturally_atomic = (_is_inline_type &&
                                 (_atomic_field_count <= 1) &&
                                 !_has_nonatomic_values &&
                                 _contended_groups.is_empty());
  // This may be too restrictive, since if all the fields fit in 64
  // bits we could make the decision to align instances of this class
  // to 64-bit boundaries, and load and store them as single words.
  // And on machines which supported larger atomics we could similarly
  // allow larger values to be atomic, if properly aligned.

  if (PrintFieldLayout || (PrintInlineLayout && _has_flattening_information)) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _classname->as_C_string());
    tty->print_cr("Instance fields:");
    _layout->print(tty, false, _super_klass);
    tty->print_cr("Static fields:");
    _static_layout->print(tty, true, nullptr);
    tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
    if (_is_inline_type) {
      tty->print_cr("First field offset = %d", _first_field_offset);
      tty->print_cr("Alignment = %d bytes", _alignment);
      tty->print_cr("Exact size = %d bytes", _exact_size_in_bytes);
    }
    tty->print_cr("---");
  }
}

void FieldLayoutBuilder::build_layout(TRAPS) {
  if (_is_inline_type) {
    compute_inline_class_layout(CHECK);
  } else {
    compute_regular_layout(CHECK);
  }
}