1 /*
  2  * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "jvm.h"
 27 #include "classfile/classFileParser.hpp"
 28 #include "classfile/fieldLayoutBuilder.hpp"
 29 #include "classfile/systemDictionary.hpp"
 30 #include "classfile/vmSymbols.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "oops/array.hpp"
 33 #include "oops/fieldStreams.inline.hpp"
 34 #include "oops/instanceMirrorKlass.hpp"
 35 #include "oops/instanceKlass.inline.hpp"
 36 #include "oops/klass.inline.hpp"
 37 #include "oops/inlineKlass.inline.hpp"
 38 #include "runtime/fieldDescriptor.inline.hpp"
 39 

// Constructor for blocks that do not carry a field index: EMPTY (free space),
// RESERVED (space not usable for fields, e.g. the area before the first field),
// PADDING (@Contended spacing) and INHERITED placeholder blocks.
LayoutRawBlock::LayoutRawBlock(Kind kind, int size) :
  _next_block(NULL),
  _prev_block(NULL),
  _inline_klass(NULL),
  _kind(kind),
  _offset(-1),        // offset is assigned later, when the block is placed in a layout
  _alignment(1),      // non-field blocks have no alignment constraint
  _size(size),
  _field_index(-1),   // no declared field is associated with this block
  _is_reference(false) {
  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
         "Otherwise, should use the constructor with a field index argument");
  assert(size > 0, "Sanity check");
}
 54 
 55 
// Constructor for blocks that represent a declared field (REGULAR, INLINED)
// or an inherited field placeholder (INHERITED).
// index      - index of the field in the class's field array
// size       - size of the field in bytes
// alignment  - required alignment of the field's offset
// is_reference - true if the field holds an oop (used for oop map computation)
LayoutRawBlock::LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference) :
 _next_block(NULL),
 _prev_block(NULL),
 _inline_klass(NULL),
 _kind(kind),
 _offset(-1),   // offset is assigned later, when the block is placed in a layout
 _alignment(alignment),
 _size(size),
 _field_index(index),
 _is_reference(is_reference) {
  assert(kind == REGULAR || kind == INLINED || kind == INHERITED,
         "Other kind do not have a field index");
  assert(size > 0, "Sanity check");
  assert(alignment > 0, "Sanity check");
}
 71 
 72 bool LayoutRawBlock::fit(int size, int alignment) {
 73   int adjustment = 0;
 74   if ((_offset % alignment) != 0) {
 75     adjustment = alignment - (_offset % alignment);
 76   }
 77   return _size >= size + adjustment;
 78 }
 79 
// A FieldGroup gathers the fields that must be laid out together: the root
// group, the static group, or one group per @Contended annotation group.
// The field lists are allocated lazily, on first insertion.
FieldGroup::FieldGroup(int contended_group) :
  _next(NULL),
  _small_primitive_fields(NULL),
  _big_primitive_fields(NULL),
  _oop_fields(NULL),
  _contended_group(contended_group),  // -1 means no contended group, 0 means default contended group
  _oop_count(0) {}
 87 
 88 void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
 89   int size = type2aelembytes(type);
 90   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
 91   if (size >= oopSize) {
 92     add_to_big_primitive_list(block);
 93   } else {
 94     add_to_small_primitive_list(block);
 95   }

 96 }
 97 
 98 void FieldGroup::add_oop_field(AllFieldStream fs) {
 99   int size = type2aelembytes(T_OBJECT);
100   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for oops */, true);
101   if (_oop_fields == NULL) {
102     _oop_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
103   }
104   _oop_fields->append(block);
105   _oop_count++;
106 }
107 
108 void FieldGroup::add_inlined_field(AllFieldStream fs, InlineKlass* vk) {
109   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INLINED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
110   block->set_inline_klass(vk);
111   if (block->size() >= oopSize) {
112     add_to_big_primitive_list(block);
113   } else {
114     add_to_small_primitive_list(block);
115   }
116 }
117 
118 void FieldGroup::sort_by_size() {
119   if (_small_primitive_fields != NULL) {
120     _small_primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
121   }
122   if (_big_primitive_fields != NULL) {
123     _big_primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
124   }
125 }
126 
127 void FieldGroup::add_to_small_primitive_list(LayoutRawBlock* block) {
128   if (_small_primitive_fields == NULL) {
129     _small_primitive_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
130   }
131   _small_primitive_fields->append(block);
132 }
133 
134 void FieldGroup::add_to_big_primitive_list(LayoutRawBlock* block) {
135   if (_big_primitive_fields == NULL) {
136     _big_primitive_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
137   }
138   _big_primitive_fields->append(block);
139 }
140 
// A FieldLayout is a doubly-linked list of LayoutRawBlocks describing a class
// layout. The list stays empty until initialize_static_layout() or
// initialize_instance_layout() is invoked.
FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) :
  _fields(fields),
  _cp(cp),
  _blocks(NULL),
  _start(_blocks),  // _blocks is still NULL here, so _start and _last begin as NULL
  _last(_blocks) {}
147 
// Sets up the layout used for static fields: one unbounded EMPTY block,
// preceded (when its size is already known) by a RESERVED block covering the
// java.lang.Class instance's own fields.
void FieldLayout::initialize_static_layout() {
  _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero, because
  // during bootstrapping, the size of the java.lang.Class is still not known when layout
  // of static field is computed. Field offsets are fixed later when the size is known
  // (see java_lang_Class::fixup_mirror())
  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
    _blocks->set_offset(0);
  }
}
162 
// Sets up the layout used for instance (non-static) fields.
// Without a super class: an unbounded EMPTY block with a RESERVED block for
// the object header inserted at offset 0.
// With a super class: the super class's layout is reconstructed first, then
// _start is positioned to decide whether new fields may reuse empty slots
// inside the super class layout or must be appended after it.
void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == NULL) {
    _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    bool has_fields = reconstruct_layout(super_klass);
    fill_holes(super_klass);
    if ((UseEmptySlotsInSupers && !super_klass->has_contended_annotations()) || !has_fields) {
      _start = _blocks; // Setting _start to _blocks instead of _last would allow subclasses
      // to allocate fields in empty slots of their super classes
    } else {
      _start = _last;    // append fields at the end of the reconstructed layout
    }
  }
}
181 
182 LayoutRawBlock* FieldLayout::first_field_block() {
183   LayoutRawBlock* block = _blocks;
184   while (block != NULL
185          && block->kind() != LayoutRawBlock::INHERITED
186          && block->kind() != LayoutRawBlock::REGULAR
187          && block->kind() != LayoutRawBlock::INLINED) {
188     block = block->next_block();
189   }
190   return block;
191 }
192 
193 // Insert a set of fields into a layout.
194 // For each field, search for an empty slot able to fit the field

195 // (satisfying both size and alignment requirements), if none is found,
196 // add the field at the end of the layout.
197 // Fields cannot be inserted before the block specified in the "start" argument
// Inserts each field of 'list' into the layout, using a best-fit strategy:
// the smallest EMPTY block (at or after 'start') that can hold the field with
// its alignment is chosen; if none fits, the field is appended at the end.
void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == NULL) return;
  if (start == NULL) start = this->_start;
  // Cache of the previous field's size/alignment and whether its slot search
  // succeeded, used to skip provably-futile searches below.
  bool last_search_success = false;
  int last_size = 0;
  int last_alignment = 0;
  for (int i = 0; i < list->length(); i ++) {
    LayoutRawBlock* b = list->at(i);
    LayoutRawBlock* cursor = NULL;
    LayoutRawBlock* candidate = NULL;

    // if start is the last block, just append the field
    if (start == last_block()) {
      candidate = last_block();
    }
    // Before iterating over the layout to find an empty slot fitting the field's requirements,
    // check if the previous field had the same requirements and if the search for a fitting slot
    // was successful. If the requirements were the same but the search failed, a new search will
    // fail the same way, so just append the field at the end of the layout.
    else  if (b->size() == last_size && b->alignment() == last_alignment && !last_search_success) {
      candidate = last_block();
    } else {
      // Iterate over the layout to find an empty slot fitting the field's requirements
      last_size = b->size();
      last_alignment = b->alignment();
      cursor = last_block()->prev_block();
      assert(cursor != NULL, "Sanity check");
      last_search_success = true;

      // Walk backwards from the end to 'start', keeping the smallest fitting
      // EMPTY block (best fit).
      while (cursor != start) {
        if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) {
          if (candidate == NULL || cursor->size() < candidate->size()) {
            candidate = cursor;
          }
        }
        cursor = cursor->prev_block();
      }
      if (candidate == NULL) {
        candidate = last_block();
        last_search_success = false;
      }
      assert(candidate != NULL, "Candidate must not be null");
      assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
      assert(candidate->fit(b->size(), b->alignment()), "Candidate must be able to store the block");
    }

    insert_field_block(candidate, b);
  }
}
245 
// Used for classes with hard coded field offsets: insert a field at the specified offset.
// Inserts 'block' at the exact hard-coded 'offset': scans forward from
// 'start' for the EMPTY slot containing that offset, splits off any leading
// space as a new EMPTY block, then records the field's final offset.
void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
  assert(block != NULL, "Sanity check");
  block->set_offset(offset);
  if (start == NULL) {
    start = this->_start;
  }
  LayoutRawBlock* slot = start;
  while (slot != NULL) {
    // Matching slot: either it covers the requested offset, or we reached the
    // trailing unbounded block.
    if ((slot->offset() <= block->offset() && (slot->offset() + slot->size()) > block->offset()) ||
        slot == _last){
      assert(slot->kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot");
      assert(slot->size() >= block->offset() + block->size() ,"Matching slot must be big enough");
      if (slot->offset() < block->offset()) {
        // Split off the space before the requested offset as a new EMPTY block.
        int adjustment = block->offset() - slot->offset();
        LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
        insert(slot, adj);
      }
      insert(slot, block);
      if (slot->size() == 0) {
        remove(slot);
      }
      // Publish the final offset into the class's field array.
      FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
      return;
    }
    slot = slot->next_block();
  }
  fatal("Should have found a matching slot above, corrupted layout or invalid offset");
}
275 
276 // The allocation logic uses a best fit strategy: the set of fields is allocated
277 // in the first empty slot big enough to contain the whole set ((including padding
278 // to fit alignment constraints).
// Inserts all fields of 'list' back-to-back into a single slot: the first
// EMPTY block (searching backwards from the end down to 'start') big enough
// for the combined size and the first field's alignment; otherwise they are
// appended at the end of the layout.
void FieldLayout::add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == NULL) return;
  if (start == NULL) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks would naturally be well aligned (no need for adjustment)
  int size = 0;
  for (int i = 0; i < list->length(); i++) {
    size += list->at(i)->size();
  }

  LayoutRawBlock* candidate = NULL;
  if (start == last_block()) {
    candidate = last_block();
  } else {
    LayoutRawBlock* first = list->at(0);
    candidate = last_block()->prev_block();
    // Walk backwards until a fitting EMPTY block is found or 'start' is reached.
    while (candidate->kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) {
      if (candidate == start) {
        candidate = last_block();  // no fitting slot: append at the end
        break;
      }
      candidate = candidate->prev_block();
    }
    assert(candidate != NULL, "Candidate must not be null");
    assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
    assert(candidate->fit(size, first->alignment()), "Candidate must be able to store the whole contiguous block");
  }

  // Each insertion shrinks 'candidate' in place, so the fields end up contiguous.
  for (int i = 0; i < list->length(); i++) {
    LayoutRawBlock* b = list->at(i);
    insert_field_block(candidate, b);
    assert((candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}
315 
// Inserts a field block into an EMPTY slot, adding an EMPTY padding block
// first if the slot's offset does not satisfy the field's alignment, and
// removing the slot once it has been fully consumed. Records the field's
// final offset in the field array and returns the inserted block.
LayoutRawBlock* FieldLayout::insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    // Insert alignment padding in front of the field.
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  if (slot->size() == 0) {
    remove(slot);  // the slot was consumed entirely
  }
  FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
  return block;
}
330 
// Rebuilds the layout of 'ik' and all its super classes as a list of
// INHERITED blocks (sorted by offset, behind a RESERVED header block).
// Returns true if at least one instance field was found.
bool FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  bool has_instance_fields = false;
  GrowableArray<LayoutRawBlock*>* all_fields = new GrowableArray<LayoutRawBlock*>(32);
  // Collect instance fields from ik and every super class.
  while (ik != NULL) {
    for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
      BasicType type = Signature::basic_type(fs.signature());
      // distinction between static and non-static fields is missing
      if (fs.access_flags().is_static()) continue;
      has_instance_fields = true;
      LayoutRawBlock* block;
      if (type == T_INLINE_TYPE) {
        // Flattened field: size/alignment come from the field's inline klass.
        InlineKlass* vk = InlineKlass::cast(ik->get_inline_type_field_klass(fs.index()));
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, vk->get_exact_size_in_bytes(),
                                   vk->get_alignment(), false);

      } else {
        int size = type2aelembytes(type);
        // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, size, size, false);
      }
      block->set_offset(fs.offset());
      all_fields->append(block);
    }
    ik = ik->super() == NULL ? NULL : InstanceKlass::cast(ik->super());
  }

  all_fields->sort(LayoutRawBlock::compare_offset);
  // Start the list with a RESERVED block covering the object header area.
  _blocks = new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
  _blocks->set_offset(0);
  _last = _blocks;

  // Chain the sorted blocks into the doubly-linked layout list.
  for(int i = 0; i < all_fields->length(); i++) {
    LayoutRawBlock* b = all_fields->at(i);
    _last->set_next_block(b);
    b->set_prev_block(_last);
    _last = b;
  }
  _start = _blocks;
  return has_instance_fields;
}
369 
370 // Called during the reconstruction of a layout, after fields from super
371 // classes have been inserted. It fills unused slots between inserted fields
372 // with EMPTY blocks, so the regular field insertion methods would work.
373 // This method handles classes with @Contended annotations differently
374 // by inserting PADDING blocks instead of EMPTY block to prevent subclasses'
375 // fields to interfere with contended fields/classes.
void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != NULL, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  // Holes inside a @Contended super class become PADDING (unusable by
  // subclass fields); otherwise they become EMPTY (reusable).
  LayoutRawBlock::Kind filling_type = super_klass->has_contended_annotations() ? LayoutRawBlock::PADDING: LayoutRawBlock::EMPTY;
  LayoutRawBlock* b = _blocks;
  // Splice a filler block into every gap between consecutive blocks.
  while (b->next_block() != NULL) {
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      LayoutRawBlock* empty = new LayoutRawBlock(filling_type, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == NULL, "Invariant at this point");
  assert(b->kind() != LayoutRawBlock::EMPTY, "Sanity check");

  // If the super class has @Contended annotation, a padding block is
  // inserted at the end to ensure that fields from the subclasses won't share
  // the cache line of the last field of the contended class
  if (super_klass->has_contended_annotations() && ContendedPaddingWidth > 0) {
    LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
    p->set_offset(b->offset() + b->size());
    b->set_next_block(p);
    p->set_prev_block(b);
    b = p;
  }

  if (!UseEmptySlotsInSupers) {
    // Add an empty slots to align fields of the subclass on a heapOopSize boundary
    // in order to emulate the behavior of the previous algorithm
    int align = (b->offset() + b->size()) % heapOopSize;
    if (align != 0) {
      int sz = heapOopSize - align;
      LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::EMPTY, sz);
      p->set_offset(b->offset() + b->size());
      b->set_next_block(p);
      p->set_prev_block(b);
      b = p;
    }
  }

  // Terminate the layout with an unbounded EMPTY block where subsequent
  // fields can be appended.
  LayoutRawBlock* last = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  last->set_offset(b->offset() + b->size());
  assert(last->offset() > 0, "Sanity check");
  b->set_next_block(last);
  last->set_prev_block(b);
  _last = last;
}
425 
// Inserts 'block' at the beginning of the EMPTY 'slot': the block takes over
// the slot's offset, and the slot is shifted forward and shrunk by the
// block's size. The caller is responsible for removing the slot if its size
// drops to zero. Returns the inserted block.
LayoutRawBlock* FieldLayout::insert(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  assert((slot->size() - block->size()) < slot->size(), "underflow checking");
  assert(slot->size() - block->size() >= 0, "no negative size allowed");
  slot->set_size(slot->size() - block->size());
  // Link the block into the list, just before the slot.
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != NULL) {
    block->prev_block()->set_next_block(block);
  }
  if (_blocks == slot) {
    _blocks = block;  // the block is the new head of the list
  }
  return block;
}
445 
// Unlinks 'block' from the layout list. The trailing unbounded block (_last)
// must never be removed. If the removed block was _start, _start is moved to
// its predecessor.
void FieldLayout::remove(LayoutRawBlock* block) {
  assert(block != NULL, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    // Removing the head: its successor becomes the new head.
    _blocks = block->next_block();
    if (_blocks != NULL) {
      _blocks->set_prev_block(NULL);
    }
  } else {
    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}
463 
// Prints a human-readable dump of the layout: one line per block with its
// offset, name/signature (when available), size, alignment and kind.
// 'super' is required to resolve the names of INHERITED fields; it is only
// meaningful for instance layouts (is_static == false).
void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlass* super) {
  ResourceMark rm;
  LayoutRawBlock* b = _blocks;
  while(b != _last) {
    switch(b->kind()) {
    case LayoutRawBlock::REGULAR: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr(" @%d \"%s\" %s %d/%d %s",
                       b->offset(),
                       fi->name(_cp)->as_C_string(),
                       fi->signature(_cp)->as_C_string(),
                       b->size(),
                       b->alignment(),
                       "REGULAR");
      break;
    }
    case LayoutRawBlock::INLINED: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr(" @%d \"%s\" %s %d/%d %s",
                       b->offset(),
                       fi->name(_cp)->as_C_string(),
                       fi->signature(_cp)->as_C_string(),
                       b->size(),
                       b->alignment(),
                       "INLINED");
      break;
    }
    case LayoutRawBlock::RESERVED: {
      output->print_cr(" @%d %d/- %s",
                       b->offset(),
                       b->size(),
                       "RESERVED");
      break;
    }
    case LayoutRawBlock::INHERITED: {
      assert(!is_static, "Static fields are not inherited in layouts");
      assert(super != NULL, "super klass must be provided to retrieve inherited fields info");
      bool found = false;
      const InstanceKlass* ik = super;
      // Search the super class chain for the field declared at this offset,
      // to recover its name and signature.
      while (!found && ik != NULL) {
        for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
          if (fs.offset() == b->offset()) {
            output->print_cr(" @%d \"%s\" %s %d/%d %s",
                b->offset(),
                fs.name()->as_C_string(),
                fs.signature()->as_C_string(),
                b->size(),
                b->size(), // so far, alignment constraint == size, will change with Valhalla
                "INHERITED");
            found = true;
            break;

          }

        }
        ik = ik->java_super();
      }
      break;
    }
    case LayoutRawBlock::EMPTY:
      output->print_cr(" @%d %d/1 %s",
                       b->offset(),
                       b->size(),
                       "EMPTY");
      break;
    case LayoutRawBlock::PADDING:
      output->print_cr(" @%d %d/1 %s",
                       b->offset(),
                       b->size(),
                       "PADDING");
      break;
    }
    b = b->next_block();
  }
}
537 
// FieldLayoutBuilder computes the layout of a class's fields. The layouts
// themselves (_layout/_static_layout) and the field groups are created later
// in prologue(); the output is written to 'info'.
FieldLayoutBuilder::FieldLayoutBuilder(const Symbol* classname, const InstanceKlass* super_klass, ConstantPool* constant_pool,
                                       Array<u2>* fields, bool is_contended, bool is_inline_type,
                                       FieldLayoutInfo* info, Array<InlineKlass*>* inline_type_field_klasses) :
  _classname(classname),
  _super_klass(super_klass),
  _constant_pool(constant_pool),
  _fields(fields),
  _info(info),
  _inline_type_field_klasses(inline_type_field_klasses),
  _root_group(NULL),
  _contended_groups(GrowableArray<FieldGroup*>(8)),
  _static_fields(NULL),
  _layout(NULL),
  _static_layout(NULL),
  _nonstatic_oopmap_count(0),
  _alignment(-1),             // computed during inline class field sorting
  _first_field_offset(-1),
  _exact_size_in_bytes(-1),
  _has_nonstatic_fields(false),
  _has_inline_type_fields(false),
  _is_contended(is_contended),
  _is_inline_type(is_inline_type),
  _has_flattening_information(is_inline_type),
  _has_nonatomic_values(false),
  _atomic_field_count(0)
 {}
564 
565 FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
566   assert(g > 0, "must only be called for named contended groups");
567   FieldGroup* fg = NULL;
568   for (int i = 0; i < _contended_groups.length(); i++) {
569     fg = _contended_groups.at(i);
570     if (fg->contended_group() == g) return fg;
571   }
572   fg = new FieldGroup(g);
573   _contended_groups.append(fg);
574   return fg;
575 }
576 
// Creates the instance and static layouts and the default field groups that
// the sorting and layout phases operate on.
void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_fields, _constant_pool);
  const InstanceKlass* super_klass = _super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != NULL) {
    // A class has non-static fields if it declares some or inherits some.
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_fields, _constant_pool);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
}
589 
590 // Field sorting for regular (non-inline) classes:
591 //   - fields are sorted in static and non-static fields
592 //   - non-static fields are also sorted according to their contention group
593 //     (support of the @Contended annotation)
594 //   - @Contended annotation is ignored for static fields
595 //   - field flattening decisions are taken in this method
void FieldLayoutBuilder::regular_field_sorting() {
  for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
    // First pick the group the field belongs to: static, a contended group,
    // or the root group.
    FieldGroup* group = NULL;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      _atomic_field_count++;  // we might decrement this
      if (fs.is_contended()) {
        int g = fs.contended_group();
        if (g == 0) {
          // Anonymous @Contended field: gets its own group.
          // NOTE(review): 'true' converts to contended group id 1 here, which
          // could collide with a named group 1 — confirm this is intended.
          group = new FieldGroup(true);
          _contended_groups.append(group);
        } else {
          group = get_or_create_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != NULL, "invariant");
    // Then dispatch the field into the group's lists according to its type.
    BasicType type = Signature::basic_type(fs.signature());
    switch(type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) _nonstatic_oopmap_count++;
      group->add_oop_field(fs);
      break;
    case T_INLINE_TYPE:
      _has_inline_type_fields = true;
      if (group == _static_fields) {
        // static fields are never inlined
        group->add_oop_field(fs);
      } else {
        _has_flattening_information = true;
        // Flattening decision to be taken here
        // This code assumes all verification already have been performed
        // (field's type has been loaded and it is an inline klass)
        JavaThread* THREAD = JavaThread::current();
        Klass* klass =  _inline_type_field_klasses->at(fs.index());
        assert(klass != NULL, "Sanity check");
        InlineKlass* vk = InlineKlass::cast(klass);
        bool too_big_to_flatten = (InlineFieldMaxFlatSize >= 0 &&
                                   (vk->size_helper() * HeapWordSize) > InlineFieldMaxFlatSize);
        bool too_atomic_to_flatten = vk->is_declared_atomic() || AlwaysAtomicAccesses;
        bool too_volatile_to_flatten = fs.access_flags().is_volatile();
        if (vk->is_naturally_atomic()) {
          too_atomic_to_flatten = false;
          //too_volatile_to_flatten = false; //FIXME
          // volatile fields are currently never inlined, this could change in the future
        }
        if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten) || fs.access_flags().is_final()) {
          // Flatten the field: it contributes its own oop maps.
          group->add_inlined_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          fs.set_inlined(true);
          if (!vk->is_atomic()) {  // flat and non-atomic: take note
            _has_nonatomic_values = true;
            _atomic_field_count--;  // every other field is atomic but this one
          }
        } else {
          // Not flattened: stored as a regular reference field.
          _nonstatic_oopmap_count++;
          group->add_oop_field(fs);
        }
      }
      break;
    default:
      fatal("Something wrong?");
    }
  }
  // Sort every group's primitive lists so layout can proceed biggest-first.
  _root_group->sort_by_size();
  _static_fields->sort_by_size();
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      _contended_groups.at(i)->sort_by_size();
    }
  }
}
683 
684 /* Field sorting for inline classes:
685  *   - because inline classes are immutable, the @Contended annotation is ignored
686  *     when computing their layout (with only read operation, there's no false
687  *     sharing issue)
688  *   - this method also records the alignment of the field with the most
689  *     constraining alignment, this value is then used as the alignment
690  *     constraint when flattening this inline type into another container
691  *   - field flattening decisions are taken in this method (those decisions are
692  *     currently only based in the size of the fields to be inlined, the size
693  *     of the resulting instance is not considered)
694  */
void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
  assert(_is_inline_type, "Should only be used for inline classes");
  // Track the most constraining alignment among non-static fields; it becomes
  // _alignment, used when flattening this inline type into a container.
  int alignment = 1;
  for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    int field_alignment = 1;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      _atomic_field_count++;  // we might decrement this
      group = _root_group;    // @Contended is ignored for inline classes
    }
    assert(group != NULL, "invariant");
    BasicType type = Signature::basic_type(fs.signature());
    switch(type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      if (group != _static_fields) {
        field_alignment = type2aelembytes(type); // alignment == size for primitive types
      }
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) {
        _nonstatic_oopmap_count++;
        field_alignment = type2aelembytes(type); // alignment == size for oops
      }
      group->add_oop_field(fs);
      break;
    case T_INLINE_TYPE: {
//      fs.set_inline(true);
      _has_inline_type_fields = true;
      if (group == _static_fields) {
        // static fields are never inlined
        group->add_oop_field(fs);
      } else {
        // Flattening decision to be taken here
        // This code assumes all verifications have already been performed
        // (field's type has been loaded and it is an inline klass)
        JavaThread* THREAD = JavaThread::current();
        Klass* klass =  _inline_type_field_klasses->at(fs.index());
        assert(klass != NULL, "Sanity check");
        InlineKlass* vk = InlineKlass::cast(klass);
        bool too_big_to_flatten = (InlineFieldMaxFlatSize >= 0 &&
                                   (vk->size_helper() * HeapWordSize) > InlineFieldMaxFlatSize);
        bool too_atomic_to_flatten = vk->is_declared_atomic() || AlwaysAtomicAccesses;
        bool too_volatile_to_flatten = fs.access_flags().is_volatile();
        if (vk->is_naturally_atomic()) {
          too_atomic_to_flatten = false;
          //too_volatile_to_flatten = false; //FIXME
          // volatile fields are currently never inlined, this could change in the future
        }
        if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten) || fs.access_flags().is_final()) {
          // Flatten the field: its alignment constraint comes from its klass.
          group->add_inlined_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          field_alignment = vk->get_alignment();
          fs.set_inlined(true);
          if (!vk->is_atomic()) {  // flat and non-atomic: take note
            _has_nonatomic_values = true;
            _atomic_field_count--;  // every other field is atomic but this one
          }
        } else {
          // Not flattened: stored as a regular reference field.
          _nonstatic_oopmap_count++;
          field_alignment = type2aelembytes(T_OBJECT);
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
    if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
  }
  _alignment = alignment;
  if (!_has_nonstatic_fields) {
    // There are a number of fixes required throughout the type system and JIT
    Exceptions::fthrow(THREAD_AND_LOCATION,
                       vmSymbols::java_lang_ClassFormatError(),
                       "Value Types do not support zero instance size yet");
    return;
  }
}
786 
787 void FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) {
788   if (ContendedPaddingWidth > 0) {
789     LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
790     _layout->insert(slot, padding);
791   }
792 }
793 
/* Computation of the layout of regular classes is an evolution of the previous
 * default layout (FieldAllocationStyle 1):
 *   - primitive fields (both primitive types and flattened inline types) are
 *     allocated first, from the biggest to the smallest
 *   - then oop fields are allocated (to increase the chances of having
 *     contiguous oops and a simpler oopmap).
 */
801 void FieldLayoutBuilder::compute_regular_layout() {
802   bool need_tail_padding = false;
803   prologue();
804   regular_field_sorting();

805   if (_is_contended) {
806     _layout->set_start(_layout->last_block());
807     // insertion is currently easy because the current strategy doesn't try to fill holes
808     // in super classes layouts => the _start block is by consequence the _last_block
809     insert_contended_padding(_layout->start());
810     need_tail_padding = true;
811   }
812   _layout->add(_root_group->big_primitive_fields());
813   _layout->add(_root_group->small_primitive_fields());
814   _layout->add(_root_group->oop_fields());
815 
816   if (!_contended_groups.is_empty()) {
817     for (int i = 0; i < _contended_groups.length(); i++) {
818       FieldGroup* cg = _contended_groups.at(i);
819       LayoutRawBlock* start = _layout->last_block();
820       insert_contended_padding(start);
821       _layout->add(cg->big_primitive_fields());
822       _layout->add(cg->small_primitive_fields(), start);
823       _layout->add(cg->oop_fields(), start);
824       need_tail_padding = true;
825     }
826   }
827 
828   if (need_tail_padding) {
829     insert_contended_padding(_layout->last_block());
830   }
831   // Warning: IntanceMirrorKlass expects static oops to be allocated first
832   _static_layout->add_contiguously(_static_fields->oop_fields());
833   _static_layout->add(_static_fields->big_primitive_fields());
834   _static_layout->add(_static_fields->small_primitive_fields());
835 
836   epilogue();
837 }
838 
/* Computation of the layout of inline classes uses a slightly different
 * strategy than for regular classes. Regular classes have their oop fields
 * allocated at the end of the layout to improve GC performance. Unfortunately,
 * this strategy increases the number of empty slots inside an instance.
 * Because the purpose of inline classes is to be embedded into other
 * containers, it is critical to keep their size as small as possible. For
 * this reason, the allocation strategy is:
 *   - big primitive fields (primitive types and flattened inline types smaller
 *     than an oop) are allocated first (from the biggest to the smallest)
 *   - then oop fields
 *   - then small primitive fields (from the biggest to the smallest)
 */
851 void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
852   prologue();
853   inline_class_field_sorting(CHECK);
854   // Inline types are not polymorphic, so they cannot inherit fields.
855   // By consequence, at this stage, the layout must be composed of a RESERVED
856   // block, followed by an EMPTY block.
857   assert(_layout->start()->kind() == LayoutRawBlock::RESERVED, "Unexpected");
858   assert(_layout->start()->next_block()->kind() == LayoutRawBlock::EMPTY, "Unexpected");
859   LayoutRawBlock* first_empty = _layout->start()->next_block();
860   if (first_empty->offset() % _alignment != 0) {
861     LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, _alignment - (first_empty->offset() % _alignment));
862     _layout->insert(first_empty, padding);
863     _layout->set_start(padding->next_block());
864   }
865 
866   _layout->add(_root_group->big_primitive_fields());
867   _layout->add(_root_group->oop_fields());
868   _layout->add(_root_group->small_primitive_fields());
869 
870   LayoutRawBlock* first_field = _layout->first_field_block();
871    if (first_field != NULL) {
872      _first_field_offset = _layout->first_field_block()->offset();
873      _exact_size_in_bytes = _layout->last_block()->offset() - _layout->first_field_block()->offset();
874    } else {
875      // special case for empty value types
876      _first_field_offset = _layout->blocks()->size();
877      _exact_size_in_bytes = 0;
878    }
879   _exact_size_in_bytes = _layout->last_block()->offset() - _layout->first_field_block()->offset();
880 
881   // Warning:: InstanceMirrorKlass expects static oops to be allocated first
882   _static_layout->add_contiguously(_static_fields->oop_fields());
883   _static_layout->add(_static_fields->big_primitive_fields());
884   _static_layout->add(_static_fields->small_primitive_fields());
885 
886   epilogue();
887 }
888 
889 void FieldLayoutBuilder::add_inlined_field_oopmap(OopMapBlocksBuilder* nonstatic_oop_maps,
890                 InlineKlass* vklass, int offset) {
891   int diff = offset - vklass->first_field_offset();
892   const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
893   const OopMapBlock* last_map = map + vklass->nonstatic_oop_map_count();
894   while (map < last_map) {
895     nonstatic_oop_maps->add(map->offset() + diff, map->count());
896     map++;
897   }
898 }
899 
900 void FieldLayoutBuilder::register_embedded_oops_from_list(OopMapBlocksBuilder* nonstatic_oop_maps, GrowableArray<LayoutRawBlock*>* list) {
901   if (list != NULL) {
902     for (int i = 0; i < list->length(); i++) {
903       LayoutRawBlock* f = list->at(i);
904       if (f->kind() == LayoutRawBlock::INLINED) {
905         InlineKlass* vk = f->inline_klass();
906         assert(vk != NULL, "Should have been initialized");
907         if (vk->contains_oops()) {
908           add_inlined_field_oopmap(nonstatic_oop_maps, vk, f->offset());
909         }
910       }
911     }
912   }
913 }
914 
915 void FieldLayoutBuilder::register_embedded_oops(OopMapBlocksBuilder* nonstatic_oop_maps, FieldGroup* group) {
916   if (group->oop_fields() != NULL) {
917     for (int i = 0; i < group->oop_fields()->length(); i++) {
918       LayoutRawBlock* b = group->oop_fields()->at(i);
919       nonstatic_oop_maps->add(b->offset(), 1);
920     }
921   }
922   register_embedded_oops_from_list(nonstatic_oop_maps, group->big_primitive_fields());
923   register_embedded_oops_from_list(nonstatic_oop_maps, group->small_primitive_fields());
924 }
925 
// Finalizes the layout computation: builds the non-static oop maps
// (inherited blocks first, then oops declared or embedded in this class)
// and fills _info with the sizes and flags needed for InstanceKlass creation.
void FieldLayoutBuilder::epilogue() {
  // Computing oopmaps
  int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;

  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count);
  if (super_oop_map_count > 0) {
    // Start from the superclass's maps; this class's oops are added after.
    nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
    _super_klass->nonstatic_oop_map_count());
  }
  register_embedded_oops(nonstatic_oop_maps, _root_group);

  // @Contended groups keep their oops in separate FieldGroups; register them too.
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      FieldGroup* cg = _contended_groups.at(i);
      if (cg->oop_count() > 0) {
        assert(cg->oop_fields() != NULL && cg->oop_fields()->at(0) != NULL, "oop_count > 0 but no oop fields found");
        register_embedded_oops(nonstatic_oop_maps, cg);
      }
    }
  }

  // Merge adjacent entries so the final oop map is as small as possible.
  nonstatic_oop_maps->compact();

  // End offsets, rounded up to the units the rest of the VM expects
  // (words for instance/static sizes, heap-oop slots for field size).
  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->_instance_size = align_object_size(instance_end / wordSize);
  _info->_static_field_size = static_fields_size;
  _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->_has_nonstatic_fields = _has_nonstatic_fields;
  _info->_has_inline_fields = _has_inline_type_fields;

  // An inline type is naturally atomic if it has just one field, and
  // that field is simple enough.
  _info->_is_naturally_atomic = (_is_inline_type &&
                                 (_atomic_field_count <= 1) &&
                                 !_has_nonatomic_values &&
                                 _contended_groups.is_empty());
  // This may be too restrictive, since if all the fields fit in 64
  // bits we could make the decision to align instances of this class
  // to 64-bit boundaries, and load and store them as single words.
  // And on machines which supported larger atomics we could similarly
  // allow larger values to be atomic, if properly aligned.

  // Optional tracing of the computed layouts.
  if (PrintFieldLayout || (PrintInlineLayout && _has_flattening_information)) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _classname->as_C_string());
    tty->print_cr("Instance fields:");
    _layout->print(tty, false, _super_klass);
    tty->print_cr("Static fields:");
    _static_layout->print(tty, true, NULL);
    tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
    if (_is_inline_type) {
      tty->print_cr("First field offset = %d", _first_field_offset);
      tty->print_cr("Alignment = %d bytes", _alignment);
      tty->print_cr("Exact size = %d bytes", _exact_size_in_bytes);
    }
    tty->print_cr("---");
  }
}
992 
993 void FieldLayoutBuilder::build_layout(TRAPS) {
994   if (_is_inline_type) {
995     compute_inline_class_layout(CHECK);
996   } else {
997     compute_regular_layout();
998   }
999 }
--- EOF ---