< prev index next >

src/hotspot/share/compiler/oopMap.cpp

Print this page




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/nmethod.hpp"
  29 #include "code/scopeDesc.hpp"
  30 #include "compiler/oopMap.hpp"


  31 #include "gc/shared/collectedHeap.hpp"


  32 #include "memory/allocation.inline.hpp"
  33 #include "memory/iterator.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "memory/universe.hpp"
  36 #include "oops/compressedOops.hpp"

  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/signature.hpp"
  40 #include "utilities/align.hpp"
  41 #include "utilities/lockFreeStack.hpp"
  42 #ifdef COMPILER1
  43 #include "c1/c1_Defs.hpp"
  44 #endif
  45 #ifdef COMPILER2
  46 #include "opto/optoreg.hpp"
  47 #endif
  48 
  49 // OopMapStream
  50 
  51 OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
  52   _stream = new CompressedReadStream(oop_map->write_stream()->buffer());

  53   _mask = oop_types_mask;
  54   _size = oop_map->omv_count();
  55   _position = 0;
  56   _valid_omv = false;
  57 }
  58 
  59 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
  60   _stream = new CompressedReadStream(oop_map->data_addr());

  61   _mask = oop_types_mask;
  62   _size = oop_map->count();
  63   _position = 0;
  64   _valid_omv = false;
  65 }
  66 
  67 void OopMapStream::find_next() {
  68   while(_position++ < _size) {
  69     _omv.read_from(_stream);
  70     if(((int)_omv.type() & _mask) > 0) {
  71       _valid_omv = true;
  72       return;
  73     }
  74   }
  75   _valid_omv = false;
  76 }
  77 
  78 
  79 // OopMap
  80 
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  // Debug-only shadow array with one slot per nameable location
  // (registers + frame + outgoing args); set_xxx() uses it to catch
  // duplicate insertions for the same location.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
  94 
  95 
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  // Size hint only: each entry compresses to a small number of bytes.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  // Recreate (but do not copy) the duplicate-insertion tracking array;
  // the copy loop below repopulates the stream, not _locs_used.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}
 116 
 117 
 118 OopMap* OopMap::deep_copy() {
 119   return new OopMap(_deep_copy_token, this);
 120 }
 121 
 122 void OopMap::copy_data_to(address addr) const {
 123   memcpy(addr, write_stream()->buffer(), write_stream()->position());
 124 }
 125 




























































































































































 126 int OopMap::heap_size() const {
 127   int size = sizeof(OopMap);
 128   int align = sizeof(void *) - 1;
 129   size += write_stream()->position();
 130   // Align to a reasonable ending point
 131   size = ((size+align) & ~align);
 132   return size;
 133 }
 134 
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
//
// Common helper for all set_* entry points: records that location 'reg'
// holds a value of oop type 'x'.  'optional' carries the second location
// some entry types need (the caller's register for callee_saved, the base
// oop's location for derived_oop); it is ignored otherwise.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x);

  if(x == OopMapValue::callee_saved_value) {
    // This can never be a stack location, so we don't need to transform it.
    assert(optional->is_reg(), "Trying to callee save a stack location");
    o.set_content_reg(optional);
  } else if(x == OopMapValue::derived_oop_value) {
    o.set_content_reg(optional);
  }

  // Append the compressed entry and bump omv_count.
  o.write_on(write_stream());
  increment_count();
}
 156 
 157 
// Record that 'reg' contains an ordinary oop at this map's PC.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}
 161 
 162 
// Intentionally a no-op: value (non-oop) entries are not recorded, so
// 'reg' is ignored.
void OopMap::set_value(VMReg reg) {
  // At this time, we don't need value entries in our OopMap.
}
 166 
 167 
// Record that 'reg' contains a compressed (narrow) oop at this map's PC.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
 171 
 172 
// Record that 'reg' is the save location of the caller's machine register
// 'caller_machine_register'.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}
 176 
 177 
 178 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
 179   if( reg == derived_from_local_register ) {
 180     // Actually an oop, derived shares storage with base,
 181     set_oop(reg);
 182   } else {
 183     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
 184   }
 185 }
 186 
 187 // OopMapSet
 188 
 189 OopMapSet::OopMapSet() {
 190   set_om_size(MinOopMapAllocation);
 191   set_om_count(0);
 192   OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size());
 193   set_om_data(temp);
 194 }
 195 
 196 
 197 void OopMapSet::grow_om_data() {
 198   int new_size = om_size() * 2;
 199   OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
 200   memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
 201   set_om_size(new_size);
 202   set_om_data(new_data);
 203 }
 204 
// Append 'map' for code offset 'pc_offset'.  Maps are expected to arrive
// in increasing pc order; a duplicate offset is fatal in debug builds.
void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  assert(om_size() != -1,"Cannot grow a fixed OopMapSet");

  if(om_count() >= om_size()) {
    grow_om_data();
  }
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(om_count() > 0) {
    OopMap* last = at(om_count()-1);
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if(last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      om_count(),last->offset(),om_count()+1,map->offset());
    }
  }
#endif // ASSERT

  set(om_count(),map);
  increment_count();
}
 229 
 230 
// Bytes needed to serialize this set: header, pointer table, and each
// contained OopMap.
int OopMapSet::heap_size() const {
  // The space we use
  // NOTE(review): looks like this was meant to be sizeof(OopMapSet) — confirm
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size = ((size+align) & ~align);
  size += om_count() * sizeof(OopMap*);

  // Now add in the space needed for the individual OopMaps
  for(int i=0; i < om_count(); i++) {
    size += at(i)->heap_size();
  }
  // We don't need to align this, it will be naturally pointer aligned
  return size;
}
 245 
 246 
 247 OopMap* OopMapSet::singular_oop_map() {


 251 
 252 
 253 OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
 254   int i, len = om_count();
 255   assert( len > 0, "must have pointer maps" );
 256 
 257   // Scan through oopmaps. Stop when current offset is either equal or greater
 258   // than the one we are looking for.
 259   for( i = 0; i < len; i++) {
 260     if( at(i)->offset() >= pc_offset )
 261       break;
 262   }
 263 
 264   assert( i < len, "oopmap not found" );
 265 
 266   OopMap* m = at(i);
 267   assert( m->offset() == pc_offset, "oopmap not found" );
 268   return m;
 269 }
 270 
// Callback passed to all_do() by oops_do(): records a (derived, base)
// location pair in the DerivedPointerTable so the derived pointer can be
// reconstructed after GC moves its base oop.
static void add_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !INCLUDE_JVMCI
  // A pure C1 configuration never emits derived pointers.
  COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}
 279 
 280 
 281 #ifndef PRODUCT
 282 static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
 283   // Print oopmap and regmap
 284   tty->print_cr("------ ");
 285   CodeBlob* cb = fr->cb();
 286   const ImmutableOopMapSet* maps = cb->oop_maps();
 287   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
 288   map->print();
 289   if( cb->is_nmethod() ) {
 290     nmethod* nm = (nmethod*)cb;
 291     // native wrappers have no scope data, it is implied
 292     if (nm->is_native_method()) {
 293       tty->print("bci: 0 (native)");
 294     } else {
 295       ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
 296       tty->print("bci: %d ",scope->bci());
 297     }
 298   }
 299   tty->cr();
 300   fr->print_on(tty);
 301   tty->print("     ");
 302   cb->print_value_on(tty);  tty->cr();
 303   reg_map->print();
 304   tty->print_cr("------ ");
 305 





























 306 }
 307 #endif // PRODUCT
 308 
// GC root-scanning entry point for a compiled frame: applies 'f' to every
// oop/narrowoop location in 'fr', routes derived pointers into the
// DerivedPointerTable via add_derived_oop, and passes a no-op closure for
// value entries.
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
  // add derived oops to a table
  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
}
 313 








 314 
// Walk every recorded location in frame 'fr' and dispatch by entry type:
// derived pointers go to 'derived_oop_fn', plain and narrow oops to
// 'oop_fn'.  'value_fn' is currently unused by the visible code.
// Derived pointers are processed before their bases so the offset can be
// captured while the base is still unmoved.
void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                       OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
                       OopClosure* value_fn) {
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  assert(map != NULL, "no ptr map found");

  // handle derived pointers first (otherwise base pointer may be
  // changed before derived pointer offset has been collected)
  OopMapValue omv;
  {
    OopMapStream oms(map,OopMapValue::derived_oop_value);
    if (!oms.is_done()) {
#ifndef TIERED
      // Only C2/JVMCI emit derived pointers; reaching here under C1-only
      // or JVMCI-compiling configurations is a bug.
      COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        ShouldNotReachHere();
      }
#endif
#endif // !TIERED
      do {
        omv = oms.current();
        oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
        guarantee(loc != NULL, "missing saved register");
        oop *derived_loc = loc;
        oop *base_loc    = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
        // Ignore NULL oops and decoded NULL narrow oops which
        // equal to CompressedOops::base() when a narrow oop
        // implicit null check is used in compiled code.
        // The narrow_oop_base could be NULL or be the address
        // of the page below heap depending on compressed oops mode.
        if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
          derived_oop_fn(base_loc, derived_loc);
        }
        oms.next();
      }  while (!oms.is_done());
    }
  }

  // We want coop and oop oop_types
  int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;
  {
    for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
      omv = oms.current();
      oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
      // It should be an error if no location can be found for a
      // register mentioned as contained an oop of some kind.  Maybe
      // this was allowed previously because value_value items might
      // be missing?
      guarantee(loc != NULL, "missing saved register");
      if ( omv.type() == OopMapValue::oop_value ) {
        oop val = *loc;
        if (val == NULL || CompressedOops::is_base(val)) {
          // Ignore NULL oops and decoded NULL narrow oops which
          // equal to CompressedOops::base() when a narrow oop
          // implicit null check is used in compiled code.
          // The narrow_oop_base could be NULL or be the address
          // of the page below heap depending on compressed oops mode.
          continue;
        }
#ifdef ASSERT
        // Diagnose misaligned slots or values outside the heap before
        // asserting, so the failure leaves a useful trace.
        if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
            !Universe::heap()->is_in_or_null(*loc)) {
          tty->print_cr("# Found non oop pointer.  Dumping state at failure");
          // try to dump out some helpful debugging information
          trace_codeblob_maps(fr, reg_map);
          omv.print();
          tty->print_cr("register r");
          omv.reg()->print();
          tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
          // do the real assert.
          assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
        }
#endif // ASSERT
        oop_fn->do_oop(loc);
      } else if ( omv.type() == OopMapValue::narrowoop_value ) {
        narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
        VMReg vmReg = omv.reg();
        // Don't do this on SPARC float registers as they can be individually addressed
        if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
          // compressed oops in registers only take up 4 bytes of an
          // 8 byte register but they are in the wrong part of the
          // word so adjust loc to point at the right place.
          nl = (narrowOop*)((address)nl + 4);
        }
#endif
        oop_fn->do_oop(nl);
      }
    }
  }
}
 413 







 414 
// Update callee-saved register info for the following frame
//
// For each callee_saved entry in this frame's oop map, record in
// 'reg_map' the address of the stack slot where the caller's register was
// saved, so the caller's frame can later find its register values.
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map  = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    // content_reg() names the caller's register; reg() names the slot in
    // this frame where it was saved.
    VMReg reg = omv.content_reg();
    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
    reg_map->set_location(reg, (address) loc);
    DEBUG_ONLY(nof_callee++;)
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
 454 
















 455 //=============================================================================
 456 // Non-Product code
 457 
 458 #ifndef PRODUCT
 459 
 460 bool ImmutableOopMap::has_derived_pointer() const {
 461 #if !defined(TIERED) && !INCLUDE_JVMCI
 462   COMPILER1_PRESENT(return false);
 463 #endif // !TIERED
 464 #if COMPILER2_OR_JVMCI
 465   OopMapStream oms(this,OopMapValue::derived_oop_value);
 466   return oms.is_done();
 467 #else
 468   return false;
 469 #endif // COMPILER2_OR_JVMCI
 470 }
 471 































 472 #endif //PRODUCT
 473 
 474 // Printing code is present in product build for -XX:+PrintAssembly.
 475 
 476 static
 477 void print_register_type(OopMapValue::oop_types x, VMReg optional,
 478                          outputStream* st) {
 479   switch( x ) {
 480   case OopMapValue::oop_value:
 481     st->print("Oop");
 482     break;
 483   case OopMapValue::narrowoop_value:
 484     st->print("NarrowOop");
 485     break;
 486   case OopMapValue::callee_saved_value:
 487     st->print("Callers_");
 488     optional->print_on(st);
 489     break;
 490   case OopMapValue::derived_oop_value:
 491     st->print("Derived_oop_");


 564     st->cr();
 565   }
 566   st->cr();
 567 }
 568 
 569 void OopMapSet::print() const { print_on(tty); }
 570 
 571 bool OopMap::equals(const OopMap* other) const {
 572   if (other->_omv_count != _omv_count) {
 573     return false;
 574   }
 575   if (other->write_stream()->position() != write_stream()->position()) {
 576     return false;
 577   }
 578   if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
 579     return false;
 580   }
 581   return true;
 582 }
 583 















 584 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
 585   ImmutableOopMapPair* pairs = get_pairs();
 586   ImmutableOopMapPair* last  = NULL;
 587 
 588   for (int i = 0; i < _count; ++i) {
 589     if (pairs[i].pc_offset() >= pc_offset) {
 590       last = &pairs[i];
 591       break;
 592     }
 593   }
 594 
 595   // Heal Coverity issue: potential index out of bounds access.
 596   guarantee(last != NULL, "last may not be null");
 597   assert(last->pc_offset() == pc_offset, "oopmap not found");
 598   return last->get_from(this);
 599 }
 600 
// Resolve this pair's map payload within its owning set.
const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}
 604 
 605 ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
 606   address addr = data_addr();
 607   oopmap->copy_data_to(addr);

 608 }
 609 
#ifdef ASSERT
// Debug-only: total size in bytes of this map (header + compressed data),
// computed by walking the entry stream to its end.
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif
 620 
// Prepare to build an immutable copy of 'set'.  One Mapping slot is
// allocated per source map; all offsets start unset.
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}
 624 
// Bytes required to store 'map' in immutable form, rounded up to 8 bytes.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}


 680 
 681   new (addr) ImmutableOopMap(map);
 682   return size_for(map);
 683 }
 684 
// Populate the pairs and map payloads of 'set' according to the per-map
// classification in _mapping: NEW maps get a freshly constructed
// ImmutableOopMap, DUPLICATE/EMPTY maps get only a pair referencing a
// previously emitted payload.
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    // Sanity: each map must be findable again and byte-identical to its source.
    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}
 703 
#ifdef ASSERT
// Post-generation checks: the trailing 8-byte 0xff sentinel is intact and
// every pair's payload lies fully within the generated set.
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif
 719 
 720 ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {


 778   // empty.  If not, then we have probably forgotton to call
 779   // update_pointers after last GC/Scavenge.
 780   assert (!_active, "should not be active");
 781   assert(is_empty(), "table not empty");
 782   if (Entry::_list == NULL) {
 783     void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
 784     Entry::_list = ::new (mem) Entry::List();
 785   }
 786   _active = true;
 787 }
 788 
// Returns the raw pointer bits stored at 'pointer' as an intptr_t
// (used to compute derived-minus-base offsets).
inline intptr_t value_of_loc(oop *pointer) {
  return cast_from_oop<intptr_t>((*pointer));
}
 793 
// Record that 'derived_loc' holds a pointer derived from the oop at
// 'base_loc'.  The derived slot is overwritten with the base location's
// address, and the saved (derived - base) offset lets the table later
// reconstruct the derived pointer after GC has moved the base oop.
void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (void*)base_loc, "location already added");
    assert(Entry::_list != NULL, "list must exist");
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    Entry* entry = new Entry(derived_loc, offset);
    Entry::_list->push(*entry);
  }
}




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/nmethod.hpp"
  29 #include "code/scopeDesc.hpp"
  30 #include "compiler/oopMap.hpp"
  31 #include "compiler/oopMap.inline.hpp"
  32 #include "compiler/oopMapStubGenerator.hpp"
  33 #include "gc/shared/collectedHeap.hpp"
  34 #include "logging/log.hpp"
  35 #include "logging/logStream.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/iterator.hpp"
  38 #include "memory/resourceArea.hpp"

  39 #include "oops/compressedOops.hpp"
  40 #include "runtime/atomic.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/signature.hpp"
  44 #include "utilities/align.hpp"
  45 #include "utilities/lockFreeStack.hpp"
  46 #ifdef COMPILER1
  47 #include "c1/c1_Defs.hpp"
  48 #endif
  49 #ifdef COMPILER2
  50 #include "opto/optoreg.hpp"
  51 #endif
  52 
  53 // OopMapStream
  54 
// Iterate over the entries of a (mutable) OopMap, restricted to the oop
// types selected by oop_types_mask.  The embedded read stream aliases the
// map's write buffer, so no heap allocation takes place.
OopMapStream::OopMapStream(const OopMap* oop_map, int oop_types_mask)
  : _stream(oop_map->write_stream()->buffer()) {
  _mask = oop_types_mask;
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}
  63 
// Iterate over the entries of an ImmutableOopMap, restricted to the oop
// types selected by oop_types_mask.  The embedded read stream points at
// the map's inline data; no heap allocation takes place.
OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask)
  : _stream(oop_map->data_addr()) {
  _mask = oop_types_mask;
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}
  72 
// Decode entries until one whose type is selected by _mask is found;
// _valid_omv reports whether _omv holds a match or the stream ran out.
void OopMapStream::find_next() {
  while(_position++ < _size) {
    _omv.read_from(&_stream);
    if(((int)_omv.type() & _mask) > 0) {
      _valid_omv = true;
      return;
    }
  }
  _valid_omv = false;
}
  83 
  84 
  85 // OopMap
  86 
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;   // no oop entries recorded yet (see num_oops())
  _index = -1;     // presumably "not yet placed in a set" — TODO confirm

#ifdef ASSERT
  // Debug-only shadow array used by set_xxx() to catch duplicate
  // insertions for the same location.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
 102 
 103 
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  // Size hint only: each entry compresses to a small number of bytes.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _index = -1;   // the copy has not been placed in a set

#ifdef ASSERT
  // Recreate (but do not copy) the duplicate-insertion tracking array;
  // the loop below repopulates the stream, not _locs_used.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}
 126 
 127 
// Clone this OopMap, including a private copy of its compressed entry data.
OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}
 131 
// Copy the raw (unsorted) compressed entry bytes into 'addr'.
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}
 135 
 136 class OopMapSort {
 137 private:
 138   const OopMap* _map;
 139   OopMapValue* _values;
 140   int _count;
 141 
 142 public:
 143   OopMapSort(const OopMap* map) : _map(map), _count(0) {
 144     _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
 145   }
 146 
 147   void sort();
 148 
 149   void print();
 150 
 151   void write(CompressedWriteStream* stream) {
 152     for (int i = 0; i < _count; ++i) {
 153       _values[i].write_on(stream);
 154     }
 155   }
 156 
 157 private:
 158   int find_derived_position(OopMapValue omv, int start) {
 159     assert(omv.type() == OopMapValue::derived_oop_value, "");
 160 
 161     VMReg base = omv.content_reg();
 162     int i = start;
 163 
 164     for (; i < _count; ++i) {
 165       if (base == _values[i].reg()) {
 166 
 167         for (int n = i + 1; n < _count; ++n) {
 168           if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) {
 169             return n;
 170           }
 171 
 172           if (derived_cost(_values[i]) > derived_cost(omv)) {
 173             return n;
 174           }
 175         }
 176         return _count;
 177       }
 178     }
 179 
 180     assert(false, "failed to find base");
 181     return -1;
 182   }
 183 
 184   int find_position(OopMapValue omv, int start) {
 185     assert(omv.type() != OopMapValue::derived_oop_value, "");
 186 
 187     int i = start;
 188     for (; i < _count; ++i) {
 189       if (omv_cost(_values[i]) > omv_cost(omv)) {
 190         return i;
 191       }
 192     }
 193     assert(i < _map->omv_count(), "bounds check");
 194     return i;
 195   }
 196 
 197   void insert(OopMapValue value, int pos) {
 198     assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
 199     assert(pos <= _count, "sanity");
 200 
 201     if (pos < _count) {
 202       OopMapValue prev = _values[pos];
 203 
 204       for (int i = pos; i < _count; ++i) {
 205         OopMapValue tmp = _values[i+1];
 206         _values[i+1] = prev;
 207         prev = tmp;
 208       }
 209     }
 210     _values[pos] = value;
 211 
 212     ++_count;
 213   }
 214 
 215   int omv_cost(OopMapValue omv) {
 216     assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
 217     return reg_cost(omv.reg());
 218   }
 219 
 220   int reg_cost(VMReg reg) {
 221     if (reg->is_reg()) {
 222       return 0;
 223     }
 224     return reg->reg2stack() * VMRegImpl::stack_slot_size;
 225   }
 226 
 227   int derived_cost(OopMapValue omv) {
 228     return reg_cost(omv.reg());
 229   }
 230 };
 231 
// Build the sorted order: callee-saved entries first (in stream order),
// then oop/narrowoop entries by ascending cost, then derived entries
// positioned after their base entries.
void OopMapSort::sort() {
  // Sanity pass: every entry must carry a known type.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }

  for (OopMapStream oms(_map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
    insert(oms.current(), _count);
  }

  // Oop/narrowoop entries begin after the callee-saved block.
  int start = _count;
  for (OopMapStream oms(_map, OopMapValue::oop_value | OopMapValue::narrowoop_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    int pos = find_position(omv, start);
    insert(omv, pos);
  }

  for (OopMapStream oms(_map, OopMapValue::derived_oop_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    int pos = find_derived_position(omv, start);
    assert(pos > 0, "");
    insert(omv, pos);
  }
}
 256 
// Debug dump of the sorted entries: 'o' = oop, 'n' = narrowoop,
// 'd' = derived, with register values or stack offsets as appropriate.
void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%ld)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%lx)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      // Derived entry: show base (content_reg) and derived (reg) locations.
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%ld) stack (%lx)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%lx) reg (%ld)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}
 279 
 280 void OopMap::copy_and_sort_data_to(address addr) const {
 281   OopMapSort sort(this);
 282   sort.sort();
 283   CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
 284   sort.write(stream);
 285 
 286   assert(stream->position() == write_stream()->position(), "");
 287   memcpy(addr, stream->buffer(), stream->position());
 288   //copy_data_to(addr);
 289   //sort.print();
 290 }
 291 
 292 int OopMap::heap_size() const {
 293   int size = sizeof(OopMap);
 294   int align = sizeof(void *) - 1;
 295   size += write_stream()->position();
 296   // Align to a reasonable ending point
 297   size = ((size+align) & ~align);
 298   return size;
 299 }
 300 
 301 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
 302 // slots to hold 4-byte values like ints and floats in the LP64 build.
 303 void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
 304 
 305   assert(reg->value() < _locs_length, "too big reg value for stack size");
 306   assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
 307   debug_only( _locs_used[reg->value()] = x; )
 308 
 309   OopMapValue o(reg, x);
 310 
 311   if(x == OopMapValue::callee_saved_value) {
 312     // This can never be a stack location, so we don't need to transform it.
 313     assert(optional->is_reg(), "Trying to callee save a stack location");
 314     o.set_content_reg(optional);
 315   } else if(x == OopMapValue::derived_oop_value) {
 316     o.set_content_reg(optional);
 317   }
 318 
 319   o.write_on(write_stream());
 320   increment_count();
 321   if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value)
 322     increment_num_oops();
 323 }
 324 
 325 
// Record an ordinary oop at reg.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}
 329 
 330 
// Intentionally a no-op: plain (non-oop) values are not tracked.
void OopMap::set_value(VMReg reg) {
  // At this time, we don't need value entries in our OopMap.
  // set_xxx(reg, OopMapValue::live_value, VMRegImpl::Bad());
}
 335 
 336 
// Record a compressed (narrow) oop at reg.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
 340 
 341 
// Record that the caller's machine register is saved at reg in this frame.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}
 345 
 346 
 347 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
 348   if( reg == derived_from_local_register ) {
 349     // Actually an oop, derived shares storage with base,
 350     set_oop(reg);
 351   } else {
 352     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
 353   }
 354 }
 355 
 356 // OopMapSet
 357 
 358 OopMapSet::OopMapSet() {
 359   set_om_size(MinOopMapAllocation);
 360   set_om_count(0);
 361   OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size());
 362   set_om_data(temp);
 363 }
 364 
 365 
 366 void OopMapSet::grow_om_data() {
 367   int new_size = om_size() * 2;
 368   OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
 369   memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
 370   set_om_size(new_size);
 371   set_om_data(new_data);
 372 }
 373 
 374 int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
 375   assert(om_size() != -1,"Cannot grow a fixed OopMapSet");
 376 
 377   if(om_count() >= om_size()) {
 378     grow_om_data();
 379   }
 380   map->set_offset(pc_offset);
 381 
 382 #ifdef ASSERT
 383   if(om_count() > 0) {
 384     OopMap* last = at(om_count()-1);
 385     if (last->offset() == map->offset() ) {
 386       fatal("OopMap inserted twice");
 387     }
 388     if(last->offset() > map->offset()) {
 389       tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
 390                       om_count(),last->offset(),om_count()+1,map->offset());
 391     }
 392   }
 393 #endif // ASSERT
 394 
 395   int index = om_count();
 396   set(index,map);
 397   map->_index = index;
 398   increment_count();
 399   return index;
 400 }
 401 
 402 
// Total heap footprint: set header (pointer-aligned), the pointer array,
// and each contained OopMap's own heap_size().
int OopMapSet::heap_size() const {
  // The space we use
  // NOTE(review): this starts from sizeof(OopMap); sizeof(OopMapSet) looks
  // like the intended header size here -- confirm against the allocation
  // site before changing, since callers size buffers with this value.
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size = ((size+align) & ~align);
  size += om_count() * sizeof(OopMap*);

  // Now add in the space needed for the individual OopMaps
  for(int i=0; i < om_count(); i++) {
    size += at(i)->heap_size();
  }
  // We don't need to align this, it will be naturally pointer aligned
  return size;
}
 417 
 418 
 419 OopMap* OopMapSet::singular_oop_map() {


 423 
 424 
 425 OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
 426   int i, len = om_count();
 427   assert( len > 0, "must have pointer maps" );
 428 
 429   // Scan through oopmaps. Stop when current offset is either equal or greater
 430   // than the one we are looking for.
 431   for( i = 0; i < len; i++) {
 432     if( at(i)->offset() >= pc_offset )
 433       break;
 434   }
 435 
 436   assert( i < len, "oopmap not found" );
 437 
 438   OopMap* m = at(i);
 439   assert( m->offset() == pc_offset, "oopmap not found" );
 440   return m;
 441 }
 442 
// Default derived-oop closure: records each (base, derived) pair in the
// DerivedPointerTable so the derived pointer can be re-materialized after
// its base is moved by GC.
class AddDerivedOop : public DerivedOopClosure {
public:
  enum { SkipNull = true, NeedsLock = true };
  virtual void do_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !INCLUDE_JVMCI
    // A pure-C1 VM should never produce derived pointers.
    COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
      DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};





 455 
// Apply f (and df, for derived oops) to every oop location recorded for the
// frame's pc. Delegates to the ImmutableOopMap found for fr; when df is NULL
// the callee substitutes the default AddDerivedOop closure.
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  // add_derived_oop: add derived oops to a table
  find_map(fr)->oops_do(fr, reg_map, f, df);
  // all_do(fr, reg_map, f, df != NULL ? df : &add_derived_oop, &do_nothing_cl);
}
 461 
 462 // void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
 463 //                        OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn,
 464 //                        OopClosure* value_fn) {
 465 //   find_map(fr)->oops_do(fr, reg_map, oop_fn, derived_oop_fn, value_fn);
 466 // }
 467 
// Decode the compressed oop map once, partitioning its entries by type into
// three C-heap arrays so later iteration does not re-parse the stream.
ExplodedOopMap::ExplodedOopMap(const ImmutableOopMap* oopMap) {
  _oopValues = copyOopMapValues(oopMap, OopMapValue::oop_value | OopMapValue::narrowoop_value, &_nrOopValues);
  _calleeSavedValues = copyOopMapValues(oopMap, OopMapValue::callee_saved_value, &_nrCalleeSavedValuesCount);
  _derivedValues = copyOopMapValues(oopMap, OopMapValue::derived_oop_value, &_nrDerivedValues);
}
 473 
 474 OopMapValue* ExplodedOopMap::values(int mask) {
 475   if (mask == (OopMapValue::oop_value | OopMapValue::narrowoop_value)) {
 476     return _oopValues;
 477   } else if (mask == OopMapValue::callee_saved_value) {
 478     return _calleeSavedValues;
 479   } else if (mask == OopMapValue::derived_oop_value) {
 480     return _derivedValues;
 481   } else {
 482     guarantee(false, "new type?");
 483     return NULL;
 484   }
 485 }

 486 
 487 int ExplodedOopMap::count(int mask) {
 488   if (mask == (OopMapValue::oop_value | OopMapValue::narrowoop_value)) {
 489     return _nrOopValues;
 490   } else if (mask == OopMapValue::callee_saved_value) {
 491     return _nrCalleeSavedValuesCount;
 492   } else if (mask == OopMapValue::derived_oop_value) {
 493     return _nrDerivedValues;
 494   } else {
 495     guarantee(false, "new type?");
 496     return 0;
 497   }
 498 }
 499 
 500 OopMapValue* ExplodedOopMap::copyOopMapValues(const ImmutableOopMap* oopMap, int mask, int* nr) {
 501   OopMapValue omv;
 502   int count = 0;
 503   // We want coop and oop oop_types
 504   for (OopMapStream oms(oopMap,mask); !oms.is_done(); oms.next()) {
 505     ++count;
 506   }
 507   *nr = count;
 508 
 509   OopMapValue* values = (OopMapValue*) NEW_C_HEAP_ARRAY(unsigned char, sizeof(OopMapValue) * count, mtCode);




 510 
 511   int i = 0;
 512   for (OopMapStream oms(oopMap,mask); !oms.is_done(); oms.next()) {
 513     assert(i < count, "overflow");
 514     values[i] = oms.current();
 515     i++;
 516   }
 517 
 518   i = 0;
 519   for (OopMapStream oms(oopMap,mask); !oms.is_done(); oms.next()) {
 520     assert(i < count, "overflow");
 521     assert(values[i].equals(oms.current()), "must");
 522     i++;
 523   }
 524 
 525   return values;
 526 }
 527 
 528 // NULL, fail, success (address)
// Lazily generate the freeze/thaw stubs for this oop map, racing against
// other threads. Exactly one thread wins the cmpxchg below and generates.
void ImmutableOopMap::generate_stub(const CodeBlob* cb) const {
  /* The address of the ImmutableOopMap is put into the _freeze_stub and _thaw_stub 
   * if we can't generate the stub for some reason */
  if (_freeze_stub == NULL) {
    OopMapStubGenerator cgen(cb, *this);
    // cmpxchg(new_value, dest, compare_value): install `this` as a sentinel
    // in _freeze_stub; only the thread that observes NULL proceeds.
    if (Atomic::cmpxchg((address) this, &_freeze_stub, (address) NULL) == NULL) {
      if (!cgen.generate()) {
        // Generation failed: mirror the sentinel into _thaw_stub (so callers
        // see "attempted, unavailable") and release the generator's buffer.
        Atomic::store((address) this, &_thaw_stub);
        cgen.free();
        return;
      }

      // Publish the generated stub entry points.
      Atomic::store(cgen.freeze_stub(), &_freeze_stub);
      Atomic::store(cgen.thaw_stub(), &_thaw_stub);
    }
  }
}
 546 
 547 void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
 548                               OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
 549   AddDerivedOop add_derived_oop;
 550   if (derived_oop_fn == NULL) {
 551     derived_oop_fn = &add_derived_oop;
 552   }
 553   OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
 554   visitor.oops_do(fr, reg_map, this);
 555 }
 556 
 557 template<typename T>
 558 static void iterate_all_do(const frame *fr, int mask, OopMapClosure* fn, const ImmutableOopMap* oopmap) {
 559   OopMapValue omv;
 560   for (T oms(oopmap,mask); !oms.is_done(); oms.next()) {
 561       omv = oms.current();
 562       fn->do_value(omv.reg(), omv.type());













































 563   }
 564 }
 565 
 566 void ImmutableOopMap::all_do(const frame *fr, int mask, OopMapClosure* fn) const {
 567   if (_exploded != NULL) {
 568     iterate_all_do<ExplodedOopMapStream>(fr, mask, fn, this);
 569   } else {
 570     iterate_all_do<OopMapStream>(fr, mask, fn, this);
 571   }
 572 }
 573 
 574 template <typename T>
 575 static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
 576   for (T oms(oopmap, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
 577     OopMapValue omv = oms.current();
 578     VMReg reg = omv.content_reg();
 579     oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
 580     reg_map->set_location(reg, (address) loc);
 581     //DEBUG_ONLY(nof_callee++;)
 582   }
 583 }
 584 
// Record in reg_map the frame locations of all callee-saved registers
// described by this oop map.
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  // ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());


  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  // NOTE(review): nof_callee is declared but never incremented -- the
  // counting moved into update_register_map1(), where the increment is
  // commented out -- so the COMPILER2 assert below always sees 0. Confirm
  // whether it can now fire spuriously for C2 runtime stubs.
  DEBUG_ONLY(int nof_callee = 0;)
  if (_exploded != NULL) {
    update_register_map1<ExplodedOopMapStream>(this, fr, reg_map);
  } else {
    update_register_map1<OopMapStream>(this, fr, reg_map);
  }

  /*
  for (OopMapStream oms(this, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    VMReg reg = omv.content_reg();
    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
    reg_map->set_location(reg, (address) loc);
    DEBUG_ONLY(nof_callee++;)
  }
  */

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb == NULL || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
 627 
// Convenience overload: look up the oop map for the frame's code blob and pc.
const ImmutableOopMap* OopMapSet::find_map(const frame *fr) { 
  return find_map(fr->cb(), fr->pc()); 
}
 631 
// Look up the ImmutableOopMap covering the given return address within cb.
// Both the blob and the map must exist.
const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != NULL, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  return map;
}
 638 
// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}
 643 
 644 //=============================================================================
 645 // Non-Product code
 646 
 647 #ifndef PRODUCT
 648 
 649 bool ImmutableOopMap::has_derived_pointer() const {
 650 #if !defined(TIERED) && !INCLUDE_JVMCI
 651   COMPILER1_PRESENT(return false);
 652 #endif // !TIERED
 653 #if COMPILER2_OR_JVMCI
 654   OopMapStream oms(this,OopMapValue::derived_oop_value);
 655   return oms.is_done();
 656 #else
 657   return false;
 658 #endif // COMPILER2_OR_JVMCI
 659 }
 660 
 661 #ifndef PRODUCT
 662 void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
 663   // Print oopmap and regmap
 664   tty->print_cr("------ ");
 665   CodeBlob* cb = fr->cb();
 666   const ImmutableOopMapSet* maps = cb->oop_maps();
 667   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
 668   map->print();
 669   if( cb->is_nmethod() ) {
 670     nmethod* nm = (nmethod*)cb;
 671     // native wrappers have no scope data, it is implied
 672     if (nm->is_native_method()) {
 673       tty->print("bci: 0 (native)");
 674     } else {
 675       ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
 676       tty->print("bci: %d ",scope->bci());
 677     }
 678   }
 679   tty->cr();
 680   fr->print_on(tty);
 681   tty->print("     ");
 682   cb->print_value_on(tty);  tty->cr();
 683   if (reg_map != NULL) {
 684     reg_map->print();
 685   }
 686   tty->print_cr("------ ");
 687 
 688 }
 689 #endif // PRODUCT
 690 
 691 
 692 #endif //PRODUCT
 693 
 694 // Printing code is present in product build for -XX:+PrintAssembly.
 695 
 696 static
 697 void print_register_type(OopMapValue::oop_types x, VMReg optional,
 698                          outputStream* st) {
 699   switch( x ) {
 700   case OopMapValue::oop_value:
 701     st->print("Oop");
 702     break;
 703   case OopMapValue::narrowoop_value:
 704     st->print("NarrowOop");
 705     break;
 706   case OopMapValue::callee_saved_value:
 707     st->print("Callers_");
 708     optional->print_on(st);
 709     break;
 710   case OopMapValue::derived_oop_value:
 711     st->print("Derived_oop_");


 784     st->cr();
 785   }
 786   st->cr();
 787 }
 788 
// Print this set to the tty.
void OopMapSet::print() const { print_on(tty); }
 790 
 791 bool OopMap::equals(const OopMap* other) const {
 792   if (other->_omv_count != _omv_count) {
 793     return false;
 794   }
 795   if (other->write_stream()->position() != write_stream()->position()) {
 796     return false;
 797   }
 798   if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
 799     return false;
 800   }
 801   return true;
 802 }
 803 
 804 int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
 805   ImmutableOopMapPair* pairs = get_pairs();
 806 
 807   for (int i = 0; i < _count; ++i) {
 808     if (pairs[i].pc_offset() >= pc_offset) {
 809       ImmutableOopMapPair* last = &pairs[i];
 810       assert(last->pc_offset() == pc_offset, "oopmap not found");
 811       return i;
 812     }
 813   }
 814 
 815   guarantee(false, "failed to find oopmap for pc");
 816   return -1;
 817 }
 818 
 819 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
 820   ImmutableOopMapPair* pairs = get_pairs();
 821   ImmutableOopMapPair* last  = NULL;
 822 
 823   for (int i = 0; i < _count; ++i) {
 824     if (pairs[i].pc_offset() >= pc_offset) {
 825       last = &pairs[i];
 826       break;
 827     }
 828   }
 829 
 830   // Heal Coverity issue: potential index out of bounds access.
 831   guarantee(last != NULL, "last may not be null");
 832   assert(last->pc_offset() == pc_offset, "oopmap not found");
 833   return last->get_from(this);
 834 }
 835 
 836 ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _exploded(NULL), _freeze_stub(NULL), _thaw_stub(NULL), _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
 837   _num_oops = oopmap->num_oops();



 838   address addr = data_addr();
 839   //oopmap->copy_data_to(addr);
 840   oopmap->copy_and_sort_data_to(addr);
 841 }
 842 
 843 #ifdef ASSERT
 844 int ImmutableOopMap::nr_of_bytes() const {
 845   OopMapStream oms(this);
 846 
 847   while (!oms.is_done()) {
 848     oms.next();
 849   }
 850   return sizeof(ImmutableOopMap) + oms.stream_position();
 851 }
 852 #endif
 853 
// Prepare to build an ImmutableOopMapSet from set; one Mapping slot is
// reserved per source OopMap, filled in by the analysis pass.
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}
 857 
// Bytes needed to store map as an ImmutableOopMap (header + entry data),
// rounded up to 8-byte alignment.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}


 913 
 914   new (addr) ImmutableOopMap(map);
 915   return size_for(map);
 916 }
 917 
 918 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
 919   ImmutableOopMapPair* pairs = set->get_pairs();
 920 
 921   for (int i = 0; i < set->count(); ++i) {
 922     const OopMap* map = _mapping[i]._map;
 923     ImmutableOopMapPair* pair = NULL;
 924     int size = 0;
 925 
 926     if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
 927       size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
 928     } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
 929       fill_pair(&pairs[i], map, _mapping[i]._offset, set);
 930     }
 931 
 932     const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
 933     //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
 934   }
 935 }
 936 
 937 #ifdef ASSERT
 938 void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
 939   for (int i = 0; i < 8; ++i) {
 940     assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
 941   }
 942 
 943   for (int i = 0; i < set->count(); ++i) {
 944     const ImmutableOopMapPair* pair = set->pair_at(i);
 945     assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
 946     const ImmutableOopMap* map = pair->get_from(set);
 947     int nr_of_bytes = map->nr_of_bytes();
 948     assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
 949   }
 950 }
 951 #endif
 952 
 953 ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {


1011   // empty.  If not, then we have probably forgotton to call
1012   // update_pointers after last GC/Scavenge.
1013   assert (!_active, "should not be active");
1014   assert(is_empty(), "table not empty");
1015   if (Entry::_list == NULL) {
1016     void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
1017     Entry::_list = ::new (mem) Entry::List();
1018   }
1019   _active = true;
1020 }
1021 
// Returns value of location as an int
// Reinterprets the oop bits stored at *pointer as a raw integer so that
// derived - base pointer arithmetic can be performed on them.
inline intptr_t value_of_loc(oop *pointer) {
  return cast_from_oop<intptr_t>((*pointer));
}
1026 
// Record a derived pointer so it can be reconstructed after GC moves its
// base: the derived slot is overwritten to point at the base slot, and a
// (location, offset) entry is pushed on the global lock-free list.
void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (oop)base_loc, "location already added");
    assert(Entry::_list != NULL, "list must exist");
    // Byte offset of the derived pointer from its base; may be negative.
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    Entry* entry = new Entry(derived_loc, offset);
    Entry::_list->push(*entry);
  }
}


< prev index next >