src/hotspot/share/compiler/oopMap.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/nmethod.hpp"
  29 #include "code/scopeDesc.hpp"
  30 #include "compiler/oopMap.hpp"

  31 #include "gc/shared/collectedHeap.hpp"


  32 #include "memory/allocation.inline.hpp"
  33 #include "memory/iterator.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "memory/universe.hpp"
  36 #include "oops/compressedOops.hpp"

  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/signature.hpp"
  40 #include "runtime/stackWatermarkSet.inline.hpp"
  41 #include "utilities/align.hpp"
  42 #include "utilities/lockFreeStack.hpp"
  43 #ifdef COMPILER1
  44 #include "c1/c1_Defs.hpp"
  45 #endif
  46 #ifdef COMPILER2
  47 #include "opto/optoreg.hpp"
  48 #endif
  49 #if INCLUDE_JVMCI
  50 #include "jvmci/jvmci_globals.hpp"
  51 #endif
  52 
  53 static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
  54 
  55 static inline intptr_t derived_pointer_value(derived_pointer p) {
  56   return static_cast<intptr_t>(p);
  57 }
  58 
  59 static inline derived_pointer to_derived_pointer(oop obj) {
  60   return static_cast<derived_pointer>(cast_from_oop<intptr_t>(obj));
  61 }
  62 
  63 static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  64   return derived_pointer_value(p) - derived_pointer_value(p1);
  65 }
  66 
  67 static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  68   return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
  69 }
  70 
  71 // OopMapStream
  72 
  73 OopMapStream::OopMapStream(OopMap* oop_map) {
  74   _stream = new CompressedReadStream(oop_map->write_stream()->buffer());

  75   _size = oop_map->omv_count();
  76   _position = 0;
  77   _valid_omv = false;
  78 }
  79 
  80 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) {
  81   _stream = new CompressedReadStream(oop_map->data_addr());

  82   _size = oop_map->count();
  83   _position = 0;
  84   _valid_omv = false;
  85 }
  86 
  87 void OopMapStream::find_next() {
  88   if (_position++ < _size) {
  89     _omv.read_from(_stream);
  90     _valid_omv = true;
  91     return;
  92   }
  93   _valid_omv = false;
  94 }
  95 
  96 
  97 // OopMap
  98 
  99 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
 100 // slots to hold 4-byte values like ints and floats in the LP64 build.
 101 OopMap::OopMap(int frame_size, int arg_count) {
  102   // OopMaps are usually quite small, so pick a small initial size
 103   set_write_stream(new CompressedWriteStream(32));
 104   set_omv_count(0);
 105 
 106 #ifdef ASSERT
 107   _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
 108   _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
 109   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
 110 #endif
 111 }
 112 
 113 
 114 OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
 115   // This constructor does a deep copy
 116   // of the source OopMap.
 117   set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
 118   set_omv_count(0);
 119   set_offset(source->offset());
 120 
 121 #ifdef ASSERT
 122   _locs_length = source->_locs_length;
 123   _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
 124   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
 125 #endif
 126 
 127   // We need to copy the entries too.
 128   for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
 129     OopMapValue omv = oms.current();
 130     omv.write_on(write_stream());
 131     increment_count();
 132   }
 133 }
 134 
 135 
 136 OopMap* OopMap::deep_copy() {
 137   return new OopMap(_deep_copy_token, this);
 138 }
 139 
 140 void OopMap::copy_data_to(address addr) const {
 141   memcpy(addr, write_stream()->buffer(), write_stream()->position());
 142 }
 143 
 144 int OopMap::heap_size() const {
 145   int size = sizeof(OopMap);
 146   int align = sizeof(void *) - 1;
 147   size += write_stream()->position();
 148   // Align to a reasonable ending point
 149   size = ((size+align) & ~align);
 150   return size;
 151 }
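The rounding above is the standard align-up-to-pointer-size trick. A minimal standalone sketch of the same arithmetic (illustrative only, not part of this file):

    #include <cstdio>

    // Round size up to the next multiple of sizeof(void*), as OopMap::heap_size() does.
    static int align_up_to_pointer(int size) {
      int align = sizeof(void*) - 1;     // 7 on an LP64 build
      return (size + align) & ~align;    // e.g. 42 -> 48, 48 -> 48
    }

    int main() {
      printf("%d %d\n", align_up_to_pointer(42), align_up_to_pointer(48));  // prints: 48 48
    }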
 152 
 153 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
 154 // slots to hold 4-byte values like ints and floats in the LP64 build.
 155 void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
 156 
 157   assert(reg->value() < _locs_length, "too big reg value for stack size");
 158   assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
 159   debug_only( _locs_used[reg->value()] = x; )
 160 
 161   OopMapValue o(reg, x, optional);
 162   o.write_on(write_stream());
 163   increment_count();
 164 }
 165 
 166 
 167 void OopMap::set_oop(VMReg reg) {
 168   set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
 169 }
 170 
 171 
 172 void OopMap::set_narrowoop(VMReg reg) {
 173   set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
 174 }
 175 
 176 
 177 void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
 178   set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
 179 }
 180 
 181 
 182 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
 183   if( reg == derived_from_local_register ) {
  184     // Actually an oop; the derived value shares storage with its base.
 185     set_oop(reg);
 186   } else {
 187     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
 188   }
 189 }
 190 
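For orientation, a hedged sketch of how a code generator typically fills in one of these maps with the setters above; the slot numbers and the names frame_size_in_slots, arg_count, pc_offset and oop_maps are made-up placeholders, not values taken from this file:

    OopMap* map = new OopMap(frame_size_in_slots, arg_count);
    map->set_oop(VMRegImpl::stack2reg(3));              // stack slot 3 holds an oop
    map->set_narrowoop(VMRegImpl::stack2reg(5));        // slot 5 holds a compressed oop
    map->set_derived_oop(VMRegImpl::stack2reg(7),       // slot 7 is an interior pointer ...
                         VMRegImpl::stack2reg(3));      // ... derived from the oop in slot 3
    oop_maps->add_gc_map(pc_offset, map);               // attach the map to a safepoint pc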
 191 // OopMapSet
 192 
 193 OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}
 194 
 195 void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
 196   map->set_offset(pc_offset);
 197 
 198 #ifdef ASSERT
 199   if(_list.length() > 0) {
 200     OopMap* last = _list.last();
 201     if (last->offset() == map->offset() ) {
 202       fatal("OopMap inserted twice");
 203     }
 204     if (last->offset() > map->offset()) {
 205       tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
 206                       _list.length(),last->offset(),_list.length()+1,map->offset());
 207     }
 208   }
 209 #endif // ASSERT
 210 
 211   add(map);
 212 }
 213 
 214 static void add_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
 215 #if COMPILER2_OR_JVMCI
 216   DerivedPointerTable::add(derived, base);
 217 #endif // COMPILER2_OR_JVMCI
 218 }
 219 
 220 static void ignore_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
 221 }
 222 
 223 static void process_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
  224   // All derived pointers must be processed before the base pointer of any derived pointer is processed.
  225   // Otherwise, if two derived pointers share the same base, processing the base while handling the first
  226   // derived pointer would leave the second derived pointer with a garbled offset.
 227   derived_pointer derived_base = to_derived_pointer(*base);
 228   intptr_t offset = *derived - derived_base;
 229   *derived = derived_base;
 230   oop_fn->do_oop((oop*)derived);
 231   *derived = *derived + offset;
 232 }
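A worked example of the adjustment above, with made-up addresses: suppose the base slot holds 0x1000, the derived slot holds 0x1040 (an interior pointer 0x40 bytes into the object), and the closure relocates the object to 0x2000.

    // derived_base = 0x1000              (to_derived_pointer(*base))
    // offset       = 0x1040 - 0x1000     = 0x40
    // *derived     = 0x1000              (derived slot temporarily holds the base)
    // do_oop(derived)                    -> slot updated to the new base, 0x2000
    // *derived     = 0x2000 + 0x40       = 0x2040, the relocated interior pointer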
 233 
 234 
 235 #ifndef PRODUCT
 236 static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
 237   // Print oopmap and regmap
 238   tty->print_cr("------ ");
 239   CodeBlob* cb = fr->cb();
 240   const ImmutableOopMapSet* maps = cb->oop_maps();
 241   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
 242   map->print();
 243   if( cb->is_nmethod() ) {
 244     nmethod* nm = (nmethod*)cb;
  245     // native wrappers have no scope data; it is implied
 246     if (nm->is_native_method()) {
 247       tty->print("bci: 0 (native)");
 248     } else {
 249       ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
 250       tty->print("bci: %d ",scope->bci());
 251     }
 252   }
 253   tty->cr();
 254   fr->print_on(tty);
 255   tty->print("     ");
 256   cb->print_value_on(tty);  tty->cr();
 257   reg_map->print();
 258   tty->print_cr("------ ");
 259 
 260 }
 261 #endif // PRODUCT
 262 
 263 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
 264   switch (mode) {
 265   case DerivedPointerIterationMode::_directly:
 266     all_do(fr, reg_map, f, process_derived_oop);
 267     break;
 268   case DerivedPointerIterationMode::_with_table:
 269     all_do(fr, reg_map, f, add_derived_oop);
 270     break;
 271   case DerivedPointerIterationMode::_ignore:
 272     all_do(fr, reg_map, f, ignore_derived_oop);
 273     break;
 274   }


 275 }
 276 
 277 
 278 void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
 279                        OopClosure* oop_fn, void derived_oop_fn(oop*, derived_pointer*, OopClosure*)) {
 280   CodeBlob* cb = fr->cb();
 281   assert(cb != NULL, "no codeblob");
 282 
 283   NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
 284 
 285   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
 286   assert(map != NULL, "no ptr map found");
 287 
 288   // handle derived pointers first (otherwise base pointer may be
 289   // changed before derived pointer offset has been collected)
 290   {
 291     for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
 292       OopMapValue omv = oms.current();
 293       if (omv.type() != OopMapValue::derived_oop_value) {
 294         continue;
 295       }
 296 
 297 #ifndef COMPILER2
 298       COMPILER1_PRESENT(ShouldNotReachHere();)
 299 #if INCLUDE_JVMCI
 300       if (UseJVMCICompiler) {
 301         ShouldNotReachHere();
 302       }
 303 #endif
 304 #endif // !COMPILER2
 305       derived_pointer* derived_loc = (derived_pointer*)fr->oopmapreg_to_location(omv.reg(),reg_map);
 306       guarantee(derived_loc != NULL, "missing saved register");
 307       oop* base_loc = fr->oopmapreg_to_oop_location(omv.content_reg(), reg_map);
  308       // Ignore NULL oops and decoded NULL narrow oops which
  309       // are equal to CompressedOops::base() when a narrow oop
  310       // implicit null check is used in compiled code.
  311       // The narrow_oop_base could be NULL or be the address
  312       // of the page below the heap depending on compressed oops mode.
 313       if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
 314         derived_oop_fn(base_loc, derived_loc, oop_fn);
 315       }
 316     }
 317   }

 318 
 319   {
 320     // We want coop and oop oop_types
 321     for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
 322       OopMapValue omv = oms.current();
 323       oop* loc = fr->oopmapreg_to_oop_location(omv.reg(),reg_map);
 324       // It should be an error if no location can be found for a
  325       // register mentioned as containing an oop of some kind.  Maybe
 326       // this was allowed previously because value_value items might
 327       // be missing?
 328       guarantee(loc != NULL, "missing saved register");
 329       if ( omv.type() == OopMapValue::oop_value ) {
 330         oop val = *loc;
 331         if (val == NULL || CompressedOops::is_base(val)) {
  332           // Ignore NULL oops and decoded NULL narrow oops which
  333           // are equal to CompressedOops::base() when a narrow oop
  334           // implicit null check is used in compiled code.
  335           // The narrow_oop_base could be NULL or be the address
  336           // of the page below the heap depending on compressed oops mode.
 337           continue;
 338         }
 339         oop_fn->do_oop(loc);
 340       } else if ( omv.type() == OopMapValue::narrowoop_value ) {
 341         narrowOop *nl = (narrowOop*)loc;
 342 #ifndef VM_LITTLE_ENDIAN
 343         VMReg vmReg = omv.reg();
 344         if (!vmReg->is_stack()) {
 345           // compressed oops in registers only take up 4 bytes of an
 346           // 8 byte register but they are in the wrong part of the
 347           // word so adjust loc to point at the right place.
 348           nl = (narrowOop*)((address)nl + 4);
 349         }
 350 #endif
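        // (On a big-endian LP64 machine the meaningful low 32 bits of the 8-byte
        //  register save slot live at the higher address, hence the +4 above.)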
 351         oop_fn->do_oop(nl);
 352       }
 353     }
 354   }
 355 }
 356 
 357 
 358 // Update callee-saved register info for the following frame
 359 void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
 360   ResourceMark rm;
 361   CodeBlob* cb = fr->cb();
 362   assert(cb != NULL, "no codeblob");
 363 
 364   // Any reg might be saved by a safepoint handler (see generate_handler_blob).
 365   assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
 366          "already updated this map; do not 'update' it twice!" );
 367   debug_only(reg_map->_update_for_id = fr->id());
 368 

 369   // Check if caller must update oop argument
 370   assert((reg_map->include_argument_oops() ||
 371           !cb->caller_must_gc_arguments(reg_map->thread())),
 372          "include_argument_oops should already be set");
 373 
 374   // Scan through oopmap and find location of all callee-saved registers
 375   // (we do not do update in place, since info could be overwritten)
 376 
 377   address pc = fr->pc();
 378   const ImmutableOopMap* map  = cb->oop_map_for_return_address(pc);
 379   assert(map != NULL, "no ptr map found");
 380   DEBUG_ONLY(int nof_callee = 0;)

 381 
 382   for (OopMapStream oms(map); !oms.is_done(); oms.next()) {

 383     OopMapValue omv = oms.current();
 384     if (omv.type() == OopMapValue::callee_saved_value) {
 385       VMReg reg = omv.content_reg();
 386       oop* loc = fr->oopmapreg_to_oop_location(omv.reg(), reg_map);
 387       reg_map->set_location(reg, (address) loc);
 388       DEBUG_ONLY(nof_callee++;)
 389     }
 390   }

 391 
 392   // Check that runtime stubs save all callee-saved registers
 393 #ifdef COMPILER2
 394   assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
 395          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
 396          "must save all");
 397 #endif // COMPILER2
 398 }
 399 
 400 // Printing code is present in product build for -XX:+PrintAssembly.
 401 
 402 static
 403 void print_register_type(OopMapValue::oop_types x, VMReg optional,
 404                          outputStream* st) {
 405   switch( x ) {
 406   case OopMapValue::oop_value:
 407     st->print("Oop");
 408     break;
 409   case OopMapValue::narrowoop_value:
 410     st->print("NarrowOop");
 411     break;
 412   case OopMapValue::callee_saved_value:
 413     st->print("Callers_");
 414     optional->print_on(st);
 415     break;
 416   case OopMapValue::derived_oop_value:
 417     st->print("Derived_oop_");
 418     optional->print_on(st);
 419     break;

 490     st->cr();
 491   }
 492   st->cr();
 493 }
 494 
 495 void OopMapSet::print() const { print_on(tty); }
 496 
 497 bool OopMap::equals(const OopMap* other) const {
 498   if (other->_omv_count != _omv_count) {
 499     return false;
 500   }
 501   if (other->write_stream()->position() != write_stream()->position()) {
 502     return false;
 503   }
 504   if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
 505     return false;
 506   }
 507   return true;
 508 }
 509 
 510 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
 511   ImmutableOopMapPair* pairs = get_pairs();
 512   ImmutableOopMapPair* last  = NULL;
 513 
 514   for (int i = 0; i < _count; ++i) {
 515     if (pairs[i].pc_offset() >= pc_offset) {
 516       last = &pairs[i];
 517       break;
 518     }
 519   }
 520 
 521   // Heal Coverity issue: potential index out of bounds access.
 522   guarantee(last != NULL, "last may not be null");
 523   assert(last->pc_offset() == pc_offset, "oopmap not found");
 524   return last->get_from(this);
 525 }
 526 
 527 const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
 528   return set->oopmap_at_offset(_oopmap_offset);
 529 }
 530 
 531 ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
 532   address addr = data_addr();
 533   oopmap->copy_data_to(addr);
 534 }
 535 
 536 #ifdef ASSERT
 537 int ImmutableOopMap::nr_of_bytes() const {
 538   OopMapStream oms(this);
 539 
 540   while (!oms.is_done()) {
 541     oms.next();
 542   }
 543   return sizeof(ImmutableOopMap) + oms.stream_position();
 544 }

 545 #endif
 546 
 547 ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
 548   _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
 549 }
 550 
 551 int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
 552   return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
 553 }
 554 
 555 int ImmutableOopMapBuilder::heap_size() {
 556   int base = sizeof(ImmutableOopMapSet);
 557   base = align_up(base, 8);
 558 
  559   // all of our pc / offset pairs
 560   int pairs = _set->size() * sizeof(ImmutableOopMapPair);
 561   pairs = align_up(pairs, 8);
 562 
 563   for (int i = 0; i < _set->size(); ++i) {
 564     int size = 0;

 606 
 607   new (addr) ImmutableOopMap(map);
 608   return size_for(map);
 609 }
 610 
 611 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
 612   ImmutableOopMapPair* pairs = set->get_pairs();
 613 
 614   for (int i = 0; i < set->count(); ++i) {
 615     const OopMap* map = _mapping[i]._map;
 616     ImmutableOopMapPair* pair = NULL;
 617     int size = 0;
 618 
 619     if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
 620       size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
 621     } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
 622       fill_pair(&pairs[i], map, _mapping[i]._offset, set);
 623     }
 624 
 625     const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
 626     assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
 627   }
 628 }
 629 
 630 #ifdef ASSERT
 631 void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
 632   for (int i = 0; i < 8; ++i) {
 633     assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
 634   }
 635 
 636   for (int i = 0; i < set->count(); ++i) {
 637     const ImmutableOopMapPair* pair = set->pair_at(i);
 638     assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
 639     const ImmutableOopMap* map = pair->get_from(set);
 640     int nr_of_bytes = map->nr_of_bytes();
 641     assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
 642   }
 643 }
 644 #endif
 645 
 646 ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {

src/hotspot/share/compiler/oopMap.cpp (revised version)

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/nmethod.hpp"
  29 #include "code/scopeDesc.hpp"
  30 #include "compiler/oopMap.hpp"
  31 #include "compiler/oopMap.inline.hpp"
  32 #include "gc/shared/collectedHeap.hpp"
  33 #include "logging/log.hpp"
  34 #include "logging/logStream.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/iterator.hpp"
  37 #include "memory/resourceArea.hpp"

  38 #include "oops/compressedOops.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/signature.hpp"
  43 #include "runtime/stackWatermarkSet.inline.hpp"
  44 #include "utilities/align.hpp"
  45 #include "utilities/lockFreeStack.hpp"
  46 #ifdef COMPILER1
  47 #include "c1/c1_Defs.hpp"
  48 #endif
  49 #ifdef COMPILER2
  50 #include "opto/optoreg.hpp"
  51 #endif
  52 #if INCLUDE_JVMCI
  53 #include "jvmci/jvmci_globals.hpp"
  54 #endif
  55 
  56 static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
  57 
  58 static inline intptr_t derived_pointer_value(derived_pointer p) {
  59   return static_cast<intptr_t>(p);
  60 }
  61 
  62 static inline derived_pointer to_derived_pointer(oop obj) {
  63   return static_cast<derived_pointer>(cast_from_oop<intptr_t>(obj));
  64 }
  65 
  66 static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  67   return derived_pointer_value(p) - derived_pointer_value(p1);
  68 }
  69 
  70 static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  71   return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
  72 }
  73 
  74 // OopMapStream
  75 
  76 OopMapStream::OopMapStream(const OopMap* oop_map)
  77   : _stream(oop_map->write_stream()->buffer()) {
  78   // _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  79   _size = oop_map->omv_count();
  80   _position = 0;
  81   _valid_omv = false;
  82 }
  83 
  84 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  85   : _stream(oop_map->data_addr()) {
  86   // _stream = new CompressedReadStream(oop_map->data_addr());
  87   _size = oop_map->count();
  88   _position = 0;
  89   _valid_omv = false;
  90 }
  91 
  92 void OopMapStream::find_next() {
  93   if (_position++ < _size) {
  94     _omv.read_from(&_stream);
  95     _valid_omv = true;
  96     return;
  97   }
  98   _valid_omv = false;
  99 }
 100 
 101 
 102 // OopMap
 103 
 104 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
 105 // slots to hold 4-byte values like ints and floats in the LP64 build.
 106 OopMap::OopMap(int frame_size, int arg_count) {
  107   // OopMaps are usually quite small, so pick a small initial size
 108   set_write_stream(new CompressedWriteStream(32));
 109   set_omv_count(0);
 110   _num_oops = 0;
 111   _has_derived_oops = false;
 112   _index = -1;
 113 
 114 #ifdef ASSERT
 115   _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
 116   _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
 117   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
 118 #endif
 119 }
 120 
 121 
 122 OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
 123   // This constructor does a deep copy
 124   // of the source OopMap.
 125   set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
 126   set_omv_count(0);
 127   set_offset(source->offset());
 128   _num_oops = source->num_oops();
 129   _has_derived_oops = source->has_derived_oops();
 130   _index = -1;
 131 
 132 #ifdef ASSERT
 133   _locs_length = source->_locs_length;
 134   _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
 135   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
 136 #endif
 137 
 138   // We need to copy the entries too.
 139   for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
 140     OopMapValue omv = oms.current();
 141     omv.write_on(write_stream());
 142     increment_count();
 143   }
 144 }
 145 
 146 
 147 OopMap* OopMap::deep_copy() {
 148   return new OopMap(_deep_copy_token, this);
 149 }
 150 
 151 void OopMap::copy_data_to(address addr) const {
 152   memcpy(addr, write_stream()->buffer(), write_stream()->position());
 153 }
 154 
 155 class OopMapSort {
 156 private:
 157   const OopMap* _map;
 158   OopMapValue* _values;
 159   int _count;
 160 
 161 public:
 162   OopMapSort(const OopMap* map) : _map(map), _count(0) {
 163     _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
 164   }
 165 
 166   void sort();
 167 
 168   void print();
 169 
 170   void write(CompressedWriteStream* stream) {
 171     for (int i = 0; i < _count; ++i) {
 172       _values[i].write_on(stream);
 173     }
 174   }
 175 
 176 private:
 177   int find_derived_position(OopMapValue omv, int start) {
 178     assert(omv.type() == OopMapValue::derived_oop_value, "");
 179 
 180     VMReg base = omv.content_reg();
 181     int i = start;
 182 
 183     for (; i < _count; ++i) {
 184       if (base == _values[i].reg()) {
 185 
 186         for (int n = i + 1; n < _count; ++n) {
 187           if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) {
 188             return n;
 189           }
 190 
 191           if (derived_cost(_values[i]) > derived_cost(omv)) {
 192             return n;
 193           }
 194         }
 195         return _count;
 196       }
 197     }
 198 
 199     assert(false, "failed to find base");
 200     return -1;
 201   }
 202 
 203   int find_position(OopMapValue omv, int start) {
 204     assert(omv.type() != OopMapValue::derived_oop_value, "");
 205 
 206     int i = start;
 207     for (; i < _count; ++i) {
 208       if (omv_cost(_values[i]) > omv_cost(omv)) {
 209         return i;
 210       }
 211     }
 212     assert(i < _map->omv_count(), "bounds check");
 213     return i;
 214   }
 215 
 216   void insert(OopMapValue value, int pos) {
 217     assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
 218     assert(pos <= _count, "sanity");
 219 
 220     if (pos < _count) {
 221       OopMapValue prev = _values[pos];
 222 
 223       for (int i = pos; i < _count; ++i) {
 224         OopMapValue tmp = _values[i+1];
 225         _values[i+1] = prev;
 226         prev = tmp;
 227       }
 228     }
 229     _values[pos] = value;
 230 
 231     ++_count;
 232   }
 233 
 234   int omv_cost(OopMapValue omv) {
 235     assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
 236     return reg_cost(omv.reg());
 237   }
 238 
 239   int reg_cost(VMReg reg) {
 240     if (reg->is_reg()) {
 241       return 0;
 242     }
 243     return reg->reg2stack() * VMRegImpl::stack_slot_size;
 244   }
 245 
 246   int derived_cost(OopMapValue omv) {
 247     return reg_cost(omv.reg());
 248   }
 249 };
 250 
 251 void OopMapSort::sort() {
 252   for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
 253     OopMapValue omv = oms.current();
 254     assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
 255   }
 256 
 257   for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
 258     if (oms.current().type() == OopMapValue::callee_saved_value) {
 259       insert(oms.current(), _count);
 260     }
 261   }
 262 
 263   int start = _count;
 264   for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
 265     OopMapValue omv = oms.current();
 266     if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
 267       int pos = find_position(omv, start);
 268       insert(omv, pos);
 269     }
 270   }
 271 
 272   for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
 273     OopMapValue omv = oms.current();
 274     if (omv.type() == OopMapValue::derived_oop_value) {
 275       int pos = find_derived_position(omv, start);
 276       assert(pos > 0, "");
 277       insert(omv, pos);
 278     }
 279   }
 280 }
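A made-up example of the order sort() produces (register and slot numbers invented, not taken from a real compilation):

    //   input entries : oop@slot8, derived@slot9(base=slot2), callee_saved rbp->slot0, oop@slot2
    //   sorted output : callee_saved rbp->slot0, oop@slot2, derived@slot9(base=slot2), oop@slot8

Callee-saved entries come first, then oops and narrow oops ordered by cost (registers before stack slots, lower stack offsets first), and each derived entry is placed right after the entry for its base.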
 281 
 282 void OopMapSort::print() {
 283   for (int i = 0; i < _count; ++i) {
 284     OopMapValue omv = _values[i];
 285     if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
 286       if (omv.reg()->is_reg()) {
 287         tty->print_cr("[%c][%d] -> reg (" INTPTR_FORMAT ")", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
 288       } else {
 289         tty->print_cr("[%c][%d] -> stack ("  INTPTR_FORMAT ")", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
 290       }
 291     } else {
 292       if (omv.content_reg()->is_reg()) {
 293         tty->print_cr("[d][%d] -> reg (" INTPTR_FORMAT ") stack (" INTPTR_FORMAT ")", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
 294       } else if (omv.reg()->is_reg()) {
 295         tty->print_cr("[d][%d] -> stack (" INTPTR_FORMAT ") reg (" INTPTR_FORMAT ")", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
 296       } else {
 297         int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
 298         int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
 299         tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
 300       }
 301     }
 302   }
 303 }
 304 
 305 void OopMap::copy_and_sort_data_to(address addr) const {
 306   OopMapSort sort(this);
 307   sort.sort();
 308   CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
 309   sort.write(stream);
 310 
 311   assert(stream->position() == write_stream()->position(), "");
 312   memcpy(addr, stream->buffer(), stream->position());
 313   //copy_data_to(addr);
 314   //sort.print();
 315 }
 316 
 317 int OopMap::heap_size() const {
 318   int size = sizeof(OopMap);
 319   int align = sizeof(void *) - 1;
 320   size += write_stream()->position();
 321   // Align to a reasonable ending point
 322   size = ((size+align) & ~align);
 323   return size;
 324 }
 325 
 326 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
 327 // slots to hold 4-byte values like ints and floats in the LP64 build.
 328 void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
 329 
 330   assert(reg->value() < _locs_length, "too big reg value for stack size");
 331   assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
 332   debug_only( _locs_used[reg->value()] = x; )
 333 
 334   OopMapValue o(reg, x, optional);
 335   o.write_on(write_stream());
 336   increment_count();
 337   if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
 338     increment_num_oops();
 339   } else if (x == OopMapValue::derived_oop_value) {
 340     set_has_derived_oops(true);
 341   }
 342 }
 343 
 344 
 345 void OopMap::set_oop(VMReg reg) {
 346   set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
 347 }
 348 
 349 
 350 // void OopMap::set_value(VMReg reg) {
 351 //   set_xxx(reg, OopMapValue::live_value, VMRegImpl::Bad());
 352 // }
 353 
 354 
 355 void OopMap::set_narrowoop(VMReg reg) {
 356   set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
 357 }
 358 
 359 
 360 void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
 361   set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
 362 }
 363 
 364 
 365 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
 366   if( reg == derived_from_local_register ) {
  367     // Actually an oop; the derived value shares storage with its base.
 368     set_oop(reg);
 369   } else {
 370     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
 371   }
 372 }
 373 
 374 // OopMapSet
 375 
 376 OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}
 377 
 378 int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
 379   map->set_offset(pc_offset);
 380 
 381 #ifdef ASSERT
 382   if(_list.length() > 0) {
 383     OopMap* last = _list.last();
 384     if (last->offset() == map->offset() ) {
 385       fatal("OopMap inserted twice");
 386     }
 387     if (last->offset() > map->offset()) {
 388       tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
 389                       _list.length(),last->offset(),_list.length()+1,map->offset());
 390     }
 391   }
 392 #endif // ASSERT
 393 
 394   int index = add(map);
 395   map->_index = index;
 396   return index;
 397 }
 398 
 399 class AddDerivedOop : public DerivedOopClosure {
 400  public:
 401   enum {
 402     SkipNull = true, NeedsLock = true
 403   };
 404 
 405   virtual void do_derived_oop(oop* base, derived_pointer* derived) {
 406 #if COMPILER2_OR_JVMCI
 407     DerivedPointerTable::add(derived, base);
 408 #endif // COMPILER2_OR_JVMCI
 409   }
 410 };
 411 
 412 class ProcessDerivedOop : public DerivedOopClosure {
 413   OopClosure* _oop_cl;
 414 public:
 415   ProcessDerivedOop(OopClosure* oop_cl) :
 416       _oop_cl(oop_cl) {}
 417 
 418   enum {
 419     SkipNull = true, NeedsLock = true
 420   };
 421 
 422   virtual void do_derived_oop(oop* base, derived_pointer* derived) {
  423     // All derived pointers must be processed before the base pointer of any derived pointer is processed.
  424     // Otherwise, if two derived pointers share the same base, processing the base while handling the first
  425     // derived pointer would leave the second derived pointer with a garbled offset.
 426     derived_pointer derived_base = to_derived_pointer(*base);
 427     intptr_t offset = *derived - derived_base;
 428     *derived = derived_base;
 429     _oop_cl->do_oop((oop*)derived);
 430     *derived = *derived + offset;
 431   }
 432 };
 433 
 434 class IgnoreDerivedOop : public DerivedOopClosure {
 435   OopClosure* _oop_cl;
 436 public:
 437   enum {
 438         SkipNull = true, NeedsLock = true
 439   };
 440 
 441   virtual void do_derived_oop(oop* base, derived_pointer* derived) {}
 442 };
 443 
 444 void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
 445   find_map(fr)->oops_do(fr, reg_map, f, mode);
 446 }
 447 
 448 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
 449   find_map(fr)->oops_do(fr, reg_map, f, df);
 450 }
 451 
 452 // void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
 453 //                        OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn,
 454 //                        OopClosure* value_fn) {
 455 //   find_map(fr)->oops_do(fr, reg_map, oop_fn, derived_oop_fn, value_fn);
 456 // }
 457 
 458 // NULL, fail, success (address)
 459 // void ImmutableOopMap::generate_stub(const CodeBlob* cb) const {
 460 //   /* The address of the ImmutableOopMap is put into the _freeze_stub and _thaw_stub
 461 //    * if we can't generate the stub for some reason */
 462 //   address default_value = Continuations::default_freeze_oops_stub();
 463 //   address slow_value = Continuations::freeze_oops_slow();
 464 
 465 //   assert(default_value != slow_value, "should not reach here!");
 466 
 467 //   if (_freeze_stub == default_value) {
 468 //     OopMapStubGenerator cgen(cb, *this);
 469 //     // lock this by putting the slow path in place
 470 //     if (Atomic::cmpxchg(&_freeze_stub, default_value, slow_value) == default_value) {
 471 //       if (!cgen.generate()) {
 472 //         Atomic::store(&_thaw_stub, (address) Continuations::thaw_oops_slow());
 473 //         cgen.free();
 474 //         return;
 475 //       }
 476 
 477 //       Atomic::store(&_freeze_stub, cgen.freeze_stub());
 478 //       Atomic::store(&_thaw_stub, cgen.thaw_stub());
 479 //     }
 480 //   }
 481 // }
 482 
 483 void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
 484                               OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
 485   assert(derived_oop_fn != NULL, "sanity");
 486   OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
 487   visitor.oops_do(fr, reg_map, this);
 488 }
 489 
 490 void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
 491                               OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
 492   ProcessDerivedOop process_cl(oop_fn);
 493   AddDerivedOop add_cl;
 494   IgnoreDerivedOop ignore_cl;
 495   DerivedOopClosure* derived_cl;
 496   switch (derived_mode) {
 497   case DerivedPointerIterationMode::_directly:
 498     derived_cl = &process_cl;
 499     break;
 500   case DerivedPointerIterationMode::_with_table:
 501     derived_cl = &add_cl;
 502     break;
 503   case DerivedPointerIterationMode::_ignore:
 504     derived_cl = &ignore_cl;
 505     break;
 506   }
 507   OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
 508   visitor.oops_do(fr, reg_map, this);
 509 }
 510 
 511 void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
 512   OopMapValue omv;
 513   for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
 514     omv = oms.current();
 515     if (fn->handle_type(omv.type())) {
 516       fn->do_value(omv.reg(), omv.type());
 517     }
 518   }
 519 }
 520 
 521 void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
 522   OopMapValue omv;
 523   for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
 524     omv = oms.current();
 525     if (omv.type() == type) {
 526       fn->do_value(omv.reg(), omv.type());
 527     }
 528   }
 529 }
 530 
 531 static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
 532   for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
 533     OopMapValue omv = oms.current();
 534     if (omv.type() == OopMapValue::callee_saved_value) {
 535       VMReg reg = omv.content_reg();
 536       address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
 537       reg_map->set_location(reg, loc);
 538       //DEBUG_ONLY(nof_callee++;)
 539     }
 540   }
 541 }
 542 
 543 // Update callee-saved register info for the following frame
 544 void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
 545   // ResourceMark rm;
 546   CodeBlob* cb = fr->cb();
 547   assert(cb != NULL, "no codeblob");

 548   // Any reg might be saved by a safepoint handler (see generate_handler_blob).
 549   assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
 550          "already updated this map; do not 'update' it twice!" );
 551   debug_only(reg_map->_update_for_id = fr->id());
 552 
 553 
 554   // Check if caller must update oop argument
 555   assert((reg_map->include_argument_oops() ||
 556           !cb->caller_must_gc_arguments(reg_map->thread())),
 557          "include_argument_oops should already be set");
 558 
 559   // Scan through oopmap and find location of all callee-saved registers
 560   // (we do not do update in place, since info could be overwritten)
 561 
 562   DEBUG_ONLY(int nof_callee = 0;)
 563   update_register_map1(this, fr, reg_map);
 564 
 565   /*
 566   for (OopMapStream oms(this, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
 567     OopMapValue omv = oms.current();
 568     VMReg reg = omv.content_reg();
 569     oop* loc = fr->oopmapreg_to_oop_location(omv.reg(), reg_map);
 570     reg_map->set_location(reg, (address) loc);
 571     DEBUG_ONLY(nof_callee++;)


 572   }
 573   */
 574 
 575   // Check that runtime stubs save all callee-saved registers
 576 #ifdef COMPILER2
 577   assert(cb == NULL || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
 578          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
 579          "must save all");
 580 #endif // COMPILER2
 581 }
 582 
 583 const ImmutableOopMap* OopMapSet::find_map(const frame *fr) { 
 584   return find_map(fr->cb(), fr->pc()); 
 585 }
 586 
 587 const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
 588   assert(cb != NULL, "no codeblob");
 589   const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
 590   assert(map != NULL, "no ptr map found");
 591   return map;
 592 }
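A hedged sketch of the lookup flow these helpers enable; fr is a compiled frame, reg_map a RegisterMap for its thread, and closure an OopClosure supplied by the GC, all assumed to exist in the caller rather than defined in this file:

    const ImmutableOopMap* map = OopMapSet::find_map(fr->cb(), fr->pc());
    map->update_register_map(fr, reg_map);      // record callee-saved register locations
    map->oops_do(fr, reg_map, closure,          // visit every oop, narrow oop and derived oop
                 DerivedPointerIterationMode::_with_table);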
 593 
 594 // Update callee-saved register info for the following frame
 595 void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
 596   find_map(fr)->update_register_map(fr, reg_map);
 597 }
 598 
 599 //=============================================================================
 600 // Non-Product code
 601 
 602 #ifndef PRODUCT
 603 void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
 604   // Print oopmap and regmap
 605   tty->print_cr("------ ");
 606   CodeBlob* cb = fr->cb();
 607   const ImmutableOopMapSet* maps = cb->oop_maps();
 608   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
 609   map->print();
 610   if( cb->is_nmethod() ) {
 611     nmethod* nm = (nmethod*)cb;
  612     // native wrappers have no scope data; it is implied
 613     if (nm->is_native_method()) {
 614       tty->print("bci: 0 (native)");
 615     } else {
 616       ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
 617       tty->print("bci: %d ",scope->bci());
 618     }
 619   }
 620   tty->cr();
 621   fr->print_on(tty);
 622   tty->print("     ");
 623   cb->print_value_on(tty);  tty->cr();
 624   if (reg_map != NULL) {
 625     reg_map->print();
 626   }
 627   tty->print_cr("------ ");
 628 
 629 }
 630 #endif // PRODUCT
 631 
 632 // Printing code is present in product build for -XX:+PrintAssembly.
 633 
 634 static
 635 void print_register_type(OopMapValue::oop_types x, VMReg optional,
 636                          outputStream* st) {
 637   switch( x ) {
 638   case OopMapValue::oop_value:
 639     st->print("Oop");
 640     break;
 641   case OopMapValue::narrowoop_value:
 642     st->print("NarrowOop");
 643     break;
 644   case OopMapValue::callee_saved_value:
 645     st->print("Callers_");
 646     optional->print_on(st);
 647     break;
 648   case OopMapValue::derived_oop_value:
 649     st->print("Derived_oop_");
 650     optional->print_on(st);
 651     break;

 722     st->cr();
 723   }
 724   st->cr();
 725 }
 726 
 727 void OopMapSet::print() const { print_on(tty); }
 728 
 729 bool OopMap::equals(const OopMap* other) const {
 730   if (other->_omv_count != _omv_count) {
 731     return false;
 732   }
 733   if (other->write_stream()->position() != write_stream()->position()) {
 734     return false;
 735   }
 736   if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
 737     return false;
 738   }
 739   return true;
 740 }
 741 
 742 int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
 743   ImmutableOopMapPair* pairs = get_pairs();
 744 
 745   for (int i = 0; i < _count; ++i) {
 746     if (pairs[i].pc_offset() >= pc_offset) {
 747       ImmutableOopMapPair* last = &pairs[i];
 748       assert(last->pc_offset() == pc_offset, "oopmap not found");
 749       return i;
 750     }
 751   }
 752 
 753   guarantee(false, "failed to find oopmap for pc");
 754   return -1;
 755 }
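Because the pairs are kept sorted by pc offset, the linear scan can stop at the first pair whose offset is not below the query. A made-up trace:

    //   pairs (pc offsets): {8, 24, 40}
    //   find_slot_for_offset(24) -> stops at index 1 (24 >= 24) and returns 1
    //   find_slot_for_offset(25) -> first pair with pc_offset >= 25 is 40, so the
    //                               "oopmap not found" assert fires in debug builds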
 756 
 757 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
 758   ImmutableOopMapPair* pairs = get_pairs();
 759   ImmutableOopMapPair* last  = NULL;
 760 
 761   for (int i = 0; i < _count; ++i) {
 762     if (pairs[i].pc_offset() >= pc_offset) {
 763       last = &pairs[i];
 764       break;
 765     }
 766   }
 767 
 768   // Heal Coverity issue: potential index out of bounds access.
 769   guarantee(last != NULL, "last may not be null");
 770   assert(last->pc_offset() == pc_offset, "oopmap not found");
 771   return last->get_from(this);
 772 }
 773 
 774 ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) 
 775   : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
 776   _num_oops = oopmap->num_oops();
 777   _has_derived_oops = oopmap->has_derived_oops();
 778   address addr = data_addr();
 779   //oopmap->copy_data_to(addr);
 780   oopmap->copy_and_sort_data_to(addr);
 781 }
 782 
 783 bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
 784   for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
 785     if (oms.current().type() == type)
 786       return true;
 787   }
 788   return false;
 789 }
 790 
 791 #ifdef ASSERT
 792 int ImmutableOopMap::nr_of_bytes() const {
 793   OopMapStream oms(this);
 794 
 795   while (!oms.is_done()) {
 796     oms.next();
 797   }
 798   return sizeof(ImmutableOopMap) + oms.stream_position();
 799 }
 800 
 801 #endif
 802 
 803 ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
 804   _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
 805 }
 806 
 807 int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
 808   return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
 809 }
 810 
 811 int ImmutableOopMapBuilder::heap_size() {
 812   int base = sizeof(ImmutableOopMapSet);
 813   base = align_up(base, 8);
 814 
  815   // all of our pc / offset pairs
 816   int pairs = _set->size() * sizeof(ImmutableOopMapPair);
 817   pairs = align_up(pairs, 8);
 818 
 819   for (int i = 0; i < _set->size(); ++i) {
 820     int size = 0;

 862 
 863   new (addr) ImmutableOopMap(map);
 864   return size_for(map);
 865 }
 866 
 867 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
 868   ImmutableOopMapPair* pairs = set->get_pairs();
 869 
 870   for (int i = 0; i < set->count(); ++i) {
 871     const OopMap* map = _mapping[i]._map;
 872     ImmutableOopMapPair* pair = NULL;
 873     int size = 0;
 874 
 875     if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
 876       size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
 877     } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
 878       fill_pair(&pairs[i], map, _mapping[i]._offset, set);
 879     }
 880 
 881     const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
 882     //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
 883   }
 884 }
 885 
 886 #ifdef ASSERT
 887 void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
 888   for (int i = 0; i < 8; ++i) {
 889     assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
 890   }
 891 
 892   for (int i = 0; i < set->count(); ++i) {
 893     const ImmutableOopMapPair* pair = set->pair_at(i);
 894     assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
 895     const ImmutableOopMap* map = pair->get_from(set);
 896     int nr_of_bytes = map->nr_of_bytes();
 897     assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
 898   }
 899 }
 900 #endif
 901 
 902 ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {