/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/lockFreeStack.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif

static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");

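// Helpers for treating derived_pointer values as raw integers: conversion to and
// from intptr_t, the difference of two derived pointers, and adding a byte offset.
// They keep the pointer arithmetic used for derived-oop bookkeeping in one place.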
static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

static inline derived_pointer to_derived_pointer(intptr_t obj) {
  return static_cast<derived_pointer>(obj);
}

static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}

// OopMapStream

OopMapStream::OopMapStream(const OopMap* oop_map)
  : _stream(oop_map->write_stream()->buffer()) {
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  : _stream(oop_map->data_addr()) {
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(&_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}

OopMap::OopMap(int data_size) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(data_size));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;
#ifdef ASSERT
  _locs_length = 0;
  _locs_used   = nullptr;
#endif
}

OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _has_derived_oops = source->has_derived_oops();
  _index = -1;

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

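// Helper used when flattening an OopMap into an ImmutableOopMap: it rewrites the
// entries in a canonical order - callee-saved entries first, then oop/narrow-oop
// entries ordered by location (registers before stack slots, stack slots by
// ascending offset), with each derived-oop entry placed right after its base.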
class OopMapSort {
private:
  const OopMap* _map;
  OopMapValue* _values;
  int _count;

public:
  OopMapSort(const OopMap* map) : _map(map), _count(0) {
    _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
  }

  void sort();

  void print();

  void write(CompressedWriteStream* stream) {
    for (int i = 0; i < _count; ++i) {
      _values[i].write_on(stream);
    }
  }

private:
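  // Return the position at which a derived-oop entry should be inserted: after its
  // base entry, and after any already-placed derived entries for the same base whose
  // location cost is not greater than this one's.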
  int find_derived_position(OopMapValue omv, int start) {
    assert(omv.type() == OopMapValue::derived_oop_value, "");

    VMReg base = omv.content_reg();
    int i = start;

    for (; i < _count; ++i) {
      if (base == _values[i].reg()) {

        for (int n = i + 1; n < _count; ++n) {
          if (_values[n].type() != OopMapValue::derived_oop_value || _values[n].content_reg() != base) {
            return n;
          }

          if (derived_cost(_values[n]) > derived_cost(omv)) {
            return n;
          }
        }
        return _count;
      }
    }

    assert(false, "failed to find base");
    return -1;
  }

  int find_position(OopMapValue omv, int start) {
    assert(omv.type() != OopMapValue::derived_oop_value, "");

    int i = start;
    for (; i < _count; ++i) {
      if (omv_cost(_values[i]) > omv_cost(omv)) {
        return i;
      }
    }
    assert(i < _map->omv_count(), "bounds check");
    return i;
  }

  void insert(OopMapValue value, int pos) {
    assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
    assert(pos <= _count, "sanity");

    if (pos < _count) {
      OopMapValue prev = _values[pos];

      for (int i = pos; i < _count; ++i) {
        OopMapValue tmp = _values[i+1];
        _values[i+1] = prev;
        prev = tmp;
      }
    }
    _values[pos] = value;

    ++_count;
  }

  int omv_cost(OopMapValue omv) {
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
    return reg_cost(omv.reg());
  }

  int reg_cost(VMReg reg) {
    if (reg->is_reg()) {
      return 0;
    }
    return reg->reg2stack() * VMRegImpl::stack_slot_size;
  }

  int derived_cost(OopMapValue omv) {
    return reg_cost(omv.reg());
  }
};

void OopMapSort::sort() {
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    if (oms.current().type() == OopMapValue::callee_saved_value) {
      insert(oms.current(), _count);
    }
  }

  int start = _count;
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      int pos = find_position(omv, start);
      insert(omv, pos);
    }
  }

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::derived_oop_value) {
      int pos = find_derived_position(omv, start);
      assert(pos > 0, "");
      insert(omv, pos);
    }
  }
}

void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}

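// Copy this map's entries to addr in the canonical order produced by OopMapSort;
// the sorted stream must come out exactly as large as the unsorted one.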
void OopMap::copy_and_sort_data_to(address addr) const {
  OopMapSort sort(this);
  sort.sort();
  CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
  sort.write(stream);

  assert(stream->position() == write_stream()->position(), "");
  memcpy(addr, stream->buffer(), stream->position());
}

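// Number of bytes this OopMap occupies when embedded in the code heap:
// the header plus the compressed entry stream, rounded up to pointer alignment.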
int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

// OopMapSet

OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

OopMapSet::OopMapSet(int size) : _list(size) {}

int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}

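// Records each (derived pointer, base pointer) location pair in the
// DerivedPointerTable so the derived pointers can be recomputed after a GC
// that moves objects (COMPILER2_OR_JVMCI only).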
class AddDerivedOop : public DerivedOopClosure {
 public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};

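// Visits a derived pointer in place: the derived slot is temporarily rewritten to
// its base, the OopClosure is applied to the (possibly moving) base, and the saved
// offset is then re-applied to form the updated derived pointer.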
class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
    // offset, if the base pointer is processed in the first derived pointer.
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;
    *derived = derived_base;
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;
  }
};

class IgnoreDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
};

void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

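// For every callee-saved entry in the oopmap, record where the caller's value of
// that register is saved in this frame, so the RegisterMap can locate it later.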
static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, loc);
      //DEBUG_ONLY(nof_callee++;)
    }
  }
}

// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  DEBUG_ONLY(int nof_callee = 0;)
  update_register_map1(this, fr, reg_map);

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb == nullptr || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}

const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}

//=============================================================================
// Non-Product code

#ifndef PRODUCT
void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data, it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print("     ");
  cb->print_value_on(tty);  tty->cr();
  if (reg_map != nullptr) {
    reg_map->print();
  }
  tty->print_cr("------ ");

}
#endif // PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
  ImmutableOopMapPair* pairs = get_pairs();
  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      ImmutableOopMapPair* last = &pairs[i];
      return last->pc_offset() == pc_offset ? i : -1;
    }
  }
  return -1;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last  = nullptr;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
  _num_oops = oopmap->num_oops();
  _has_derived_oops = oopmap->has_derived_oops();
  address addr = data_addr();
  oopmap->copy_and_sort_data_to(addr);
}

bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    if (oms.current().type() == type) {
      return true;
    }
  }
  return false;
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

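// Compute the total number of bytes needed for the ImmutableOopMapSet: the set
// header, one pc/offset pair per map, and the map data itself, where the empty
// map and runs of identical maps are shared rather than copied.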
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

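// Write out the pc/offset pair table and the ImmutableOopMap bodies according to
// the mapping computed by heap_size(): new maps get their data emitted, while
// empty and duplicate maps only get a pair pointing at previously emitted data.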
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}

void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}

//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

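// A C-heap record of one derived pointer that has been rewritten to point at the
// slot holding its base: it remembers the location of the derived pointer and its
// byte offset from the base, and is linked into a lock-free stack until
// update_pointers() drains the table.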
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(nullptr) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == nullptr || Entry::_list->empty();
}

void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotten to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}

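// Called after GC has moved objects: each recorded derived-pointer location was
// rewritten by add() to point at the slot holding its base oop, so read the
// (possibly relocated) base through it, add back the saved offset, free the
// entries, and deactivate the table.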
void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was set up to point to the location of the base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI