/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "oops/inlineKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/lockFreeStack.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif

static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
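
// The helpers below treat derived_pointer as a plain integer value so that the
// distance between a derived pointer and its base can be computed and re-applied
// without the intermediate value ever being interpreted as a valid oop.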

static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

static inline derived_pointer to_derived_pointer(intptr_t obj) {
  return static_cast<derived_pointer>(obj);
}

static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}

// OopMapStream

OopMapStream::OopMapStream(const OopMap* oop_map)
  : _stream(oop_map->write_stream()->buffer()) {
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  : _stream(oop_map->data_addr()) {
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(&_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _has_derived_oops = source->has_derived_oops();
  _index = -1;

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

class OopMapSort {
private:
  const OopMap* _map;
  OopMapValue* _values;
  int _count;

public:
  OopMapSort(const OopMap* map) : _map(map), _count(0) {
    _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
  }

  void sort();

  void print();

  void write(CompressedWriteStream* stream) {
    for (int i = 0; i < _count; ++i) {
      _values[i].write_on(stream);
    }
  }

private:
  int find_derived_position(OopMapValue omv, int start) {
    assert(omv.type() == OopMapValue::derived_oop_value, "");

    VMReg base = omv.content_reg();
    int i = start;

    for (; i < _count; ++i) {
      if (base == _values[i].reg()) {

        for (int n = i + 1; n < _count; ++n) {
          if (_values[n].type() != OopMapValue::derived_oop_value || _values[n].content_reg() != base) {
            return n;
          }

          if (derived_cost(_values[n]) > derived_cost(omv)) {
            return n;
          }
        }
        return _count;
      }
    }

    assert(false, "failed to find base");
    return -1;
  }

  int find_position(OopMapValue omv, int start) {
    assert(omv.type() != OopMapValue::derived_oop_value, "");

    int i = start;
    for (; i < _count; ++i) {
      if (omv_cost(_values[i]) > omv_cost(omv)) {
        return i;
      }
    }
    assert(i < _map->omv_count(), "bounds check");
    return i;
  }

  void insert(OopMapValue value, int pos) {
    assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
    assert(pos <= _count, "sanity");

    if (pos < _count) {
      OopMapValue prev = _values[pos];

      for (int i = pos; i < _count; ++i) {
        OopMapValue tmp = _values[i+1];
        _values[i+1] = prev;
        prev = tmp;
      }
    }
    _values[pos] = value;

    ++_count;
  }

  int omv_cost(OopMapValue omv) {
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
    return reg_cost(omv.reg());
  }

  int reg_cost(VMReg reg) {
    if (reg->is_reg()) {
      return 0;
    }
    return reg->reg2stack() * VMRegImpl::stack_slot_size;
  }

  int derived_cost(OopMapValue omv) {
    return reg_cost(omv.reg());
  }
};

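// Resulting order: callee-saved entries first, then oop and narrowoop entries
// ordered by reg_cost (registers before stack slots, stack slots by offset),
// and finally each derived entry placed directly after its base entry.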
void OopMapSort::sort() {
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    if (oms.current().type() == OopMapValue::callee_saved_value) {
      insert(oms.current(), _count);
    }
  }

  int start = _count;
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      int pos = find_position(omv, start);
      insert(omv, pos);
    }
  }

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::derived_oop_value) {
      int pos = find_derived_position(omv, start);
      assert(pos > 0, "");
      insert(omv, pos);
    }
  }
}

void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}

void OopMap::copy_and_sort_data_to(address addr) const {
  OopMapSort sort(this);
  sort.sort();
  CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
  sort.write(stream);

  assert(stream->position() == write_stream()->position(), "");
  memcpy(addr, stream->buffer(), stream->position());
}

int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop; the derived pointer shares storage with its base.
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

// OopMapSet

OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}

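// AddDerivedOop does not update derived pointers directly; it records each
// (derived location, base) pair in the DerivedPointerTable so the derived
// values can be recomputed by DerivedPointerTable::update_pointers() after the
// base oops have been moved.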
class AddDerivedOop : public DerivedOopClosure {
 public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};

class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base and that base is updated while processing the
    // first one, the second derived pointer would compute its offset from an already-updated base.
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;
    *derived = derived_base;
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;
  }
};

class IgnoreDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
};

void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, loc);
      //DEBUG_ONLY(nof_callee++;)
    }
  }
}

// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  DEBUG_ONLY(int nof_callee = 0;)
  update_register_map1(this, fr, reg_map);

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb == nullptr || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}

const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}

//=============================================================================
// Non-Product code

#ifndef PRODUCT
void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data, it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print("     ");
  cb->print_value_on(tty);  tty->cr();
  if (reg_map != nullptr) {
    reg_map->print();
  }
  tty->print_cr("------ ");

}
#endif // PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
  ImmutableOopMapPair* pairs = get_pairs();
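  // Pairs are kept in increasing pc_offset order (see OopMapSet::add_gc_map),
  // so we can stop at the first pair at or beyond pc_offset.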
  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      ImmutableOopMapPair* last = &pairs[i];
      return last->pc_offset() == pc_offset ? i : -1;
    }
  }
  return -1;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last  = nullptr;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
  _num_oops = oopmap->num_oops();
  _has_derived_oops = oopmap->has_derived_oops();
  address addr = data_addr();
  oopmap->copy_and_sort_data_to(addr);
}

bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    if (oms.current().type() == type) {
      return true;
    }
  }
  return false;
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

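// Size of one ImmutableOopMap in the flattened set: the fixed header plus the
// compressed OopMapValue data, rounded up to 8 bytes so maps can be laid out
// back to back.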
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}

void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}

//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

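// Lifecycle: DerivedPointerTable::clear() activates the table before derived
// pointers are collected, add() records each derived location and redirects it
// to its base while GC runs, and update_pointers() recomputes every derived
// pointer from its (possibly moved) base plus the saved offset and deactivates
// the table again.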
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(nullptr) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == nullptr || Entry::_list->empty();
}

void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotten to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}

void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI