1 /*
  2  * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "code/codeBlob.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "code/nmethod.hpp"
 29 #include "code/scopeDesc.hpp"
 30 #include "compiler/oopMap.hpp"
 31 #include "gc/shared/collectedHeap.hpp"
 32 #include "memory/allocation.inline.hpp"
 33 #include "memory/iterator.hpp"
 34 #include "memory/resourceArea.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/inlineKlass.hpp"
 37 #include "oops/compressedOops.hpp"
 38 #include "runtime/frame.inline.hpp"
 39 #include "runtime/handles.inline.hpp"
 40 #include "runtime/signature.hpp"
 41 #include "runtime/stackWatermarkSet.inline.hpp"
 42 #include "utilities/align.hpp"
 43 #include "utilities/lockFreeStack.hpp"
 44 #ifdef COMPILER1
 45 #include "c1/c1_Defs.hpp"
 46 #endif
 47 #ifdef COMPILER2
 48 #include "opto/optoreg.hpp"
 49 #endif
 50 #if INCLUDE_JVMCI
 51 #include "jvmci/jvmci_globals.hpp"
 52 #endif
 53 
// A derived pointer is stored in the same number of bits as an oop, so the
// two can occupy the same stack/register slots interchangeably.
static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");

// Expose the raw integral value of a derived pointer.
static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

// Reinterpret an oop (a base pointer) as a derived_pointer value.
static inline derived_pointer to_derived_pointer(oop obj) {
  return static_cast<derived_pointer>(cast_from_oop<intptr_t>(obj));
}

// Difference between two derived pointers, e.g. derived minus base yields
// the offset to re-apply after the base oop has been relocated.
static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

// Displace a derived pointer by an integer offset.
static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}
 71 
// OopMapStream

// Iterate over the entries of a mutable OopMap by re-reading its
// compressed write stream from the beginning.
OopMapStream::OopMapStream(OopMap* oop_map) {
  _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;  // no entry decoded yet; find_next() fetches the first
}

// Iterate over the entries of an ImmutableOopMap, reading directly from
// its in-place data area.
OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) {
  _stream = new CompressedReadStream(oop_map->data_addr());
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}
 87 
 88 void OopMapStream::find_next() {
 89   if (_position++ < _size) {
 90     _omv.read_from(_stream);
 91     _valid_omv = true;
 92     return;
 93   }
 94   _valid_omv = false;
 95 }
 96 
 97 
 98 // OopMap
 99 
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  // Debug-only shadow array over all nameable slots (registers + frame +
  // args); set_xxx() uses it to catch duplicate insertions.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
113 
114 
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  // Fresh duplicate-detection array, left all-unused: the raw entry copy
  // below bypasses set_xxx() and does not re-mark slots.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}
135 
136 
// Return a resource-allocated deep copy of this OopMap (stream and entries).
OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

// Copy the raw compressed entry data to 'addr'; used when materializing the
// immutable form (see ImmutableOopMap's constructor).
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}
144 
145 int OopMap::heap_size() const {
146   int size = sizeof(OopMap);
147   int align = sizeof(void *) - 1;
148   size += write_stream()->position();
149   // Align to a reasonable ending point
150   size = ((size+align) & ~align);
151   return size;
152 }
153 
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
//
// Common implementation behind the typed setters below: records that 'reg'
// holds a value of kind 'x', with 'optional' carrying the companion register
// for callee-saved and derived entries (VMRegImpl::Bad() otherwise).
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  // Debug builds remember the slot as used so a second registration asserts.
  debug_only( _locs_used[reg->value()] = x; )

  // Append a compressed entry to the write stream and bump the count.
  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
}
166 
167 
// Record that 'reg' holds an (uncompressed) oop at this map's pc.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


// Record that 'reg' holds a compressed (narrow) oop.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


// Record that 'reg' holds the saved value of the caller's
// 'caller_machine_register'.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}
181 
182 
183 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
184   if( reg == derived_from_local_register ) {
185     // Actually an oop, derived shares storage with base,
186     set_oop(reg);
187   } else {
188     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
189   }
190 }
191 
// OopMapSet

// Start with a growable list pre-sized to MinOopMapAllocation entries.
OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}
195 
// Append 'map' for the instruction at 'pc_offset'.  Maps are expected to be
// added in increasing pc order with no duplicate offsets; debug builds
// enforce the former fatally and warn about the latter.
void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  add(map);
}
214 
// Derived-pointer callback: defer the update by recording the (derived, base)
// pair in the DerivedPointerTable (only compiled in with C2/JVMCI).  'oop_fn'
// is unused; the signature must match the other derived_oop_fn callbacks.
static void add_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}

// Derived-pointer callback: deliberately do nothing.
static void ignore_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
}
223 
// Derived-pointer callback: update the derived pointer in place.  The derived
// slot temporarily holds the base oop so the closure can relocate it, then
// the saved offset is re-applied to rebuild the derived pointer.
static void process_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
  // All derived pointers must be processed before the base pointer of any derived pointer is processed.
  // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
  // offset, if the base pointer is processed in the first derived pointer.
  derived_pointer derived_base = to_derived_pointer(*base);
  intptr_t offset = *derived - derived_base;
  *derived = derived_base;         // let the closure see (and possibly move) the base
  oop_fn->do_oop((oop*)derived);
  *derived = *derived + offset;    // restore derived = (new) base + offset
}
234 
235 
236 #ifndef PRODUCT
237 static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
238   // Print oopmap and regmap
239   tty->print_cr("------ ");
240   CodeBlob* cb = fr->cb();
241   const ImmutableOopMapSet* maps = cb->oop_maps();
242   const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
243   map->print();
244   if( cb->is_nmethod() ) {
245     nmethod* nm = (nmethod*)cb;
246     // native wrappers have no scope data, it is implied
247     if (nm->is_native_method()) {
248       tty->print("bci: 0 (native)");
249     } else {
250       ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
251       tty->print("bci: %d ",scope->bci());
252     }
253   }
254   tty->cr();
255   fr->print_on(tty);
256   tty->print("     ");
257   cb->print_value_on(tty);  tty->cr();
258   reg_map->print();
259   tty->print_cr("------ ");
260 
261 }
262 #endif // PRODUCT
263 
// Visit all oops of the frame's oopmap through 'f'.  'mode' selects how
// derived pointers are handled: updated directly, deferred to the
// DerivedPointerTable, or skipped entirely.
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  switch (mode) {
  case DerivedPointerIterationMode::_directly:
    all_do(fr, reg_map, f, process_derived_oop);
    break;
  case DerivedPointerIterationMode::_with_table:
    all_do(fr, reg_map, f, add_derived_oop);
    break;
  case DerivedPointerIterationMode::_ignore:
    all_do(fr, reg_map, f, ignore_derived_oop);
    break;
  }
}
277 
278 
// Core oopmap walk for one frame: first pass dispatches every derived-oop
// entry to 'derived_oop_fn', second pass feeds ordinary and narrow oops to
// 'oop_fn'.  The two-pass order is essential — see the comment below.
void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                       OopClosure* oop_fn, void derived_oop_fn(oop*, derived_pointer*, OopClosure*)) {
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  assert(map != NULL, "no ptr map found");

  // handle derived pointers first (otherwise base pointer may be
  // changed before derived pointer offset has been collected)
  {
    for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
      OopMapValue omv = oms.current();
      if (omv.type() != OopMapValue::derived_oop_value) {
        continue;
      }

      // Derived oops are only emitted by C2/JVMCI-compiled code.
#ifndef COMPILER2
      COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        ShouldNotReachHere();
      }
#endif
#endif // !COMPILER2
      derived_pointer* derived_loc = (derived_pointer*)fr->oopmapreg_to_location(omv.reg(),reg_map);
      guarantee(derived_loc != NULL, "missing saved register");
      oop* base_loc = fr->oopmapreg_to_oop_location(omv.content_reg(), reg_map);
      // Ignore NULL oops and decoded NULL narrow oops which
      // equal to CompressedOops::base() when a narrow oop
      // implicit null check is used in compiled code.
      // The narrow_oop_base could be NULL or be the address
      // of the page below heap depending on compressed oops mode.
      if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
        derived_oop_fn(base_loc, derived_loc, oop_fn);
      }
    }
  }

  {
    // We want coop and oop oop_types
    for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
      OopMapValue omv = oms.current();
      oop* loc = fr->oopmapreg_to_oop_location(omv.reg(),reg_map);
      // It should be an error if no location can be found for a
      // register mentioned as contained an oop of some kind.  Maybe
      // this was allowed previously because value_value items might
      // be missing?
      guarantee(loc != NULL, "missing saved register");
      if ( omv.type() == OopMapValue::oop_value ) {
        oop val = *loc;
        if (val == NULL || CompressedOops::is_base(val)) {
          // Ignore NULL oops and decoded NULL narrow oops which
          // equal to CompressedOops::base() when a narrow oop
          // implicit null check is used in compiled code.
          // The narrow_oop_base could be NULL or be the address
          // of the page below heap depending on compressed oops mode.
          continue;
        }
        oop_fn->do_oop(loc);
      } else if ( omv.type() == OopMapValue::narrowoop_value ) {
        narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
        VMReg vmReg = omv.reg();
        if (!vmReg->is_stack()) {
          // compressed oops in registers only take up 4 bytes of an
          // 8 byte register but they are in the wrong part of the
          // word so adjust loc to point at the right place.
          nl = (narrowOop*)((address)nl + 4);
        }
#endif
        oop_fn->do_oop(nl);
      }
    }
  }
}
357 
358 
// Update callee-saved register info for the following frame
//
// For every callee-saved entry in this frame's oopmap, records in 'reg_map'
// where the caller's register value was saved, so the caller's frame can
// later locate oops held in those registers.
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map  = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      // content_reg() names the caller's register; omv.reg() names the slot
      // in this frame where its value was saved.
      VMReg reg = omv.content_reg();
      oop* loc = fr->oopmapreg_to_oop_location(omv.reg(), reg_map);
      reg_map->set_location(reg, (address) loc);
      DEBUG_ONLY(nof_callee++;)
    }
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
400 
401 // Printing code is present in product build for -XX:+PrintAssembly.
402 
403 static
404 void print_register_type(OopMapValue::oop_types x, VMReg optional,
405                          outputStream* st) {
406   switch( x ) {
407   case OopMapValue::oop_value:
408     st->print("Oop");
409     break;
410   case OopMapValue::narrowoop_value:
411     st->print("NarrowOop");
412     break;
413   case OopMapValue::callee_saved_value:
414     st->print("Callers_");
415     optional->print_on(st);
416     break;
417   case OopMapValue::derived_oop_value:
418     st->print("Derived_oop_");
419     optional->print_on(st);
420     break;
421   default:
422     ShouldNotReachHere();
423   }
424 }
425 
// Print one entry as "<reg>=<type> ", e.g. the register name, '=', then the
// type tag (with companion register where applicable).
void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }
434 
435 void ImmutableOopMap::print_on(outputStream* st) const {
436   OopMapValue omv;
437   st->print("ImmutableOopMap {");
438   for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
439     omv = oms.current();
440     omv.print_on(st);
441   }
442   st->print("}");
443 }
444 
445 void ImmutableOopMap::print() const { print_on(tty); }
446 
447 void OopMap::print_on(outputStream* st) const {
448   OopMapValue omv;
449   st->print("OopMap {");
450   for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
451     omv = oms.current();
452     omv.print_on(st);
453   }
454   // Print hex offset in addition.
455   st->print("off=%d/0x%x}", (int) offset(), (int) offset());
456 }
457 
458 void OopMap::print() const { print_on(tty); }
459 
460 void ImmutableOopMapSet::print_on(outputStream* st) const {
461   const ImmutableOopMap* last = NULL;
462   const int len = count();
463 
464   st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);
465 
466   for (int i = 0; i < len; i++) {
467     const ImmutableOopMapPair* pair = pair_at(i);
468     const ImmutableOopMap* map = pair->get_from(this);
469     if (map != last) {
470       st->cr();
471       map->print_on(st);
472       st->print(" pc offsets: ");
473     }
474     last = map;
475     st->print("%d ", pair->pc_offset());
476   }
477   st->cr();
478 }
479 
480 void ImmutableOopMapSet::print() const { print_on(tty); }
481 
482 void OopMapSet::print_on(outputStream* st) const {
483   const int len = _list.length();
484 
485   st->print_cr("OopMapSet contains %d OopMaps", len);
486 
487   for( int i = 0; i < len; i++) {
488     OopMap* m = at(i);
489     st->print_cr("#%d ",i);
490     m->print_on(st);
491     st->cr();
492   }
493   st->cr();
494 }
495 
496 void OopMapSet::print() const { print_on(tty); }
497 
498 bool OopMap::equals(const OopMap* other) const {
499   if (other->_omv_count != _omv_count) {
500     return false;
501   }
502   if (other->write_stream()->position() != write_stream()->position()) {
503     return false;
504   }
505   if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
506     return false;
507   }
508   return true;
509 }
510 
// Look up the map for an exact pc offset.  Pairs are assumed sorted by pc
// offset (add_gc_map checks sortedness at build time), so we take the first
// pair at or beyond the requested offset and assert it matches exactly.
const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last  = NULL;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != NULL, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}
527 
// Resolve this pair's map by its byte offset within the owning set.
const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

// Copy the source OopMap's compressed entry data into the area immediately
// following this header (data_addr()).
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}
536 
537 #ifdef ASSERT
// Size of this map in bytes: header plus compressed data, measured by
// walking the entry stream to its end.
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
546 #endif
547 
// Scratch table with one Mapping per source OopMap describing how it will be
// materialized (new, duplicate, or shared-empty); resource-allocated, so the
// caller must hold a ResourceMark (see build_from).
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

// Bytes needed to store 'map' immutably: header + compressed data, 8-byte aligned.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}
555 
// Compute the total allocation size for the immutable set, and as a side
// effect fill in _mapping[] with each map's kind and target offset.  Empty
// maps are collapsed to a single shared instance, and a map identical to
// its predecessor is pointed at the predecessor's storage.
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of ours pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  // Debug builds reserve 8 extra guard bytes; see generate_into()/verify().
  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}
598 
// Placement-construct the (pc offset, map offset) pair for 'map'.
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

// Construct the pair, then placement-construct the ImmutableOopMap itself
// (header plus copied data) at the pair's target; returns the bytes used.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}
611 
612 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
613   ImmutableOopMapPair* pairs = set->get_pairs();
614 
615   for (int i = 0; i < set->count(); ++i) {
616     const OopMap* map = _mapping[i]._map;
617     ImmutableOopMapPair* pair = NULL;
618     int size = 0;
619 
620     if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
621       size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
622     } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
623       fill_pair(&pairs[i], map, _mapping[i]._offset, set);
624     }
625 
626     const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
627     assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
628   }
629 }
630 
631 #ifdef ASSERT
// Check the 8-byte 0xff guard written by generate_into() is intact, and that
// every pair references a map lying entirely within the set's data.
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
645 #endif
646 
// Build the set in place inside 'buffer' (which must be _required bytes).
ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  // Debug builds place a 0xff guard in the last 8 bytes; verify() checks it
  // was not overwritten during fill().
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}
657 
// Size the layout, allocate the backing C-heap buffer, and build into it.
ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}
665 
// Convert a mutable OopMapSet into its compact immutable form.  The builder's
// scratch array is resource-allocated, hence the ResourceMark; the returned
// set itself lives in C heap (mtCode).
ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
671 
672 
673 //------------------------------DerivedPointerTable---------------------------
674 
675 #if COMPILER2_OR_JVMCI
676 
// One recorded derived pointer: where it lives on the stack and its offset
// from the base oop.  Entries are threaded onto a global LockFreeStack so
// they can be pushed without holding a lock.
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  // Accessor used by LockFreeStack to link entries together.
  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(NULL) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;  // global list of pending derived-pointer updates
};
695 
// The list is allocated lazily by clear(); _active gates add().
DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = NULL;
bool DerivedPointerTable::_active = false;

// Empty when the list was never created or currently holds no entries.
bool DerivedPointerTable::is_empty() {
  return Entry::_list == NULL || Entry::_list->empty();
}
702 
// Prepare the table for a new collection cycle and enable add().
void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == NULL) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}
715 
// Record a derived pointer for later fix-up: remember its offset from the
// base, then overwrite the derived slot with the base's address so the GC
// can update it like an ordinary oop slot.  update_pointers() re-applies
// the offset afterwards.
void DerivedPointerTable::add(derived_pointer* derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != NULL, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*base_loc);
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}
742 
// After GC has moved objects: for every recorded entry, read the (possibly
// relocated) base through the saved base-slot address and rebuild the
// derived pointer as base + offset.  Drains the list and deactivates the
// table.
void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != NULL, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != NULL) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(base);
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}
771 
772 #endif // COMPILER2_OR_JVMCI
--- EOF ---