1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "code/codeBlob.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nmethod.hpp"
28 #include "code/scopeDesc.hpp"
29 #include "compiler/oopMap.inline.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logStream.hpp"
33 #include "memory/allocation.inline.hpp"
34 #include "memory/iterator.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/compressedOops.hpp"
37 #include "runtime/atomicAccess.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/signature.hpp"
41 #include "runtime/stackWatermarkSet.inline.hpp"
42 #include "utilities/align.hpp"
43 #include "utilities/lockFreeStack.hpp"
44 #ifdef COMPILER1
45 #include "c1/c1_Defs.hpp"
46 #endif
47 #ifdef COMPILER2
48 #include "opto/optoreg.hpp"
49 #endif
50 #if INCLUDE_JVMCI
51 #include "jvmci/jvmci_globals.hpp"
52 #endif
53
54 static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
55
// Reinterpret a derived_pointer as its raw integral value.
static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}
59
// Reinterpret a raw integral value (e.g. an oop's bits) as a derived_pointer.
static inline derived_pointer to_derived_pointer(intptr_t obj) {
  return static_cast<derived_pointer>(obj);
}
63
// Difference between two derived pointers, i.e. the byte offset between them.
static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}
67
// Displace a derived pointer by a byte offset.
static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}
71
72 // OopMapStream
73
// Iterate the entries of a mutable OopMap by reading back the buffer of its
// compressed write stream.
OopMapStream::OopMapStream(const OopMap* oop_map)
  : _stream(oop_map->write_stream()->buffer()) {
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}
80
// Iterate the entries of an ImmutableOopMap, whose data is stored inline
// directly after the object (see ImmutableOopMap::data_addr()).
OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  : _stream(oop_map->data_addr()) {
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}
87
// Decode the next OopMapValue from the compressed stream, if any remain.
// Sets _valid_omv to false once all _size entries have been consumed.
void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(&_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}
96
97
98 // OopMap
99
100 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
101 // slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;

#ifdef ASSERT
  // Debug-only shadow array recording the type set for each slot/register,
  // so set_xxx() can assert against double insertion.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
116
// Construct an OopMap whose stream is pre-sized for data_size bytes of
// compressed entry data. No debug slot-tracking is set up for this variant.
OopMap::OopMap(int data_size) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(data_size));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;
#ifdef ASSERT
  _locs_length = 0;
  _locs_used = nullptr;
#endif
}
129
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _has_derived_oops = source->has_derived_oops();
  _index = -1;

#ifdef ASSERT
  // Fresh shadow array; the copy re-records entries through the stream below.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}
153
154
// Return a newly allocated deep copy of this OopMap, entries included.
OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}
158
// Copy the raw compressed entry data to addr; caller provides the space
// (at least data_size() bytes).
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}
162
163 class OopMapSort {
164 private:
165 const OopMap* _map;
166 OopMapValue* _values;
167 int _count;
168
169 public:
170 OopMapSort(const OopMap* map) : _map(map), _count(0) {
171 _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
172 }
173
174 void sort();
175
176 void print();
177
178 void write(CompressedWriteStream* stream) {
179 for (int i = 0; i < _count; ++i) {
180 _values[i].write_on(stream);
181 }
182 }
183
184 private:
185 int find_derived_position(OopMapValue omv, int start) {
186 assert(omv.type() == OopMapValue::derived_oop_value, "");
187
188 VMReg base = omv.content_reg();
189 int i = start;
190
191 for (; i < _count; ++i) {
192 if (base == _values[i].reg()) {
193
194 for (int n = i + 1; n < _count; ++n) {
195 if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) {
196 return n;
197 }
198
199 if (derived_cost(_values[i]) > derived_cost(omv)) {
200 return n;
201 }
202 }
203 return _count;
204 }
205 }
206
207 assert(false, "failed to find base");
208 return -1;
209 }
210
211 int find_position(OopMapValue omv, int start) {
212 assert(omv.type() != OopMapValue::derived_oop_value, "");
213
214 int i = start;
215 for (; i < _count; ++i) {
216 if (omv_cost(_values[i]) > omv_cost(omv)) {
217 return i;
218 }
219 }
220 assert(i < _map->omv_count(), "bounds check");
221 return i;
222 }
223
224 void insert(OopMapValue value, int pos) {
225 assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
226 assert(pos <= _count, "sanity");
227
228 if (pos < _count) {
229 OopMapValue prev = _values[pos];
230
231 for (int i = pos; i < _count; ++i) {
232 OopMapValue tmp = _values[i+1];
233 _values[i+1] = prev;
234 prev = tmp;
235 }
236 }
237 _values[pos] = value;
238
239 ++_count;
240 }
241
242 int omv_cost(OopMapValue omv) {
243 assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
244 return reg_cost(omv.reg());
245 }
246
247 int reg_cost(VMReg reg) {
248 if (reg->is_reg()) {
249 return 0;
250 }
251 return reg->reg2stack() * VMRegImpl::stack_slot_size;
252 }
253
254 int derived_cost(OopMapValue omv) {
255 return reg_cost(omv.reg());
256 }
257 };
258
// Three-pass insertion sort: callee-saved entries first, then oops/narrowoops
// by access cost, finally derived oops placed behind their base entries.
void OopMapSort::sort() {
#ifdef ASSERT
  // Only the four known entry types may appear in the source map.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value ||
           omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }
#endif

  // Pass 1: callee-saved entries, appended in stream order.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    if (oms.current().type() == OopMapValue::callee_saved_value) {
      insert(oms.current(), _count);
    }
  }

  // Pass 2: oop/narrowoop entries, inserted in cost order after the
  // callee-saved block (which starts at 'start').
  int start = _count;
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      int pos = find_position(omv, start);
      insert(omv, pos);
    }
  }

  // Pass 3: derived entries, each placed directly behind its base.
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::derived_oop_value) {
      int pos = find_derived_position(omv, start);
      assert(pos > 0, "");
      insert(omv, pos);
    }
  }
}
292
// Debug dump of the sorted entries: [o]/[n] for oop/narrowoop (register or
// stack location), [d] for derived entries (base location, then derived).
void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}
315
// Copy the entry data to addr in the canonical sorted order (see OopMapSort).
// The sorted stream must occupy exactly as many bytes as the unsorted one.
void OopMap::copy_and_sort_data_to(address addr) const {
  OopMapSort sort(this);
  sort.sort();
  CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
  sort.write(stream);

  assert(stream->position() == write_stream()->position(), "");
  memcpy(addr, stream->buffer(), stream->position());
}
325
326 int OopMap::heap_size() const {
327 int size = sizeof(OopMap);
328 int align = sizeof(void *) - 1;
329 size += write_stream()->position();
330 // Align to a reasonable ending point
331 size = ((size+align) & ~align);
332 return size;
333 }
334
335 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
336 // slots to hold 4-byte values like ints and floats in the LP64 build.
// Record one entry of the given type for reg, streaming it out and updating
// the oop / derived-oop bookkeeping counters.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  DEBUG_ONLY( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}
352
353
// Record that reg holds an ordinary (uncompressed) oop.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}
357
358
// Record that reg holds a compressed (narrow) oop.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
362
363
// Record that reg holds the saved value of the caller's caller_machine_register.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}
367
368
// Record that reg holds a pointer derived from the oop in
// derived_from_local_register. If both registers coincide the value is the
// base oop itself, so record it as a plain oop instead.
void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}
377
378 // OopMapSet
379
// Default construction uses the minimum pre-sized list; the sized variant
// lets callers that know the map count avoid list growth.
OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

OopMapSet::OopMapSet(int size) : _list(size) {}
383
// Register map as the GC map for the instruction at pc_offset and return its
// index in the set. Maps are expected to be added in ascending pc order.
int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}
404
// Closure that records each (derived, base) pair in the DerivedPointerTable so
// the derived pointer can be recomputed after GC moves the base oop
// (see DerivedPointerTable::update_pointers()).
class AddDerivedOop : public DerivedOopClosure {
public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};
417
// Closure that updates a derived pointer directly: it temporarily replaces the
// derived value with its base, lets the oop closure relocate the base, then
// re-applies the saved offset to reconstruct the derived pointer.
class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
    // offset, if the base pointer is processed in the first derived pointer.
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;
    *derived = derived_base;
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;
  }
};
440
// Closure that deliberately skips derived pointers.
// NOTE(review): _oop_cl appears unused within this class — confirm whether it
// can be removed without affecting object layout expectations elsewhere.
class IgnoreDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
};
451
// Convenience wrappers: look up the oop map for the frame's pc and iterate
// its oops, dispatching derived-pointer handling by mode or by closure.
void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}
459
// Iterate the oops of this map for the given frame, applying oop_fn to plain
// oops and derived_oop_fn to derived pointers (null derived values skipped).
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}
466
// As above, but the derived-pointer policy is selected by mode:
//   _directly   - fix derived pointers up immediately (ProcessDerivedOop)
//   _with_table - defer via the DerivedPointerTable (AddDerivedOop)
//   _ignore     - skip derived pointers entirely
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}
489
// Apply fn to every entry whose type the closure accepts (handle_type filter).
void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

// Apply fn to every entry of exactly the given type.
void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}
509
// For every callee-saved entry, record in reg_map the stack location within fr
// where the caller's register value was saved.
static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, loc);
    }
  }
}
520
521 // Update callee-saved register info for the following frame
// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  DEBUG_ONLY(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  update_register_map1(this, fr, reg_map);
}
540
// Look up the oop map covering the frame's return address.
const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

// Look up the oop map in cb covering pc; must exist.
const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}
556
557 //=============================================================================
558 // Non-Product code
559
560 #ifndef PRODUCT
561 void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
562 // Print oopmap and regmap
563 tty->print_cr("------ ");
564 CodeBlob* cb = fr->cb();
565 const ImmutableOopMapSet* maps = cb->oop_maps();
566 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
567 map->print();
568 if( cb->is_nmethod() ) {
569 nmethod* nm = (nmethod*)cb;
570 // native wrappers have no scope data, it is implied
571 if (nm->is_native_method()) {
572 tty->print("bci: 0 (native)");
573 } else {
574 ScopeDesc* scope = nm->scope_desc_at(fr->pc());
575 tty->print("bci: %d ",scope->bci());
576 }
577 }
578 tty->cr();
579 fr->print_on(tty);
580 tty->print(" ");
581 cb->print_value_on(tty); tty->cr();
582 if (reg_map != nullptr) {
583 reg_map->print();
584 }
585 tty->print_cr("------ ");
586
587 }
588 #endif // PRODUCT
589
590 // Printing code is present in product build for -XX:+PrintAssembly.
591
// Print a human-readable tag for an entry type; for callee-saved and derived
// entries, 'optional' is the associated second register (caller's register or
// the base-oop location, respectively).
static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}
614
// Print this entry as "<reg>=<type>" followed by a space.
void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }
623
// Print all entries of this immutable map inside braces.
void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }
635
// Print all entries of this map plus its pc offset (decimal and hex).
void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }
648
// Print each distinct map once, followed by the pc offsets that share it
// (duplicate maps are stored once and referenced by multiple pairs).
void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }
670
// Print every map in the set, numbered by index.
void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }
686
687 bool OopMap::equals(const OopMap* other) const {
688 if (other->_omv_count != _omv_count) {
689 return false;
690 }
691 if (other->write_stream()->position() != write_stream()->position()) {
692 return false;
693 }
694 if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
695 return false;
696 }
697 return true;
698 }
699
700 int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
701 // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
702 ImmutableOopMapPair* pairs = get_pairs();
703 for (int i = 0; i < _count; ++i) {
704 if (pairs[i].pc_offset() >= pc_offset) {
705 ImmutableOopMapPair* last = &pairs[i];
706 return last->pc_offset() == pc_offset ? i : -1;
707 }
708 }
709 return -1;
710 }
711
// Return the map stored for exactly pc_offset; asserts (and guarantees in
// product) that one exists, unlike find_slot_for_offset().
const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = nullptr;

  // Pairs are sorted by pc offset: stop at the first pair at or beyond target.
  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}
728
729 ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
730 : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
731 _num_oops = oopmap->num_oops();
732 _has_derived_oops = oopmap->has_derived_oops();
733 address addr = data_addr();
734 oopmap->copy_and_sort_data_to(addr);
735 }
736
737 bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
738 for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
739 if (oms.current().type() == type) {
740 return true;
741 }
742 }
743 return false;
744 }
745
// Total size in bytes of this map (header plus compressed data). The data
// length is not stored, so we walk the stream to its end to measure it.
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
754
// Builder that converts an OopMapSet into a compact ImmutableOopMapSet,
// de-duplicating empty and identical consecutive maps via _mapping.
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}
758
// Bytes needed to store map as an ImmutableOopMap, rounded up to 8 bytes.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}
762
// Compute the total allocation size for the ImmutableOopMapSet and record, in
// _mapping, whether each source map is stored fresh (OOPMAP_NEW) or shares a
// previously stored map (OOPMAP_EMPTY / OOPMAP_DUPLICATE).
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  // Extra 8 bytes in debug builds hold the 0xff canary checked by verify().
  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}
805
// Placement-construct the (pc offset, map offset) pair in the output set.
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}
810
// Fill the pair and placement-construct the ImmutableOopMap it points to;
// returns the number of bytes the map occupies.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}
818
// Populate the output set according to the plan recorded in _mapping:
// fresh maps get their data written, duplicates/empties only get a pair
// pointing at the previously stored map.
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}
837
838 #ifdef ASSERT
// Debug check: the trailing 0xff canary written by generate_into() must be
// intact, and every stored map must lie fully within the allocation.
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
852 #endif
853
// Build the ImmutableOopMapSet into a caller-provided buffer of _required
// bytes. In debug builds the last 8 bytes are a canary checked by verify().
ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}
864
// Compute the required size, allocate from the C heap (mtCode), and build.
ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}
872
// Entry point: convert a mutable OopMapSet into a compact immutable one.
// The builder's scratch arrays live in the ResourceMark scope below.
ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
878
// Bitwise copy of the whole set (header, pairs and map data are contiguous),
// allocated from the C heap; caller owns the result.
ImmutableOopMapSet* ImmutableOopMapSet::clone() const {
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _size, mtCode);
  memcpy(buffer, (address)this, _size);
  return (ImmutableOopMapSet*)buffer;
}
884
// Sets are allocated as raw byte arrays (see build()/clone()), so deletion
// must release them the same way.
void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}
888
889 //------------------------------DerivedPointerTable---------------------------
890
891 #if COMPILER2_OR_JVMCI
892
// One deferred derived-pointer fixup: while the table is active the derived
// slot holds the address of its base slot, and _offset remembers how to
// reconstruct the derived value once the base oop has been relocated.
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
      _location(location), _offset(offset), _next(nullptr) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  // Entries are collected concurrently, hence the lock-free stack.
  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};
911
// _list is created lazily in DerivedPointerTable::clear(); _active gates add().
DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
bool DerivedPointerTable::_active = false;
914
915 bool DerivedPointerTable::is_empty() {
916 return Entry::_list == nullptr || Entry::_list->empty();
917 }
918
void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}
931
// Record a derived pointer for deferred fixup: remember its offset from the
// base oop, then overwrite the derived slot with the address of the base slot
// so update_pointers() can find the relocated base later.
void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: %zd)",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}
958
// After GC has moved the base oops, reconstruct every recorded derived
// pointer as (relocated base + saved offset) and drain the table.
void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: %zd)",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}
989
990 #endif // COMPILER2_OR_JVMCI