1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "code/codeBlob.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nmethod.hpp"
28 #include "code/scopeDesc.hpp"
29 #include "compiler/oopMap.inline.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logStream.hpp"
33 #include "memory/allocation.inline.hpp"
34 #include "memory/iterator.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/compressedOops.hpp"
37 #include "oops/inlineKlass.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/signature.hpp"
42 #include "runtime/stackWatermarkSet.inline.hpp"
43 #include "utilities/align.hpp"
44 #include "utilities/lockFreeStack.hpp"
45 #ifdef COMPILER1
46 #include "c1/c1_Defs.hpp"
47 #endif
48 #ifdef COMPILER2
49 #include "opto/optoreg.hpp"
50 #endif
51 #if INCLUDE_JVMCI
52 #include "jvmci/jvmci_globals.hpp"
53 #endif
54
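// A "derived pointer" is an interior pointer into an object (for example the
// address of an array element) that compiled code keeps live across a safepoint.
// It is described in the oop map together with the register or stack slot that
// holds its base oop, so GC can relocate it as base + offset when the base
// object moves. The helpers below treat derived_pointer values as plain
// intptr_t arithmetic.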
55 static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");
56
57 static inline intptr_t derived_pointer_value(derived_pointer p) {
58 return static_cast<intptr_t>(p);
59 }
60
61 static inline derived_pointer to_derived_pointer(intptr_t obj) {
62 return static_cast<derived_pointer>(obj);
63 }
64
65 static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
66 return derived_pointer_value(p) - derived_pointer_value(p1);
67 }
68
69 static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
70 return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
71 }
72
73 // OopMapStream
74
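// An OopMapStream decodes OopMapValues one at a time from the compressed byte
// stream backing either a mutable OopMap (still being built) or an
// ImmutableOopMap (finalized inside a CodeBlob).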
75 OopMapStream::OopMapStream(const OopMap* oop_map)
76 : _stream(oop_map->write_stream()->buffer()) {
77 _size = oop_map->omv_count();
78 _position = 0;
79 _valid_omv = false;
80 }
81
82 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
83 : _stream(oop_map->data_addr()) {
84 _size = oop_map->count();
85 _position = 0;
86 _valid_omv = false;
87 }
88
89 void OopMapStream::find_next() {
90 if (_position++ < _size) {
91 _omv.read_from(&_stream);
92 _valid_omv = true;
93 return;
94 }
95 _valid_omv = false;
96 }
97
98
99 // OopMap
100
101 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
102 // slots to hold 4-byte values like ints and floats in the LP64 build.
103 OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
105 set_write_stream(new CompressedWriteStream(32));
106 set_omv_count(0);
107 _num_oops = 0;
108 _has_derived_oops = false;
109 _index = -1;
110
111 #ifdef ASSERT
112 _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
113 _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
114 for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
115 #endif
116 }
117
118
119 OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy of the source OopMap.
122 set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
123 set_omv_count(0);
124 set_offset(source->offset());
125 _num_oops = source->num_oops();
126 _has_derived_oops = source->has_derived_oops();
127 _index = -1;
128
129 #ifdef ASSERT
130 _locs_length = source->_locs_length;
131 _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
132 for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
133 #endif
134
135 // We need to copy the entries too.
136 for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
137 OopMapValue omv = oms.current();
138 omv.write_on(write_stream());
139 increment_count();
140 }
141 }
142
143
144 OopMap* OopMap::deep_copy() {
145 return new OopMap(_deep_copy_token, this);
146 }
147
148 void OopMap::copy_data_to(address addr) const {
149 memcpy(addr, write_stream()->buffer(), write_stream()->position());
150 }
151
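// OopMapSort establishes the canonical entry order used by ImmutableOopMaps:
// callee-saved entries first, then oop and narrow-oop entries ordered by
// increasing cost (registers before stack slots, stack slots by offset), and
// finally each derived-oop entry inserted after the entry describing its base.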
152 class OopMapSort {
153 private:
154 const OopMap* _map;
155 OopMapValue* _values;
156 int _count;
157
158 public:
159 OopMapSort(const OopMap* map) : _map(map), _count(0) {
160 _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
161 }
162
163 void sort();
164
165 void print();
166
167 void write(CompressedWriteStream* stream) {
168 for (int i = 0; i < _count; ++i) {
169 _values[i].write_on(stream);
170 }
171 }
172
173 private:
174 int find_derived_position(OopMapValue omv, int start) {
175 assert(omv.type() == OopMapValue::derived_oop_value, "");
176
177 VMReg base = omv.content_reg();
178 int i = start;
179
180 for (; i < _count; ++i) {
181 if (base == _values[i].reg()) {
182
        for (int n = i + 1; n < _count; ++n) {
          if (_values[n].type() != OopMapValue::derived_oop_value || _values[n].content_reg() != base) {
            return n;
          }

          if (derived_cost(_values[n]) > derived_cost(omv)) {
            return n;
          }
        }
192 return _count;
193 }
194 }
195
196 assert(false, "failed to find base");
197 return -1;
198 }
199
200 int find_position(OopMapValue omv, int start) {
201 assert(omv.type() != OopMapValue::derived_oop_value, "");
202
203 int i = start;
204 for (; i < _count; ++i) {
205 if (omv_cost(_values[i]) > omv_cost(omv)) {
206 return i;
207 }
208 }
209 assert(i < _map->omv_count(), "bounds check");
210 return i;
211 }
212
213 void insert(OopMapValue value, int pos) {
214 assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
215 assert(pos <= _count, "sanity");
216
217 if (pos < _count) {
218 OopMapValue prev = _values[pos];
219
220 for (int i = pos; i < _count; ++i) {
221 OopMapValue tmp = _values[i+1];
222 _values[i+1] = prev;
223 prev = tmp;
224 }
225 }
226 _values[pos] = value;
227
228 ++_count;
229 }
230
231 int omv_cost(OopMapValue omv) {
232 assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
233 return reg_cost(omv.reg());
234 }
235
236 int reg_cost(VMReg reg) {
237 if (reg->is_reg()) {
238 return 0;
239 }
240 return reg->reg2stack() * VMRegImpl::stack_slot_size;
241 }
242
243 int derived_cost(OopMapValue omv) {
244 return reg_cost(omv.reg());
245 }
246 };
247
248 void OopMapSort::sort() {
249 #ifdef ASSERT
250 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
251 OopMapValue omv = oms.current();
252 assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value ||
253 omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
254 }
255 #endif
256
257 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
258 if (oms.current().type() == OopMapValue::callee_saved_value) {
259 insert(oms.current(), _count);
260 }
261 }
262
263 int start = _count;
264 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
265 OopMapValue omv = oms.current();
266 if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
267 int pos = find_position(omv, start);
268 insert(omv, pos);
269 }
270 }
271
272 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
273 OopMapValue omv = oms.current();
274 if (omv.type() == OopMapValue::derived_oop_value) {
275 int pos = find_derived_position(omv, start);
276 assert(pos > 0, "");
277 insert(omv, pos);
278 }
279 }
280 }
281
282 void OopMapSort::print() {
283 for (int i = 0; i < _count; ++i) {
284 OopMapValue omv = _values[i];
285 if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
286 if (omv.reg()->is_reg()) {
287 tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
288 } else {
289 tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
290 }
291 } else {
292 if (omv.content_reg()->is_reg()) {
293 tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
294 } else if (omv.reg()->is_reg()) {
295 tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
296 } else {
297 int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
298 int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
299 tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
300 }
301 }
302 }
303 }
304
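// Like copy_data_to(), but re-encodes the entries in the canonical order
// established by OopMapSort. The sorted encoding must occupy exactly as many
// bytes as the unsorted one, which the assert below verifies.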
305 void OopMap::copy_and_sort_data_to(address addr) const {
306 OopMapSort sort(this);
307 sort.sort();
308 CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
309 sort.write(stream);
310
311 assert(stream->position() == write_stream()->position(), "");
312 memcpy(addr, stream->buffer(), stream->position());
313 }
314
315 int OopMap::heap_size() const {
316 int size = sizeof(OopMap);
317 int align = sizeof(void *) - 1;
318 size += write_stream()->position();
319 // Align to a reasonable ending point
320 size = ((size+align) & ~align);
321 return size;
322 }
323
324 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
325 // slots to hold 4-byte values like ints and floats in the LP64 build.
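// Record that 'reg' holds a value of type 'x' at this safepoint. For derived
// oops, 'optional' names the register or slot holding the base oop; for
// callee-saved entries it names the caller's machine register.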
326 void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
327
328 assert(reg->value() < _locs_length, "too big reg value for stack size");
329 assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
330 debug_only( _locs_used[reg->value()] = x; )
331
332 OopMapValue o(reg, x, optional);
333 o.write_on(write_stream());
334 increment_count();
335 if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
336 increment_num_oops();
337 } else if (x == OopMapValue::derived_oop_value) {
338 set_has_derived_oops(true);
339 }
340 }
341
342
343 void OopMap::set_oop(VMReg reg) {
344 set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
345 }
346
347
348 void OopMap::set_narrowoop(VMReg reg) {
349 set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
350 }
351
352
353 void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
354 set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
355 }
356
357
358 void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
359 if( reg == derived_from_local_register ) {
    // Actually an oop: the derived pointer shares storage with its base.
361 set_oop(reg);
362 } else {
363 set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
364 }
365 }
366
367 // OopMapSet
368
369 OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}
370
371 int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
372 map->set_offset(pc_offset);
373
374 #ifdef ASSERT
375 if(_list.length() > 0) {
376 OopMap* last = _list.last();
377 if (last->offset() == map->offset() ) {
378 fatal("OopMap inserted twice");
379 }
380 if (last->offset() > map->offset()) {
381 tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
382 _list.length(),last->offset(),_list.length()+1,map->offset());
383 }
384 }
385 #endif // ASSERT
386
387 int index = add(map);
388 map->_index = index;
389 return index;
390 }
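
// Illustrative sketch of how a stub or compiler back end typically records the
// oops live at a safepoint (frame_size_in_slots, arg_count, oop_slot, save_slot,
// saved_reg and pc_offset are placeholder names, not definitions from this file):
//
//   OopMapSet* oop_maps = new OopMapSet();
//   OopMap*    map      = new OopMap(frame_size_in_slots, arg_count);
//   map->set_oop(VMRegImpl::stack2reg(oop_slot));                  // stack slot holding an oop
//   map->set_callee_saved(VMRegImpl::stack2reg(save_slot), saved_reg);
//   oop_maps->add_gc_map(pc_offset, map);                          // key by offset from code begin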
391
392 class AddDerivedOop : public DerivedOopClosure {
393 public:
394 enum {
395 SkipNull = true, NeedsLock = true
396 };
397
398 virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
399 #if COMPILER2_OR_JVMCI
400 DerivedPointerTable::add(derived, base);
401 #endif // COMPILER2_OR_JVMCI
402 }
403 };
404
405 class ProcessDerivedOop : public DerivedOopClosure {
406 OopClosure* _oop_cl;
407
408 public:
409 ProcessDerivedOop(OopClosure* oop_cl) :
410 _oop_cl(oop_cl) {}
411
412 enum {
413 SkipNull = true, NeedsLock = true
414 };
415
416 virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers share the same base and the base slot is updated while the first
    // derived pointer is processed, the offset computed for the second derived pointer would be wrong.
420 derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
421 intptr_t offset = *derived - derived_base;
422 *derived = derived_base;
423 _oop_cl->do_oop((oop*)derived);
424 *derived = *derived + offset;
425 }
426 };
427
428 class IgnoreDerivedOop : public DerivedOopClosure {
429 OopClosure* _oop_cl;
430
431 public:
432 enum {
433 SkipNull = true, NeedsLock = true
434 };
435
436 virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
437 };
438
439 void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
440 find_map(fr)->oops_do(fr, reg_map, f, mode);
441 }
442
443 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
444 find_map(fr)->oops_do(fr, reg_map, f, df);
445 }
446
447 void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
448 OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
449 assert(derived_oop_fn != nullptr, "sanity");
450 OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
451 visitor.oops_do(fr, reg_map, this);
452 }
453
454 void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
455 OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
456 ProcessDerivedOop process_cl(oop_fn);
457 AddDerivedOop add_cl;
458 IgnoreDerivedOop ignore_cl;
459 DerivedOopClosure* derived_cl;
460 switch (derived_mode) {
461 case DerivedPointerIterationMode::_directly:
462 derived_cl = &process_cl;
463 break;
464 case DerivedPointerIterationMode::_with_table:
465 derived_cl = &add_cl;
466 break;
467 case DerivedPointerIterationMode::_ignore:
468 derived_cl = &ignore_cl;
469 break;
470 default:
471 guarantee (false, "unreachable");
472 }
473 OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
474 visitor.oops_do(fr, reg_map, this);
475 }
476
477 void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
478 OopMapValue omv;
479 for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
480 omv = oms.current();
481 if (fn->handle_type(omv.type())) {
482 fn->do_value(omv.reg(), omv.type());
483 }
484 }
485 }
486
487 void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
488 OopMapValue omv;
489 for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
490 omv = oms.current();
491 if (omv.type() == type) {
492 fn->do_value(omv.reg(), omv.type());
493 }
494 }
495 }
496
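// Record in reg_map where frame 'fr' saved each callee-saved register, keyed by
// the machine register the caller expects to find preserved.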
497 static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
498 for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
499 OopMapValue omv = oms.current();
500 if (omv.type() == OopMapValue::callee_saved_value) {
501 VMReg reg = omv.content_reg();
502 address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
503 reg_map->set_location(reg, loc);
504 }
505 }
506 }
507
508 // Update callee-saved register info for the following frame
509 void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
510 CodeBlob* cb = fr->cb();
511 assert(cb != nullptr, "no codeblob");
512 // Any reg might be saved by a safepoint handler (see generate_handler_blob).
513 assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
514 "already updated this map; do not 'update' it twice!" );
515 debug_only(reg_map->_update_for_id = fr->id());
516
517 // Check if caller must update oop argument
518 assert((reg_map->include_argument_oops() ||
519 !cb->caller_must_gc_arguments(reg_map->thread())),
520 "include_argument_oops should already be set");
521
522 // Scan through oopmap and find location of all callee-saved registers
523 // (we do not do update in place, since info could be overwritten)
524
525 update_register_map1(this, fr, reg_map);
526 }
527
528 const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
529 return find_map(fr->cb(), fr->pc());
530 }
531
532 const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
533 assert(cb != nullptr, "no codeblob");
534 const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
535 assert(map != nullptr, "no ptr map found");
536 return map;
537 }
538
539 // Update callee-saved register info for the following frame
540 void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
541 find_map(fr)->update_register_map(fr, reg_map);
542 }
543
544 //=============================================================================
545 // Non-Product code
546
547 #ifndef PRODUCT
548 void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
549 // Print oopmap and regmap
550 tty->print_cr("------ ");
551 CodeBlob* cb = fr->cb();
552 const ImmutableOopMapSet* maps = cb->oop_maps();
553 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
554 map->print();
555 if( cb->is_nmethod() ) {
556 nmethod* nm = (nmethod*)cb;
    // Native wrappers have no scope data; it is implied.
558 if (nm->is_native_method()) {
559 tty->print("bci: 0 (native)");
560 } else {
561 ScopeDesc* scope = nm->scope_desc_at(fr->pc());
562 tty->print("bci: %d ",scope->bci());
563 }
564 }
565 tty->cr();
566 fr->print_on(tty);
567 tty->print(" ");
568 cb->print_value_on(tty); tty->cr();
569 if (reg_map != nullptr) {
570 reg_map->print();
571 }
572 tty->print_cr("------ ");
573
574 }
575 #endif // PRODUCT
576
577 // Printing code is present in product build for -XX:+PrintAssembly.
578
579 static
580 void print_register_type(OopMapValue::oop_types x, VMReg optional,
581 outputStream* st) {
582 switch( x ) {
583 case OopMapValue::oop_value:
584 st->print("Oop");
585 break;
586 case OopMapValue::narrowoop_value:
587 st->print("NarrowOop");
588 break;
589 case OopMapValue::callee_saved_value:
590 st->print("Callers_");
591 optional->print_on(st);
592 break;
593 case OopMapValue::derived_oop_value:
594 st->print("Derived_oop_");
595 optional->print_on(st);
596 break;
597 default:
598 ShouldNotReachHere();
599 }
600 }
601
602 void OopMapValue::print_on(outputStream* st) const {
603 reg()->print_on(st);
604 st->print("=");
605 print_register_type(type(),content_reg(),st);
606 st->print(" ");
607 }
608
609 void OopMapValue::print() const { print_on(tty); }
610
611 void ImmutableOopMap::print_on(outputStream* st) const {
612 OopMapValue omv;
613 st->print("ImmutableOopMap {");
614 for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
615 omv = oms.current();
616 omv.print_on(st);
617 }
618 st->print("}");
619 }
620
621 void ImmutableOopMap::print() const { print_on(tty); }
622
623 void OopMap::print_on(outputStream* st) const {
624 OopMapValue omv;
625 st->print("OopMap {");
626 for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
627 omv = oms.current();
628 omv.print_on(st);
629 }
630 // Print hex offset in addition.
631 st->print("off=%d/0x%x}", (int) offset(), (int) offset());
632 }
633
634 void OopMap::print() const { print_on(tty); }
635
636 void ImmutableOopMapSet::print_on(outputStream* st) const {
637 const ImmutableOopMap* last = nullptr;
638 const int len = count();
639
640 st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);
641
642 for (int i = 0; i < len; i++) {
643 const ImmutableOopMapPair* pair = pair_at(i);
644 const ImmutableOopMap* map = pair->get_from(this);
645 if (map != last) {
646 st->cr();
647 map->print_on(st);
648 st->print(" pc offsets: ");
649 }
650 last = map;
651 st->print("%d ", pair->pc_offset());
652 }
653 st->cr();
654 }
655
656 void ImmutableOopMapSet::print() const { print_on(tty); }
657
658 void OopMapSet::print_on(outputStream* st) const {
659 const int len = _list.length();
660
661 st->print_cr("OopMapSet contains %d OopMaps", len);
662
663 for( int i = 0; i < len; i++) {
664 OopMap* m = at(i);
665 st->print_cr("#%d ",i);
666 m->print_on(st);
667 st->cr();
668 }
669 st->cr();
670 }
671
672 void OopMapSet::print() const { print_on(tty); }
673
674 bool OopMap::equals(const OopMap* other) const {
675 if (other->_omv_count != _omv_count) {
676 return false;
677 }
678 if (other->write_stream()->position() != write_stream()->position()) {
679 return false;
680 }
681 if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
682 return false;
683 }
684 return true;
685 }
686
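// Pairs are expected to be sorted by increasing pc_offset (see add_gc_map), so
// the first pair at or beyond the requested offset either matches exactly or
// shows that no oopmap was recorded for this PC.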
687 int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // We might not have an oopmap for asynchronous (non-safepoint) stack walks.
689 ImmutableOopMapPair* pairs = get_pairs();
690 for (int i = 0; i < _count; ++i) {
691 if (pairs[i].pc_offset() >= pc_offset) {
692 ImmutableOopMapPair* last = &pairs[i];
693 return last->pc_offset() == pc_offset ? i : -1;
694 }
695 }
696 return -1;
697 }
698
699 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
700 ImmutableOopMapPair* pairs = get_pairs();
701 ImmutableOopMapPair* last = nullptr;
702
703 for (int i = 0; i < _count; ++i) {
704 if (pairs[i].pc_offset() >= pc_offset) {
705 last = &pairs[i];
706 break;
707 }
708 }
709
  // Guard against a potential out-of-bounds access (reported by Coverity).
711 guarantee(last != nullptr, "last may not be null");
712 assert(last->pc_offset() == pc_offset, "oopmap not found");
713 return last->get_from(this);
714 }
715
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
719 _has_derived_oops = oopmap->has_derived_oops();
720 address addr = data_addr();
721 oopmap->copy_and_sort_data_to(addr);
722 }
723
724 bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
725 for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
726 if (oms.current().type() == type) {
727 return true;
728 }
729 }
730 return false;
731 }
732
733 #ifdef ASSERT
734 int ImmutableOopMap::nr_of_bytes() const {
735 OopMapStream oms(this);
736
737 while (!oms.is_done()) {
738 oms.next();
739 }
740 return sizeof(ImmutableOopMap) + oms.stream_position();
741 }
742 #endif
743
744 ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
745 _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
746 }
747
748 int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
749 return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
750 }
751
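// First pass over the set: compute the total number of bytes required and
// record, per OopMap, whether its data will be written fresh, shared with the
// single empty map, or shared with an identical predecessor. fill() performs
// the second pass that actually writes the pairs and map data.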
752 int ImmutableOopMapBuilder::heap_size() {
753 int base = sizeof(ImmutableOopMapSet);
754 base = align_up(base, 8);
755
  // all of our pc/offset pairs
757 int pairs = _set->size() * sizeof(ImmutableOopMapPair);
758 pairs = align_up(pairs, 8);
759
760 for (int i = 0; i < _set->size(); ++i) {
761 int size = 0;
762 OopMap* map = _set->at(i);
763
764 if (is_empty(map)) {
765 /* only keep a single empty map in the set */
766 if (has_empty()) {
767 _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
768 } else {
769 _empty_offset = _offset;
770 _empty = map;
771 size = size_for(map);
772 _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
773 }
774 } else if (is_last_duplicate(map)) {
775 /* if this entry is identical to the previous one, just point it there */
776 _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
777 } else {
778 /* not empty, not an identical copy of the previous entry */
779 size = size_for(map);
780 _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
781 _last_offset = _offset;
782 _last = map;
783 }
784
785 assert(_mapping[i]._map == map, "check");
786 _offset += size;
787 }
788
789 int total = base + pairs + _offset;
790 DEBUG_ONLY(total += 8);
791 _required = total;
792 return total;
793 }
794
795 void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
796 assert(offset < set->nr_of_bytes(), "check");
797 new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
798 }
799
800 int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
801 fill_pair(pair, map, offset, set);
802 address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap
803
804 new (addr) ImmutableOopMap(map);
805 return size_for(map);
806 }
807
808 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
809 ImmutableOopMapPair* pairs = set->get_pairs();
810
811 for (int i = 0; i < set->count(); ++i) {
812 const OopMap* map = _mapping[i]._map;
813 ImmutableOopMapPair* pair = nullptr;
814 int size = 0;
815
816 if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
817 size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
818 } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
819 fill_pair(&pairs[i], map, _mapping[i]._offset, set);
820 }
821
822 //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
823 //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
824 }
825 }
826
827 #ifdef ASSERT
828 void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
829 for (int i = 0; i < 8; ++i) {
830 assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
831 }
832
833 for (int i = 0; i < set->count(); ++i) {
834 const ImmutableOopMapPair* pair = set->pair_at(i);
835 assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
836 const ImmutableOopMap* map = pair->get_from(set);
837 int nr_of_bytes = map->nr_of_bytes();
838 assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
839 }
840 }
841 #endif
842
843 ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
844 DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));
845
846 _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
847 fill(_new_set, _required);
848
849 DEBUG_ONLY(verify(buffer, _required, _new_set));
850
851 return _new_set;
852 }
853
854 ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
855 _required = heap_size();
856
857 // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
858 address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
859 return generate_into(buffer);
860 }
861
862 ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
863 ResourceMark mark;
864 ImmutableOopMapBuilder builder(oopmap_set);
865 return builder.build();
866 }
867
868 void ImmutableOopMapSet::operator delete(void* p) {
869 FREE_C_HEAP_ARRAY(unsigned char, p);
870 }
871
872 //------------------------------DerivedPointerTable---------------------------
873
874 #if COMPILER2_OR_JVMCI
875
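// Protocol: clear() activates the table before derived pointers are collected,
// add() records each derived pointer found during stack scanning (remembering
// its offset and pointing the slot at the base's location), and
// update_pointers() rewrites every recorded slot as new_base + offset once the
// base oops have been updated. Entries live on a lock-free stack so GC worker
// threads can add them concurrently.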
876 class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
877 derived_pointer* _location; // Location of derived pointer, also pointing to base
878 intptr_t _offset; // Offset from base pointer
879 Entry* volatile _next;
880
881 static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }
882
883 public:
884 Entry(derived_pointer* location, intptr_t offset) :
885 _location(location), _offset(offset), _next(nullptr) {}
886
887 derived_pointer* location() const { return _location; }
888 intptr_t offset() const { return _offset; }
889 Entry* next() const { return _next; }
890
891 typedef LockFreeStack<Entry, &next_ptr> List;
892 static List* _list;
893 };
894
895 DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
896 bool DerivedPointerTable::_active = false;
897
898 bool DerivedPointerTable::is_empty() {
899 return Entry::_list == nullptr || Entry::_list->empty();
900 }
901
902 void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert(!_active, "should not be active");
907 assert(is_empty(), "table not empty");
908 if (Entry::_list == nullptr) {
909 void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
910 Entry::_list = ::new (mem) Entry::List();
911 }
912 _active = true;
913 }
914
915 void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
916 assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
917 assert(derived_loc != (void*)base_loc, "Base and derived in same location");
918 derived_pointer base_loc_as_derived_pointer =
919 static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
920 assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
921 assert(Entry::_list != nullptr, "list must exist");
922 assert(is_active(), "table must be active here");
923 intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
924 // This assert is invalid because derived pointers can be
925 // arbitrarily far away from their base.
926 // assert(offset >= -1000000, "wrong derived pointer info");
927
928 if (TraceDerivedPointers) {
929 tty->print_cr(
930 "Add derived pointer@" INTPTR_FORMAT
931 " - Derived: " INTPTR_FORMAT
932 " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: %zd)",
933 p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
934 );
935 }
936 // Set derived oop location to point to base.
937 *derived_loc = base_loc_as_derived_pointer;
938 Entry* entry = new Entry(derived_loc, offset);
939 Entry::_list->push(*entry);
940 }
941
942 void DerivedPointerTable::update_pointers() {
943 assert(Entry::_list != nullptr, "list must exist");
944 Entry* entries = Entry::_list->pop_all();
945 while (entries != nullptr) {
946 Entry* entry = entries;
947 entries = entry->next();
948 derived_pointer* derived_loc = entry->location();
949 intptr_t offset = entry->offset();
    // The derived oop was set up to point to the location of its base.
951 oop base = **reinterpret_cast<oop**>(derived_loc);
952 assert(Universe::heap()->is_in_or_null(base), "must be an oop");
953
954 derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
955 *derived_loc = derived_base + offset;
956 assert(*derived_loc - derived_base == offset, "sanity check");
957
958 // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);
959
960 if (TraceDerivedPointers) {
961 tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
962 " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: %zd)",
963 p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
964 }
965
966 // Delete entry
967 delete entry;
968 }
969 assert(Entry::_list->empty(), "invariant");
970 _active = false;
971 }
972
973 #endif // COMPILER2_OR_JVMCI
--- EOF ---