/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/debugInfo.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/stackValue.hpp"
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#endif

class RegisterMap;
class SmallRegisterMap;


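// Resolve the oop stored at the given location, which may point into a stack
// chunk (holding a narrow oop if the chunk has a bitmap) or be a raw stack
// slot. A null address, or a compressed-oops base value left behind by an
// implicit null check, resolves to nullptr.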
static oop oop_from_oop_location(stackChunkOop chunk, void* addr) {
  if (addr == nullptr) {
    return nullptr;
  }

  if (UseCompressedOops) {
    // When compressed oops are enabled, an oop location may
    // contain narrow oop values - we deal with that here.

    if (chunk != nullptr && chunk->has_bitmap()) {
      // Transformed stack chunk with narrow oops
      return chunk->load_oop((narrowOop*)addr);
    }

#ifdef _LP64
    if (CompressedOops::is_base(*(void**)addr)) {
      // Compiled code may produce a decoded oop equal to the narrow oop base
      // when a narrow oop implicit null check is used. The narrow oop base
      // can be null or the address of the page below the heap; use a null
      // value in both cases.
      return nullptr;
    }
#endif
  }

  if (chunk != nullptr) {
    // Load oop from chunk
    return chunk->load_oop((oop*)addr);
  }

  // Load oop from stack
  oop val = *(oop*)addr;

#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    // Pass the value through the barrier to avoid capturing bad oops as
    // stack values. Note: do not heal the location, to avoid accidentally
    // corrupting the stack. Stack watermark barriers are supposed to handle
    // the healing.
    val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val);
  }
#endif

  return val;
}

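// Resolve the oop stored as a narrow oop at the given location. For register
// locations the narrow oop occupies the low-order half of the saved 64-bit
// slot, which on big-endian platforms sits at the higher address, hence the
// BIG_ENDIAN_ONLY adjustment below.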
static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
  assert(UseCompressedOops, "Narrow oops should not exist");
  assert(addr != nullptr, "Not expecting null address");
  narrowOop* narrow_addr;
  if (is_register) {
    // The callee has no clue whether the register holds an int,
    // long or is unused. It always saves a long. Here we know
    // a long was saved, but we only want an int back. Narrow the
    // saved long to the int that the JVM wants. We can't just
    // use narrow_oop_cast directly, because we don't know what
    // the high bits of the value might be.
    narrow_addr = ((narrowOop*)addr) BIG_ENDIAN_ONLY(+ 1);
  } else {
    narrow_addr = (narrowOop*)addr;
  }

  if (chunk != nullptr) {
    // Load oop from chunk
    return chunk->load_oop(narrow_addr);
  }

  // Load oop from stack
  oop val = CompressedOops::decode(*narrow_addr);

#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    // Pass the value through the barrier to avoid capturing bad oops as
    // stack values. Note: do not heal the location, to avoid accidentally
    // corrupting the stack. Stack watermark barriers are supposed to handle
    // the healing.
    val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val);
  }
#endif

  return val;
}

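// The two factories below resolve the oop at the given location and wrap it
// in a Handle before returning it as a StackValue.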
StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) {
  oop val = oop_from_oop_location(chunk, addr);
  assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
         p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
  Handle h(Thread::current(), val); // Wrap a handle around the oop
  return new StackValue(h);
}

StackValue* StackValue::create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
  oop val = oop_from_narrowOop_location(chunk, addr, is_register);
  assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
         p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
  Handle h(Thread::current(), val); // Wrap a handle around the oop
  return new StackValue(h);
}


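// Explicit instantiations for the two register map types used by callers.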
template StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
template StackValue* StackValue::create_stack_value(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv);

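// Build a StackValue from a ScopeValue, which describes either a location in
// the frame (stack slot or register), a constant, or a (possibly scalar
// replaced) object.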
template<typename RegisterMapT>
StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) {
  address value_addr = stack_value_address(fr, reg_map, sv);
  stackChunkOop chunk = reg_map->stack_chunk()();
  if (sv->is_location()) {
    // Stack or register value
    Location loc = ((LocationValue *)sv)->location();

    // Then package it according to its type
    // Note: the transfer of the data is through a union that contains
    // an intptr_t. This is because an interpreter stack slot is
    // really an intptr_t. The use of a union containing an intptr_t
    // ensures that on a 64-bit platform we have proper alignment
    // and that we store the value where the interpreter will expect
    // to find it (i.e. proper endianness). Similarly, on a 32-bit platform
    // using the intptr_t ensures that, when a value is larger than
    // a stack slot (jlong/jdouble), we capture the proper part
    // of the value for the stack slot in question.
    //
    switch( loc.type() ) {
    case Location::float_in_dbl: { // Holds a float in a double register?
      // The callee has no clue whether the register holds a float,
      // double or is unused. It always saves a double. Here we know
      // a double was saved, but we only want a float back. Narrow the
      // saved double to the float that the JVM wants.
      assert( loc.is_register(), "floats always saved to stack in 1 word" );
      union { intptr_t p; jfloat jf; } value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.jf = (jfloat) *(jdouble*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
    case Location::int_in_long: { // Holds an int in a long register?
      // The callee has no clue whether the register holds an int,
      // long or is unused. It always saves a long. Here we know
      // a long was saved, but we only want an int back. Narrow the
      // saved long to the int that the JVM wants.
      assert( loc.is_register(), "ints always saved to stack in 1 word" );
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.ji = (jint) *(jlong*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
#ifdef _LP64
    case Location::dbl:
      // Double value in an aligned adjacent pair
      return new StackValue(*(intptr_t*)value_addr);
    case Location::lng:
      // Long value in an aligned adjacent pair
      return new StackValue(*(intptr_t*)value_addr);
    case Location::narrowoop:
      return create_stack_value_from_narrowOop_location(chunk, (void*)value_addr, loc.is_register());
#endif
    case Location::oop:
      return create_stack_value_from_oop_location(chunk, (void*)value_addr);
    case Location::addr: {
      loc.print_on(tty);
      ShouldNotReachHere(); // both C1 and C2 now inline jsrs
    }
    case Location::normal: {
      // Just copy all other bits straight through
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.ji = *(jint*)value_addr;
      return new StackValue(value.p);
    }
    case Location::invalid: {
      return new StackValue();
    }
    case Location::vector: {
      loc.print_on(tty);
      ShouldNotReachHere(); // should be handled by VectorSupport::allocate_vector()
    }
    default:
      loc.print_on(tty);
      ShouldNotReachHere();
    }

  } else if (sv->is_constant_int()) {
    // Constant int: treat same as register int.
    union { intptr_t p; jint ji;} value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.ji = (jint)((ConstantIntValue*)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_oop()) {
    // constant oop
    return new StackValue(sv->as_ConstantOopReadValue()->value());
#ifdef _LP64
  } else if (sv->is_constant_double()) {
    // Constant double in a single stack slot
    union { intptr_t p; double d; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.d = ((ConstantDoubleValue *)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_long()) {
    // Constant long in a single stack slot
    union { intptr_t p; jlong jl; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.jl = ((ConstantLongValue *)sv)->value();
    return new StackValue(value.p);
#endif
  } else if (sv->is_object()) { // Scalar replaced object in compiled frame
    ObjectValue* ov = (ObjectValue *)sv;
    Handle hdl = ov->value();
    bool scalar_replaced = hdl.is_null() && ov->is_scalar_replaced();
    if (ov->has_properties()) {
      Klass* k = java_lang_Class::as_Klass(ov->klass()->as_ConstantOopReadValue()->value()());
      if (!k->is_array_klass()) {
        // Don't treat an inline type as scalar replaced if it is null
        jint null_marker = StackValue::create_stack_value(fr, reg_map, ov->properties())->get_jint();
        scalar_replaced &= (null_marker != 0);
      }
    }
    return new StackValue(hdl, scalar_replaced ? 1 : 0);
  } else if (sv->is_marker()) {
    // Should never need to directly construct a marker.
    ShouldNotReachHere();
  }
  // Unknown ScopeValue type
  ShouldNotReachHere();
  return new StackValue((intptr_t) 0); // dummy
}

template address StackValue::stack_value_address(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
template address StackValue::stack_value_address(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv);

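// Compute the address holding the value described by a LocationValue: a
// register save location or frame slot for an ordinary frame, or a location
// inside the stack chunk when the frame belongs to a continuation.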
template<typename RegisterMapT>
address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) {
  if (!sv->is_location()) {
    return nullptr;
  }
  Location loc = ((LocationValue *)sv)->location();
  if (loc.type() == Location::invalid) {
    return nullptr;
  }

  if (!reg_map->in_cont()) {
    address value_addr = loc.is_register()
      // Value was in a callee-save register
      ? reg_map->location(VMRegImpl::as_VMReg(loc.register_number()), fr->sp())
      // Else value was directly saved on the stack. The frame's original stack pointer,
      // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
      : ((address)fr->unextended_sp()) + loc.stack_offset();

    assert(value_addr == nullptr || reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr));
    return value_addr;
  }

  address value_addr = loc.is_register()
    ? reg_map->as_RegisterMap()->stack_chunk()->reg_to_location(*fr, reg_map->as_RegisterMap(), VMRegImpl::as_VMReg(loc.register_number()))
    : reg_map->as_RegisterMap()->stack_chunk()->usp_offset_to_location(*fr, loc.stack_offset());

  assert(value_addr == nullptr || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != nullptr && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr));
  return value_addr;
}

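// Locate the BasicLock that the given stack Location describes within the frame.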
BasicLock* StackValue::resolve_monitor_lock(const frame& fr, Location location) {
  assert(location.is_stack(), "for now we only look at the stack");
  int word_offset = location.stack_offset() / wordSize;
  // (stack picture)
  // high: [     ]  word_offset + 1
  // low:  [     ]  word_offset
  //
  // sp->  [     ]  0
  // word_offset is the distance, in words, from the stack pointer to the lowest address.
  // The frame's original stack pointer, before any extension by its callee
  // (due to Compiler1 linkage on SPARC), must be used.
  return (BasicLock*) (fr.unextended_sp() + word_offset);
}


#ifndef PRODUCT

void StackValue::print_on(outputStream* st) const {
  switch(_type) {
    case T_INT:
      st->print("%d (int) %f (float) %x (hex)", *(int *)&_integer_value, *(float *)&_integer_value, *(int *)&_integer_value);
      break;

    case T_OBJECT:
      if (_handle_value() != nullptr) {
        _handle_value()->print_value_on(st);
      } else {
        st->print("null");
      }
      st->print(" <" INTPTR_FORMAT ">", p2i(_handle_value()));
      break;

    case T_CONFLICT:
      st->print("conflict");
      break;

    default:
      ShouldNotReachHere();
  }
}

#endif