/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
                                  FloatRegister f0, FloatRegister f1,
                                  Register result)
{
  if (is_float) {
    fcmps(f0, f1);
  } else {
    fcmpd(f0, f1);
  }
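  // After an fcmp the flags are: equal -> Z|C, less -> N, greater -> C,
  // unordered -> C|V. Hence below: NE (Z == 0) is true for less, greater
  // or unordered; LT (N != V) is true for less or unordered; and
  // LO (C == 0) is true only for an ordered less-than.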
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    cset(result, NE);         // Not equal or unordered
    cneg(result, result, LT); // Less than or unordered
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    cset(result, NE);         // Not equal or unordered
    cneg(result, result, LO); // Less than
  }
}

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(hdr, obj);
    ldrw(hdr, Address(hdr, Klass::access_flags_offset()));
    tstw(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    br(Assembler::NE, slow_case);
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_lock(obj, hdr, temp, rscratch2, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    Label done;
    // Load object header
    ldr(hdr, Address(obj, hdr_offset));
    // and mark it as unlocked
    orr(hdr, hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    str(hdr, Address(disp_hdr, 0));
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    lea(rscratch2, Address(obj, hdr_offset));
    cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthrough*/nullptr);
    // if the object header was the same, we're done
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) sp <= hdr
    // 3) hdr <= sp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //
    // (hdr - sp) & (aligned_mask - page_size)
    //
    // assuming both the stack pointer and page_size have their least
    // significant 3 bits cleared and page_size is a power of 2
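    //
    // e.g. with an 8-byte word and a 4K page, aligned_mask - page_size is
    // 7 - 4096 = 0x...fffff007, so the "ands" below yields zero exactly when
    // hdr - sp is non-negative, 8-byte aligned and less than the page size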
    mov(rscratch1, sp);
    sub(hdr, hdr, rscratch1);
    ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
    // for recursive locking, the result is zero => save it in the displaced header
    // location (null in the displaced hdr location indicates recursive locking)
    str(hdr, Address(disp_hdr, 0));
    // otherwise we don't care about the result and handle locking via runtime call
    cbnz(hdr, slow_case);
    // done
    bind(done);
  }
  increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  return null_check_offset;
}


void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  Label done;

  if (LockingMode != LM_LIGHTWEIGHT) {
    // load displaced header
    ldr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is null we had recursive locking, and we are done
    cbz(hdr, done);
  }

  // load object
  ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
  verify_oop(obj);

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    // test if the object header is pointing to the displaced header, and if so,
    // restore the displaced header in the object. If the object header is not
    // pointing to the displaced header, get the current header instead and do
    // the unlocking via a runtime call.
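    // The mark word sits at offset 0 in every object (hdr_offset is currently
    // always 0), so obj itself already addresses it and the lea is only needed
    // in the (theoretical) non-zero-offset case.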
    if (hdr_offset) {
      lea(rscratch1, Address(obj, hdr_offset));
      cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
    } else {
      cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
    }
    // done
    bind(done);
  }
  decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
}


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    b(slow_case);
  }
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseCompactObjectHeaders) {
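    // With compact headers the prototype already carries the compressed
    // klass pointer in its upper bits, so this single store initializes
    // both the mark word and the klass.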
    ldr(t1, Address(klass, Klass::prototype_header_offset()));
    str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
  } else {
    // This assumes that all prototype bits fit in an int32_t
    mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
    str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

    if (UseCompressedClassPointers) { // Take care not to kill klass
      encode_klass_not_null(t1, klass);
      strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
    } else {
      str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
  }

  if (len->is_valid()) {
    strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
    store_klass_gap(obj, zr);
  }
}

// preserves obj, destroys len_in_bytes
//
// Scratch registers: t1 = r10, t2 = r11
//
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1, Register t2) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  assert(t1 == r10 && t2 == r11, "must be");

  Label done;

  // len_in_bytes is positive and ptr sized
  subs(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
  br(Assembler::EQ, done);

  // zero_words() takes ptr in r10 and count in words in r11
  mov(rscratch1, len_in_bytes);
  lea(t1, Address(obj, hdr_size_in_bytes));
  lsr(t2, rscratch1, LogBytesPerWord);
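  // e.g. for a 32-byte object with a 16-byte header, the subs above leaves
  // len_in_bytes == 16, so t2 ends up holding a word count of 2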
  address tpc = zero_words(t1, t2);

  bind(done);
  if (tpc == nullptr) {
    Compilation::current()->bailout("no space for trampoline stub");
  }
}


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}

// Scratch registers: t1 = r10, t2 = r11
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space (unnecessary when the TLAB was already
    // zeroed at allocation time, i.e. when allocating in a TLAB with +ZeroTLAB)
    const Register index = t2;
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1, t2);
      if (Compilation::current()->bailed_out()) {
        return;
      }
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      con_size_in_bytes -= hdr_size_in_bytes;
      lea(t1, Address(obj, hdr_size_in_bytes));
      address tpc = zero_words(t1, con_size_in_bytes / BytesPerWord);
      if (tpc == nullptr) {
        Compilation::current()->bailout("no space for trampoline stub");
        return;
      }
    }
  }

  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  mov(rscratch1, (int32_t)max_array_allocation_length);
  cmp(len, rscratch1);
  br(Assembler::HS, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  mov(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask);
  add(arr_size, arr_size, len, ext::uxtw, f);
  andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
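  // arr_size now holds align_up(base_offset_in_bytes + (len << f),
  // MinObjAlignmentInBytes), f being log2 of the element size; e.g. an
  // int[] (f == 2) with len == 5 and a 16-byte base: 16 + 20 = 36,
  // aligned up to 40 bytes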

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // Clear leading 4 bytes, if necessary.
  // TODO: This could perhaps go into initialize_body() and also clear the leading 4 bytes
  // for non-array objects, thereby replacing the klass-gap clearing code in initialize_header().
  int base_offset = base_offset_in_bytes;
  if (!is_aligned(base_offset, BytesPerWord)) {
    assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
    strw(zr, Address(obj, base_offset));
    base_offset += BytesPerInt;
  }
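  // (e.g. with compact object headers an int[] starts its elements at offset 12,
  // 8-byte header plus 4-byte length, so the strw above zeroes bytes 12..15 and
  // base_offset advances to 16)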
  assert(is_aligned(base_offset, BytesPerWord), "must be word-aligned");

  // clear rest of allocated space
  initialize_body(obj, arr_size, base_offset, t1, t2);
  if (Compilation::current()->bailed_out()) {
    return;
  }

  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}


void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit null check not needed since load from [klass_offset] causes a trap
  // check against inline cache; this is checked in Universe::genesis().
  cmp_klass(receiver, iCache, rscratch1);
}


void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
}

void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize);
}


void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump. For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC. Make it a NOP.
  nop();
}

void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
  // rfp, + 0: link
  //      + 1: return address
  //      + 2: argument with offset 0
  //      + 3: argument with offset 1
  //      + 4: ...

  ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(sp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) {
#ifdef ASSERT
  static int nn;
  if (inv_r0) mov(r0, 0xDEAD);
  if (inv_r19) mov(r19, 0xDEAD);
  if (inv_r2) mov(r2, nn++);
  if (inv_r3) mov(r3, 0xDEAD);
  if (inv_r4) mov(r4, 0xDEAD);
  if (inv_r5) mov(r5, 0xDEAD);
#endif
}
#endif // ifndef PRODUCT