/*
 * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1-specific
// files.

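// Verified-entry inline cache check: compare the receiver's klass with
// the expected klass in `iCache`; on a mismatch, jump to the shared
// IC miss stub.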
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

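  // Bang the stack ahead of frame allocation so a stack overflow is
  // detected before this frame is built (bang_size_in_bytes covers at
  // least the whole new frame).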
  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to remember SP: it may be modified
  // if this method contains a methodHandle call site.
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}
66
67 void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
68 add_slow(SP, SP, frame_size_in_bytes);
69 raw_pop(FP, LR);
70 }
71
72 void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
73 if (breakAtEntry) {
74 breakpoint();
75 }
76 }
77
78 // Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
79 void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
80 RegisterOrConstant size_expression, Label& slow_case) {
81 if (UseTLAB) {
82 tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
83 } else {
84 eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
85 }
86 }
87
88
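// Writes the mark word and klass pointer and, for arrays (len is a
// valid register), the length field.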
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  mov(tmp, (intptr_t)markWord::prototype().value());

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}


// Zeroes the object body [base..obj_end]. Clobbers the `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}


void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

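  // Zero the body unless the TLAB is configured to hand out pre-zeroed
  // memory. Small objects of statically known size are zeroed with
  // unrolled stores; everything else goes through the zero_memory loop
  // in initialize_body.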
  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size_in_bytes, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

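  // Unsigned comparison: a negative `len` is also routed to the slow path.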
  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

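  // obj_size = header_size_in_bytes + len * element_size, rounded up to
  // MinObjAlignmentInBytes when the header or element size is not
  // already aligned.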
  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  str(obj, Address(disp_hdr, obj_offset));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp2, obj);
    ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
    tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(slow_case, ne);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
  // That would be acceptable, as either the CAS or the slow case path is taken in that case.

  // Must be the first instruction here, because the implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

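  // If the unlocked bit is set in the mark word, the object is not locked:
  // take the fast path and try to install a stack lock via CAS.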
  tst(hdr, markWord::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If still 'eq', then the lock is recursively held by this thread.
  // tmp2 is zero for a recursive lock and non-zero otherwise (see discussion in JDK-8267042).
  str(tmp2, Address(disp_hdr, mark_offset));
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);

  bind(fast_lock);
  // Save previous object header in BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);
  bind(done);

  return null_check_offset;
}

244
245 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
246 assert_different_registers(hdr, obj, disp_hdr, Rtemp);
247 Register tmp2 = Rtemp;
248
249 assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
250 const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
251 const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
252
253 Label done;
254
255 assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
256
257 // Load displaced header and object from the lock
258 ldr(hdr, Address(disp_hdr, mark_offset));
259 // If hdr is NULL, we've got recursive locking and there's nothing more to do
260 cbz(hdr, done);
261
262 // load object
263 ldr(obj, Address(disp_hdr, obj_offset));
264
265 // Restore the object header
266 cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);
267
268 bind(done);
269 }
270
271
#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT
--- EOF ---