/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

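// Inline cache check performed at the unverified entry point: the receiver's
// klass is loaded into Rtemp and compared against the expected klass held in
// `iCache`. On a hit, execution falls through to the verified entry point;
// on a miss, control transfers to the shared inline-cache miss stub.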
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

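// Builds the C1 frame: bang the stack with `bang_size_in_bytes` (at least the
// frame size, presumably also covering the shadow region for callees), push
// FP/LR, reserve `frame_size_in_bytes` of stack, then emit the nmethod entry
// barrier that lets concurrent GCs intercept entry into stale nmethods.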
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to remember SP. It may be modified
  // if this method contains a methodHandle call site.
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this);
}

void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}

void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  if (breakAtEntry) {
    breakpoint();
  }
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    b(slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  mov(tmp, (intptr_t)markWord::prototype().value());

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}


// Zeroes the object body [base, obj_end). Clobbers `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}


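// Initializes a freshly allocated object: writes the header, then zeroes the
// body unless the TLAB is already pre-zeroed (UseTLAB && ZeroTLAB). For small
// objects of statically known size (up to 8 words) the zeroing is unrolled
// into individual stores; otherwise initialize_body() zeroes from the end of
// the header to obj_end. A StoreStore barrier then orders the initializing
// stores before any store that could publish the object.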
void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

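// Allocates an array and initializes its header. The unsigned (hs) compare
// against max_array_allocation_length sends both oversized and negative
// lengths to the slow path. The size computation below is, in effect:
//   obj_size = header_size_in_bytes + len * element_size,
// rounded up to MinObjAlignmentInBytes unless both the header size and the
// element size are already multiples of the minimum object alignment.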
void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}

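// Emits the monitorenter fast path and returns the code offset of the
// instruction used for the implicit null check on `obj` (the first access
// to the object after that point). With LM_LIGHTWEIGHT the work is done by
// fast_lock_2(); with LM_LEGACY a stack lock is installed: the displaced
// mark word is saved in the BasicLock and the lock is acquired by CAS.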
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, obj_offset));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp2, obj);
    ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
    tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(slow_case, ne);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  if (LockingMode == LM_LIGHTWEIGHT) {
    log_trace(fastlock)("C1_MacroAssembler::lock fast");

    Register t1 = disp_hdr; // Needs saving, probably
    Register t2 = hdr;      // blown
    Register t3 = Rtemp;    // blown

    fast_lock_2(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
    // Success: fall through

  } else if (LockingMode == LM_LEGACY) {

    // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
    // That would be acceptable, as either the CAS or the slow-case path is taken in that case.

    // Must be the first instruction here, because implicit null check relies on it
    ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

    tst(hdr, markWord::unlocked_value);
    b(fast_lock, ne);

    // Check for recursive locking
    // See comments in InterpreterMacroAssembler::lock_object for
    // explanations on the fast recursive locking check.
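    // In C-like pseudocode, the two-step test below is roughly:
    //   if ((hdr & 3) == 0 && (uintptr_t)(hdr - SP) < os::vm_page_size())
    //     -> recursive stack lock held by the current frame's thread
    // (a sketch only; the actual result is carried through the 'eq' flag)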
    // -1- test low 2 bits
    movs(tmp2, AsmOperand(hdr, lsl, 30));
    // -2- test (hdr - SP) if the low two bits are 0
    sub(tmp2, hdr, SP, eq);
    movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
    // If still 'eq' then recursive locking OK.
    // Set to zero if recursive lock, set to non-zero otherwise (see discussion in JDK-8267042).
    str(tmp2, Address(disp_hdr, mark_offset));
    b(fast_lock_done, eq);
    // else need slow case
    b(slow_case);

    bind(fast_lock);
    // Save previous object header in BasicLock structure and update the header
    str(hdr, Address(disp_hdr, mark_offset));

    cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

    bind(fast_lock_done);
  }
  bind(done);

  return null_check_offset;
}

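// Emits the monitorexit fast path, mirroring lock_object(). For LM_LEGACY,
// a null displaced header in the BasicLock marks a recursive unlock, so
// there is nothing to do; otherwise the saved mark word is CASed back into
// the object header, falling into `slow_case` if the object is no longer
// stack-locked by this BasicLock.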
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  if (LockingMode == LM_LIGHTWEIGHT) {
    log_trace(fastlock)("C1_MacroAssembler::unlock fast");

    ldr(obj, Address(disp_hdr, obj_offset));

    Register t1 = disp_hdr; // Needs saving, probably
    Register t2 = hdr;      // blown
    Register t3 = Rtemp;    // blown

    fast_unlock_2(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
                  slow_case);
    // Success: fall through

  } else if (LockingMode == LM_LEGACY) {

    // Load displaced header and object from the lock
    ldr(hdr, Address(disp_hdr, mark_offset));
    // If hdr is null, we've got recursive locking and there's nothing more to do
    cbz(hdr, done);

    // load object
    ldr(obj, Address(disp_hdr, obj_offset));

    // Restore the object header
    cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);
  }
  bind(done);
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT