  assert_different_registers(hdr, obj, disp_hdr, tmp);
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset()), obj);

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(hdr, obj, rscratch1);
    movl(hdr, Address(hdr, Klass::access_flags_offset()));
    testl(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    jcc(Assembler::notZero, slow_case);
  }
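  // A note on the check above (hedged summary; see JEP 390 and the
  // DiagnoseSyncOnValueBasedClasses flag): value-based classes such as
  // java.lang.Integer carry JVM_ACC_IS_VALUE_BASED_CLASS in their access
  // flags, and synchronizing on them is diverted to the slow path so the
  // runtime can log a warning or abort, depending on the flag's value.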

  if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
    const Register thread = r15_thread;
    lightweight_lock(disp_hdr, obj, hdr, thread, tmp, slow_case);
#else
    // Implicit null check.
    movptr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
    // Lacking registers and thread on x86_32. Always take slow path.
    jmp(slow_case);
#endif
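    // Rough sketch of what lightweight_lock emits on LP64 (not authoritative;
    // see MacroAssembler::lightweight_lock for the real code): load the mark
    // word, bail to slow_case if the object is monitor-locked or the
    // per-thread lock stack is full, CAS the mark word from unlocked to
    // locked, and push obj onto the thread's lock stack.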
  } else if (LockingMode == LM_LEGACY) {
    Label done;
    // Load object header
    movptr(hdr, Address(obj, hdr_offset));
    // and mark it as unlocked
    orptr(hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    movptr(Address(disp_hdr, 0), hdr);
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
    // if the object header was the same, we're done
    jcc(Assembler::equal, done);
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) rsp <= hdr
    // 3) hdr <= rsp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //   (hdr - rsp) & (aligned_mask - page_size)
    // assuming both the stack pointer and page_size have their least
    // significant 2 bits cleared and page_size is a power of 2
    subptr(hdr, rsp);
    andptr(hdr, aligned_mask - (int)os::vm_page_size());
    // for recursive locking, the result is zero => save it in the displaced header
    // location (null in the displaced hdr location indicates recursive locking)
    movptr(Address(disp_hdr, 0), hdr);
    // otherwise we don't care about the result and handle locking via runtime call
    jcc(Assembler::notZero, slow_case);
    // done
    bind(done);
  }

  inc_held_monitor_count();
  return null_check_offset;
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (LockingMode != LM_LIGHTWEIGHT) {
    // load displaced header
    movptr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is null we had recursive locking
    testptr(hdr, hdr);
    // if we had recursive locking, we are done
    jcc(Assembler::zero, done);
  }

  // load object
  movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
  verify_oop(obj);

  if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
    lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
#else
    // Lacking registers and thread on x86_32. Always take slow path.
    jmp(slow_case);
#endif
  } else if (LockingMode == LM_LEGACY) {
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(hdr, Address(obj, hdr_offset));
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    jcc(Assembler::notEqual, slow_case);
    // done
  }
  bind(done);
  dec_held_monitor_count();
}
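
// The LM_LEGACY paths above implement stack locking. In pseudo-code (a
// sketch, not authoritative):
//   lock:   CAS(obj->mark, unlocked_mark -> &basic_lock); on CAS failure,
//           treat a mark that points into the current stack page as a
//           recursive lock and store null into the BasicLock as the marker.
//   unlock: a null displaced header means recursive locking, nothing to do;
//           otherwise CAS(obj->mark, &basic_lock -> displaced_mark), falling
//           back to the runtime if the mark no longer points at this
//           BasicLock (e.g. after the lock was inflated to a monitor).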


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    jmp(slow_case);
  }
}
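
// Conceptually, tlab_allocate performs a bump-pointer allocation in the
// current thread's TLAB (a sketch with illustrative field names; see
// MacroAssembler::tlab_allocate for the real emitted code):
//   obj = thread->tlab_top();
//   end = obj + size;
//   if (end > thread->tlab_end()) goto slow_case;
//   thread->set_tlab_top(end);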


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len, t1, t2);
#ifdef _LP64
  if (UseCompactObjectHeaders) {
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
    movptr(t1, klass);
    encode_klass_not_null(t1, rscratch1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
#ifdef _LP64
    int base_offset = arrayOopDesc::length_offset_in_bytes() + BytesPerInt;
    if (!is_aligned(base_offset, BytesPerWord)) {
      assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
      // Clear gap/first 4 bytes following the length field.
      xorl(t1, t1);
      movl(Address(obj, base_offset), t1);
    }
#endif
  }
#ifdef _LP64
  else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}
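
// Resulting header layouts (illustrative only; the authoritative offsets come
// from oopDesc/arrayOopDesc and the bit assignments from markWord):
//   UseCompactObjectHeaders:    [ mark word incl. compressed klass ][ length/fields ]
//   UseCompressedClassPointers: [ mark word ][ narrow klass ][ gap or length ][ fields ]
//   legacy / 32-bit:            [ mark word ][ full-width klass ][ length ][ fields ]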


// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  assert(hdr_size_in_bytes >= 0, "header size must be non-negative");
  Label done;

  // len_in_bytes is positive and ptr sized
  subptr(len_in_bytes, hdr_size_in_bytes);
  zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
  bind(done);
}
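
// zero_memory clears len_in_bytes bytes starting at obj + hdr_size_in_bytes;
// by this point len_in_bytes has already had the header size subtracted.
// Roughly: for (i = 0; i < len_in_bytes; i += BytesPerWord) obj[hdr + i] = 0;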


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}
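
// Note: header_size and object_size are measured in words; on x86
// BytesPerWord and HeapWordSize are equal, so the two byte-size
// computations above agree.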

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
  if (UseCompactObjectHeaders) {
    assert(hdr_size_in_bytes == 8, "check object headers size");
  }
  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register t1_zero = t1;
    const Register index = t2;
    const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      // code size = 2 + 3*n bytes (n = number of fields to clear)
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
        movptr(Address(obj, i), t1_zero);
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      // use loop to null out the fields
      // code size = 16 bytes for even n (n = number of fields to clear)
      // initialize last object field first if odd number of fields