  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  null_check_offset = offset();

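  // If requested, detect synchronization on value-based classes and take
  // the slow path so the runtime can report it.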
  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(hdr, obj);
    lwu(hdr, Address(hdr, Klass::access_flags_offset()));
    andi(t0, hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    bnez(t0, slow_case, true /* is_far */);
  }

  // Load object header
  ld(hdr, Address(obj, hdr_offset));

  if (UseFastLocking) {
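    // Fast-locking: delegate to MacroAssembler::fast_lock, which tries to
    // acquire the lock directly in the mark word (no displaced header on
    // the stack) and branches to slow_case if it cannot.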
    fast_lock(obj, hdr, t0, t1, slow_case);
  } else {
    // and mark it as unlocked
    ori(hdr, hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    sd(hdr, Address(disp_hdr, 0));
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    la(t1, Address(obj, hdr_offset));
    cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthrough*/NULL);
    // if the object header was the same, we're done
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) sp <= hdr
    // 3) hdr <= sp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //
    // (hdr - sp) & (aligned_mask - page_size)
    //
    // assuming both the stack pointer and page_size have their least
    // significant 3 bits cleared and page_size is a power of 2
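    // For example, with page_size = 4096 and aligned_mask = 7 the mask is
    // 7 - 4096 = 0x...f007, so the AND is zero exactly when hdr - sp is
    // word-aligned, non-negative and smaller than the page size.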
    sub(hdr, hdr, sp);
    mv(t0, aligned_mask - (int)os::vm_page_size());
    andr(hdr, hdr, t0);
    // for recursive locking, the result is zero => save it in the displaced header
    // location (NULL in the displaced hdr location indicates recursive locking)
    sd(hdr, Address(disp_hdr, 0));
    // otherwise we don't care about the result and handle locking via runtime call
    bnez(hdr, slow_case, /* is_far */ true);
    // done
    bind(done);
  }
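  // Keep the per-thread count of held monitors up to date; the runtime
  // (for example when freezing virtual threads) relies on it.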
  increment(Address(xthread, JavaThread::held_monitor_count_offset()));
  return null_check_offset;
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseFastLocking) {
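    // Fast-unlocking: reload the current mark word and let
    // MacroAssembler::fast_unlock release the lock directly in the mark
    // word, branching to slow_case if that fails.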
    // load object
    ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    verify_oop(obj);
    ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
    fast_unlock(obj, hdr, t0, t1, slow_case);
  } else {
    // load displaced header
    ld(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is NULL we had recursive locking
    // if we had recursive locking, we are done
    beqz(hdr, done);
    // load object
    ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    verify_oop(obj);
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
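    // With a non-zero mark offset the CAS needs the effective address in a
    // register; with offset 0 we can use obj as the address directly.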
    if (hdr_offset) {
      la(t0, Address(obj, hdr_offset));
      cmpxchgptr(disp_hdr, hdr, t0, t1, done, &slow_case);
    } else {
      cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
    }
    // done
    bind(done);
  }
  decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
}

// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, tmp2, slow_case, /* is_far */ true);
  } else {
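    // Without TLABs, C1 allocation always goes through the runtime slow path.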
    j(slow_case);
  }
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
  assert_different_registers(obj, klass, len, tmp1, tmp2);
  // This assumes that all prototype bits fit in an int32_t
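  // (the double cast truncates the mark to its low 32 bits and mv then
  // sign-extends the value back to 64 bits).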
  mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
  sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    encode_klass_not_null(tmp1, klass, tmp2);

  // ...

  membar(MacroAssembler::StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == x10, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  assert_different_registers(receiver, iCache, t0, t2);
  cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
}

void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes, int max_monitors) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
}

void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize);
}


void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump. For this action to be legal we
  // must ensure that this first instruction is a J, JAL or NOP.