48 fcmpd(f0, f1);
49 }
50 if (unordered_result < 0) {
51 // we want -1 for unordered or less than, 0 for equal and 1 for
52 // greater than.
53 cset(result, NE); // Not equal or unordered
54 cneg(result, result, LT); // Less than or unordered
55 } else {
56 // we want -1 for less than, 0 for equal and 1 for unordered or
57 // greater than.
58 cset(result, NE); // Not equal or unordered
59 cneg(result, result, LO); // Less than
60 }
61 }
62
// Emits the monitorenter fast path: stores the object being locked into its
// BasicObjectLock slot, then acquires the lock inline either via the
// lightweight scheme or via legacy stack-locking (displaced header + CAS).
// Branches to slow_case whenever the inline path cannot take the lock.
//
// hdr, temp - temporary registers (clobbered)
// obj       - register holding the object to lock
// disp_hdr  - register pointing at the BasicObjectLock on the stack
// Returns the code offset from which an access through obj may fault, so the
// caller can register an implicit null check there.
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));

  // instructions emitted from here on may dereference obj
  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    // Diagnostic mode: synchronizing on a value-based class goes to the
    // runtime so it can be reported.
    load_klass(hdr, obj);
    ldrw(hdr, Address(hdr, Klass::access_flags_offset()));
    tstw(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    br(Assembler::NE, slow_case);
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_lock(obj, hdr, temp, rscratch2, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    Label done;
    // Load object header
    ldr(hdr, Address(obj, hdr_offset));
    // and mark it as unlocked
    orr(hdr, hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    str(hdr, Address(disp_hdr, 0));
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    lea(rscratch2, Address(obj, hdr_offset));
    cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/nullptr);
    // if the object header was the same, we're done
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) sp <= hdr
    // 3) hdr <= sp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //
    // (hdr - sp) & (aligned_mask - page_size)
    //
    // assuming both the stack pointer and page_size have their least
    // significant 2 bits cleared and page_size is a power of 2
    mov(rscratch1, sp);
    sub(hdr, hdr, rscratch1);
    ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
    // for recursive locking, the result is zero => save it in the displaced header
    // location (null in the displaced hdr location indicates recursive locking)
    str(hdr, Address(disp_hdr, 0));
    // otherwise we don't care about the result and handle locking via runtime call
    cbnz(hdr, slow_case);
    // done
    bind(done);
  }
  // The held monitor count is bumped on every fast-path lock, including the
  // recursive case; unlock_object below decrements symmetrically.
  increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  return null_check_offset;
}
126
127
// Emits the monitorexit fast path, mirroring lock_object above: for legacy
// stack-locking, restores the displaced header into the object via CAS (or a
// null displaced header means a recursive lock and nothing to undo); for
// lightweight locking it delegates to lightweight_unlock. Branches to
// slow_case when inline unlocking fails.
//
// hdr, temp - temporary registers (clobbered)
// obj       - clobbered; reloaded from the BasicObjectLock
// disp_hdr  - register pointing at the BasicObjectLock on the stack
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  Label done;

  if (LockingMode != LM_LIGHTWEIGHT) {
    // load displaced header
    ldr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is null we had recursive locking
    // if we had recursive locking, we are done
    cbz(hdr, done);
  }

  // load object
  ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
  verify_oop(obj);

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    if (hdr_offset) {
      lea(rscratch1, Address(obj, hdr_offset));
      cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
    } else {
      // mark word at offset 0: CAS directly on the obj register
      cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
    }
    // done
    bind(done);
  }
  // NOTE(review): `done` is only bound on the LM_LEGACY path; presumably C1
  // never emits this sequence under LM_MONITOR (where the cbz above would
  // target a never-bound label) - confirm against the callers.
  decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
}
165
166
167 // Defines obj, preserves var_size_in_bytes
168 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
169 if (UseTLAB) {
170 tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
171 } else {
172 b(slow_case);
173 }
174 }
175
176 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
177 assert_different_registers(obj, klass, len);
178 // This assumes that all prototype bits fit in an int32_t
179 mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
180 str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
181
182 if (UseCompressedClassPointers) { // Take care not to kill klass
183 encode_klass_not_null(t1, klass);
|
48 fcmpd(f0, f1);
49 }
50 if (unordered_result < 0) {
51 // we want -1 for unordered or less than, 0 for equal and 1 for
52 // greater than.
53 cset(result, NE); // Not equal or unordered
54 cneg(result, result, LT); // Less than or unordered
55 } else {
56 // we want -1 for less than, 0 for equal and 1 for unordered or
57 // greater than.
58 cset(result, NE); // Not equal or unordered
59 cneg(result, result, LO); // Less than
60 }
61 }
62
// Emits the monitorenter fast path: stores the object being locked into its
// BasicObjectLock slot, then acquires the lock inline either via the
// lightweight scheme or via legacy stack-locking (displaced header + CAS).
// Branches to slow_case whenever the inline path cannot take the lock.
// Non-recursive legacy locks funnel through count_locking to bump the held
// monitor count; recursive and lightweight locks branch straight to done.
//
// hdr, temp - temporary registers (clobbered)
// obj       - register holding the object to lock
// disp_hdr  - register pointing at the BasicObjectLock on the stack
// Returns the code offset from which an access through obj may fault, so the
// caller can register an implicit null check there.
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  int null_check_offset = -1;
  Label count_locking, done;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));

  // instructions emitted from here on may dereference obj
  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    // Diagnostic mode: synchronizing on a value-based class goes to the
    // runtime so it can be reported.
    load_klass(hdr, obj);
    ldrw(hdr, Address(hdr, Klass::access_flags_offset()));
    tstw(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    br(Assembler::NE, slow_case);
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_lock(obj, hdr, temp, rscratch2, slow_case);
    // skip the held-monitor-count update below
    b(done);
  } else if (LockingMode == LM_LEGACY) {
    // Load object header
    ldr(hdr, Address(obj, hdr_offset));
    // and mark it as unlocked
    orr(hdr, hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    str(hdr, Address(disp_hdr, 0));
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    lea(rscratch2, Address(obj, hdr_offset));
    // on success the CAS branches to count_locking to record the new lock
    cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, count_locking, /*fallthough*/nullptr);
    // if the object header was the same, we're done
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) sp <= hdr
    // 3) hdr <= sp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //
    // (hdr - sp) & (aligned_mask - page_size)
    //
    // assuming both the stack pointer and page_size have their least
    // significant 2 bits cleared and page_size is a power of 2
    mov(rscratch1, sp);
    sub(hdr, hdr, rscratch1);
    ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
    // for recursive locking, the result is zero => save it in the displaced header
    // location (null in the displaced hdr location indicates recursive locking)
    str(hdr, Address(disp_hdr, 0));
    // otherwise we don't care about the result and handle locking via runtime call
    cbnz(hdr, slow_case);
    // recursive lock: done without touching the monitor count (the matching
    // recursive unlock in unlock_object skips the decrement as well)
    b(done);
  }
  bind(count_locking);
  inc_held_monitor_count();
  bind(done);
  return null_check_offset;
}
130
131
// Emits the monitorexit fast path, mirroring lock_object above: for legacy
// stack-locking, restores the displaced header into the object via CAS (or a
// null displaced header means a recursive lock and nothing to undo); for
// lightweight locking it delegates to lightweight_unlock. Branches to
// slow_case when inline unlocking fails. Only a successful legacy CAS passes
// through count_locking to decrement the held monitor count - recursive and
// lightweight unlocks branch straight to done, matching lock_object.
//
// hdr, temp - temporary registers (clobbered)
// obj       - clobbered; reloaded from the BasicObjectLock
// disp_hdr  - register pointing at the BasicObjectLock on the stack
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
  Label count_locking, done;

  if (LockingMode != LM_LIGHTWEIGHT) {
    // load displaced header
    ldr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is null we had recursive locking
    // if we had recursive locking, we are done (and skip the count update:
    // lock_object never counted the recursive lock)
    cbz(hdr, done);
  }

  // load object
  ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
  verify_oop(obj);

  if (LockingMode == LM_LIGHTWEIGHT) {
    lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
    // skip the held-monitor-count update below
    b(done);
  } else if (LockingMode == LM_LEGACY) {
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    if (hdr_offset) {
      lea(rscratch1, Address(obj, hdr_offset));
      cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, count_locking, &slow_case);
    } else {
      // mark word at offset 0: CAS directly on the obj register
      cmpxchgptr(disp_hdr, hdr, obj, rscratch2, count_locking, &slow_case);
    }
    // successful CAS: drop the held monitor count
    // NOTE(review): the label is named count_locking but it counts an UNlock
    // here - consider renaming for clarity.
    bind(count_locking);
    dec_held_monitor_count();
  }
  bind(done);
}
171
172
173 // Defines obj, preserves var_size_in_bytes
174 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
175 if (UseTLAB) {
176 tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
177 } else {
178 b(slow_case);
179 }
180 }
181
182 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
183 assert_different_registers(obj, klass, len);
184 // This assumes that all prototype bits fit in an int32_t
185 mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
186 str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
187
188 if (UseCompressedClassPointers) { // Take care not to kill klass
189 encode_klass_not_null(t1, klass);
|