 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
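  // Why a stub at all: x86 cvttss2si/cvttsd2si produce the "integer
  // indefinite" value 0x80000000 for NaN and out-of-range inputs, while
  // Java requires NaN -> 0 and saturation to Integer.MIN_VALUE/MAX_VALUE,
  // so the fast path branches here to fix up the result.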

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
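    // comiss compares the input against +0.0f and sets ZF/PF/CF; an
    // unordered result (PF set) means NaN. The remainder of the stub
    // (elided here) presumably branches on these flags to select 0,
    // Integer.MIN_VALUE or Integer.MAX_VALUE as the result.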


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
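  // Pass the BasicLock* as argument 0 to the runtime stub: store_parameter
  // writes it into the outgoing-argument slot of the frame from which
  // Runtime1 stubs read their parameters.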
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
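  // The _nofpu variant is the same unlock stub minus the FPU register
  // save/restore; it is safe whenever the compiled method contains no FPU
  // code, since there is then no live FPU state to preserve.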
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "only with compact headers");
  __ bind(_entry);
#ifdef _LP64
  Register d = _result->as_register();
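  // Slow path for compact object headers: the mark word in d turned out to
  // be an ObjectMonitor* (the object is inflated), so load the displaced
  // header, which carries the narrow Klass*, from the monitor instead.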
  __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ jmp(_continuation);
#else
  __ should_not_reach_here();
#endif
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
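// A note on the sign (an assumption based on the protocol above, since
// PatchingStub::emit_code is not shown here): the patch-info record sits
// immediately before the stub's call into the runtime, and on x86 that
// call is the same length as a NativeGeneralJump (5 bytes), so stepping
// back one instruction_size from the caller pc lands on the record.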

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
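  // Concretely, on LP64 this is align_up(5, 8) == 8: starting the 5-byte
  // jump on an 8-byte boundary guarantees it cannot straddle an 8-byte
  // block, and therefore cannot straddle a cache line either.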