 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->
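// HotSpot convention: `__` expands to `ce->masm()->`, so the assembler calls
// below emit into the LIR_Assembler's MacroAssembler.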

#ifndef _LP64
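// ConversionStub is only compiled on 32-bit x86; the LP64 code generator
// evidently handles the out-of-range f2i/d2i cases without this stub.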
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
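    // The stub is entered when cvttss2si/cvttsd2si produced the "integer
    // indefinite" value (min_jint). Comparing the input against zero lets
    // the remainder of the stub (elided here) pick the result:
    // NaN -> 0, negative -> keep min_jint, positive -> max_jint.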
// ...

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
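  // Presumably (from the Runtime1 stub naming) the _nofpu variant skips
  // saving/restoring FPU/XMM registers, so it is only safe when the
  // compiled method contains no FPU code.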
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "only with compact headers");
  __ bind(_entry);
#ifdef _LP64
  Register d = _result->as_register();
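  // With compact headers the compressed klass lives in the mark word. On
  // this slow path d holds a mark word that encodes a tagged ObjectMonitor*,
  // so reload the displaced mark word from the monitor's header field.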
  __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ jmp(_continuation);
#else
  __ should_not_reach_here();
#endif
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call the stub, jump to the runtime
// - in the runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in the runtime: after initializing the class, restore the original code, then reexecute the instruction
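//
// A rough sketch of the patch site (illustrative; the exact layout is defined
// by NativeGeneralJump and the patching runtime):
//
//   before patching:                 after the runtime patches it back:
//     jmp <PatchingStub entry>         <original 5-7 byte instruction>
//     <remaining copied bytes>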

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double-word aligned so that it
  // doesn't span a cache line.
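  // (NativeGeneralJump::instruction_size is 5 on x86, so align_up(5, wordSize)
  // yields 8 on both 32- and 64-bit; an instruction of at most 8 bytes that
  // starts on an 8-byte boundary cannot cross a cache-line boundary.)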
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}