 */

#include "precompiled.hpp"
#include "compiler/compiler_globals.hpp"
#include "interp_masm_x86.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/runtimeUpcalls.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

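// profile_obj_type records the observed class of obj in the type-profile cell
// at mdo_addr: an empty cell receives obj's klass, a null obj sets the
// null_seen bit, and a klass that conflicts with the recorded one marks the
// cell as polluted (TypeEntries::type_unknown).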
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

#ifdef _LP64
  assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
  assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
#endif

// ... (intervening code elided; the fragment below is the tail of
// InterpreterMacroAssembler::verify_FPU) ...

    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) == 0
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
                                                        Register scratch, Label* where) {
  // This update is actually not atomic and can lose a number of updates
  // under heavy contention, but the alternative of using the (contended)
  // atomic update here penalizes profiling paths too much.
  movl(scratch, counter_addr);
  incrementl(scratch, InvocationCounter::count_increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  if (where != nullptr) {
    jcc(Assembler::zero, *where);
  }
}
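
// For reference, the code emitted above behaves roughly like the sketch below
// ('counter' names the 32-bit value at counter_addr, '*mask' the value at
// mask; the load/add/store sequence is not atomic, so concurrent increments
// can be lost, as noted above):
//
//   int32_t counter = *counter_addr + InvocationCounter::count_increment;
//   *counter_addr = counter;
//   if ((counter & *mask) == 0) goto *where;   // jump only emitted when where != nullptr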

// Emit a call to the registered method-entry runtime upcall, passing the
// current Method* in c_rarg1 when the upcall requires it.
void InterpreterMacroAssembler::generate_runtime_upcalls_on_method_entry() {
  address upcall = RuntimeUpcalls::on_method_entry_upcall_address();
  if (RuntimeUpcalls::does_upcall_need_method_parameter(upcall)) {
    get_method(c_rarg1);
    call_VM(noreg, upcall, c_rarg1);
  } else {
    call_VM(noreg, upcall);
  }
}
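
// The upcall address is resolved once, when this code is generated, so the
// emitted call targets a fixed entry point; only whether the Method* argument
// is materialized depends on the registered upcall. At runtime the call
// behaves roughly like
//   upcall(current_thread)           or
//   upcall(current_thread, method)
// since call_VM supplies the current JavaThread as the implicit first argument.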

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
  // are sent to track stack depth. If it is possible to enter interp_only_mode
  // we add the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }
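
  // The check above amounts to roughly the following (interp_only_mode is a
  // per-thread counter that JVMTI makes non-zero when the thread has to run
  // interpreted, e.g. for method entry/exit events or single stepping):
  //
  //   if (current_thread->interp_only_mode != 0) {
  //     InterpreterRuntime::post_method_entry(current_thread);
  //   }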

  if (DTraceMethodProbes) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);