22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/markWord.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "oops/resolvedFieldEntry.hpp"
35 #include "oops/resolvedIndyEntry.hpp"
36 #include "oops/resolvedMethodEntry.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/basicLock.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/javaThread.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/powerOfTwo.hpp"
45
46 // Implementation of InterpreterMacroAssembler
47
48 void InterpreterMacroAssembler::jump_to_entry(address entry) {
49 assert(entry, "Entry must have been generated by now");
50 jump(RuntimeAddress(entry));
51 }
52
53 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
54 Label update, next, none;
55
56 #ifdef _LP64
57 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
58 #else
59 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
60 #endif
61
1971 MacroAssembler::verify_FPU(stack_depth);
1972 }
1973 #endif
1974 }
1975
// Jump if ((*counter_addr += increment) & mask) == 0
// Clobbers scratch and the condition flags; emits the conditional jump
// only when 'where' is non-null (the counter update is emitted either way).
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
                                                        Register scratch, Label* where) {
  // This update is actually not atomic and can lose a number of updates
  // under heavy contention, but the alternative of using the (contended)
  // atomic update here penalizes profiling paths too much.
  movl(scratch, counter_addr);                              // scratch = *counter_addr
  incrementl(scratch, InvocationCounter::count_increment);  // scratch += increment
  movl(counter_addr, scratch);                              // plain (racy) store back
  andl(scratch, mask);                                      // AND sets ZF when masked bits are all zero
  if (where != nullptr) {
    jcc(Assembler::zero, *where);                           // taken iff (new counter & mask) == 0
  }
}
1990
1991 void InterpreterMacroAssembler::notify_method_entry() {
1992 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1993 // track stack depth. If it is possible to enter interp_only_mode we add
1994 // the code to check if the event should be sent.
1995 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1996 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
1997 if (JvmtiExport::can_post_interpreter_events()) {
1998 Label L;
1999 NOT_LP64(get_thread(rthread);)
2000 movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
2001 testl(rdx, rdx);
2002 jcc(Assembler::zero, L);
2003 call_VM(noreg, CAST_FROM_FN_PTR(address,
2004 InterpreterRuntime::post_method_entry));
2005 bind(L);
2006 }
2007
2008 if (DTraceMethodProbes) {
2009 NOT_LP64(get_thread(rthread);)
2010 get_method(rarg);
|
22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/markWord.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "oops/resolvedFieldEntry.hpp"
35 #include "oops/resolvedIndyEntry.hpp"
36 #include "oops/resolvedMethodEntry.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/basicLock.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/javaThread.hpp"
42 #include "runtime/runtimeUpcalls.hpp"
43 #include "runtime/safepointMechanism.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/powerOfTwo.hpp"
46
47 // Implementation of InterpreterMacroAssembler
48
49 void InterpreterMacroAssembler::jump_to_entry(address entry) {
50 assert(entry, "Entry must have been generated by now");
51 jump(RuntimeAddress(entry));
52 }
53
54 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
55 Label update, next, none;
56
57 #ifdef _LP64
58 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
59 #else
60 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
61 #endif
62
1972 MacroAssembler::verify_FPU(stack_depth);
1973 }
1974 #endif
1975 }
1976
// Jump if ((*counter_addr += increment) & mask) == 0
// Clobbers scratch and the condition flags; emits the conditional jump
// only when 'where' is non-null (the counter update is emitted either way).
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
                                                        Register scratch, Label* where) {
  // This update is actually not atomic and can lose a number of updates
  // under heavy contention, but the alternative of using the (contended)
  // atomic update here penalizes profiling paths too much.
  movl(scratch, counter_addr);                              // scratch = *counter_addr
  incrementl(scratch, InvocationCounter::count_increment);  // scratch += increment
  movl(counter_addr, scratch);                              // plain (racy) store back
  andl(scratch, mask);                                      // AND sets ZF when masked bits are all zero
  if (where != nullptr) {
    jcc(Assembler::zero, *where);                           // taken iff (new counter & mask) == 0
  }
}
1991
1992 void InterpreterMacroAssembler::generate_runtime_upcalls_on_method_entry()
1993 {
1994 address upcall = RuntimeUpcalls::on_method_entry_upcall_address();
1995 if (RuntimeUpcalls::does_upcall_need_method_parameter(upcall)) {
1996 get_method(c_rarg1);
1997 call_VM(noreg,upcall, c_rarg1);
1998 } else {
1999 call_VM(noreg,upcall);
2000 }
2001 }
2002
2003 void InterpreterMacroAssembler::notify_method_entry() {
2004 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
2005 // track stack depth. If it is possible to enter interp_only_mode we add
2006 // the code to check if the event should be sent.
2007 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
2008 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
2009 if (JvmtiExport::can_post_interpreter_events()) {
2010 Label L;
2011 NOT_LP64(get_thread(rthread);)
2012 movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
2013 testl(rdx, rdx);
2014 jcc(Assembler::zero, L);
2015 call_VM(noreg, CAST_FROM_FN_PTR(address,
2016 InterpreterRuntime::post_method_entry));
2017 bind(L);
2018 }
2019
2020 if (DTraceMethodProbes) {
2021 NOT_LP64(get_thread(rthread);)
2022 get_method(rarg);
|