src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
! address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
// r1: Method*
// rscratch1: sender sp
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
! address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
// r1: Method*
// rscratch1: sender sp
#endif
// jvmti support
__ notify_method_entry();
+ if (runtime_upcalls) {
+ __ generate_runtime_upcalls_on_method_entry();
+ }
+
// work registers
const Register t = r17;
const Register result_handler = r19;
// allocate space for parameters
}
//
// Generic interpreted method entry to (asm) interpreter
//
! address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
// rscratch1: sender sp
address entry_point = __ pc();
}
//
// Generic interpreted method entry to (asm) interpreter
//
! address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
// rscratch1: sender sp
address entry_point = __ pc();
#endif
// jvmti support
__ notify_method_entry();
+ // runtime upcalls
+ if (runtime_upcalls) {
+ __ generate_runtime_upcalls_on_method_entry();
+ }
+
__ dispatch_next(vtos);
// invocation counter overflow
if (inc_counter) {
// Handle overflow of counter and compile method
generate_and_dispatch(t);
}
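Both method entries now take an extra runtime_upcalls flag and, when it is set, emit a call to generate_runtime_upcalls_on_method_entry() immediately after the JVMTI notify_method_entry(). A minimal sketch of how a caller might choose the variant, assuming only the two signatures shown in this diff; the needs_upcalls() predicate and the wrapper name are hypothetical, for illustration only:

// Illustrative sketch only -- not HotSpot's actual entry-selection code.
// needs_upcalls() is a hypothetical stand-in for whatever decides that
// method-entry upcalls must be generated for this interpreter build.
address generate_entry_for(bool native_call, bool synchronized) {
  const bool runtime_upcalls = needs_upcalls();              // hypothetical
  return native_call
      ? generate_native_entry(synchronized, runtime_upcalls)
      : generate_normal_entry(synchronized, runtime_upcalls);
}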
//-----------------------------------------------------------------------------
+ void TemplateInterpreterGenerator::count_bytecode() {
+ if (CountBytecodesPerThread) {
+ Address bc_counter_addr(rthread, Thread::bc_counter_offset());
+ __ ldr(r10, bc_counter_addr);
+ __ add(r10, r10, 1);
+ __ str(r10, bc_counter_addr);
+ }
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) {
+ __ mov(r10, (address) &BytecodeCounter::_counter_value);
+ __ atomic_add(noreg, 1, r10);
+ }
+ }
+
+ void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+ __ mov(r10, (address) &BytecodeHistogram::_counters[t->bytecode()]);
+ __ atomic_addw(noreg, 1, r10);
+ }
+
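The relocated count_bytecode() above now sits ahead of the non-product section: it bumps a plain per-thread counter at Thread::bc_counter_offset() when CountBytecodesPerThread is set (no atomics, since only the owning thread writes it) and still performs an atomic increment of the shared BytecodeCounter::_counter_value when CountBytecodes, TraceBytecodes, or StopInterpreterAt is in effect. A minimal plain-C++ sketch of that counting policy, not the generated assembly; the type and variable names are made up for illustration:

#include <atomic>
#include <cstdint>

struct ThreadLike {
  uint64_t bc_counter = 0;                           // per-thread bytecode count
};

static std::atomic<uint64_t> global_bc_counter{0};   // shared across threads

// Mirrors the two paths the stub emits: a thread-local add and an atomic add.
inline void count_bytecode(ThreadLike* self, bool per_thread, bool global) {
  if (per_thread) {
    self->bc_counter += 1;                           // only this thread writes it
  }
  if (global) {
    global_bc_counter.fetch_add(1, std::memory_order_relaxed);
  }
}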
// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc();
__ ret(lr); // return from result handler
return entry;
}
- void TemplateInterpreterGenerator::count_bytecode() {
- __ mov(r10, (address) &BytecodeCounter::_counter_value);
- __ atomic_addw(noreg, 1, r10);
- }
-
- void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
- __ mov(r10, (address) &BytecodeHistogram::_counters[t->bytecode()]);
- __ atomic_addw(noreg, 1, r10);
- }
-
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
// Calculate new index for counter:
// _index = (_index >> log2_number_of_codes) |
// (bytecode << log2_number_of_codes);
Register index_addr = rscratch1;
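The index update described in the comment above shifts the previous bytecode into the low bits and places the current bytecode in the high bits, so each executed pair of bytecodes selects one counter per ordered (previous, current) pair. A small worked sketch in plain C++, with log2_number_of_codes assumed to be 8 purely for the example:

#include <cstdint>

constexpr int log2_number_of_codes = 8;              // assumed for illustration

// _index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes)
inline uint32_t next_pair_index(uint32_t index, uint32_t bytecode) {
  return (index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
}

// After executing bytecode A and then bytecode B (index kept within 16 bits),
// the result is A | (B << 8): the counter for the ordered pair (A, B).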