  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

// Not supported
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
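// Returning nullptr here means no specialized entry is generated for these
// intrinsics; the interpreter falls back to the normal entry instead.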

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the native
// method than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = R6;
  const Register Rsig_handler    = Rtmp_save0;   // R4
  const Register Rnative_code    = Rtmp_save1;   // R5
  const Register Rresult_handler = R6;

  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
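  // Note: Rsaved_result_lo/hi deliberately alias Rsig_handler/Rnative_code
  // (Rtmp_save0/Rtmp_save1). Those values are dead once the native call has
  // been made, so the same callee-saved registers can hold the result.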

  // ... (native frame setup, the native call itself, and result handling elided) ...

  // Restore FP/LR, sender_sp and return
  __ mov(Rtemp, FP);   // keep the old FP so the frame can still be addressed below
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = R3;

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));  // parameter count
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));      // max locals (includes parameters)

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));
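  // Rlocals now points to the first parameter (local slot 0), i.e.
  // Rparams + (size_of_parameters - 1) * wordSize; subsequent locals
  // are addressed at decreasing addresses from there.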

  // ... (remainder of generate_normal_entry and later entry points elided;
  //      the code below is the tail of set_vtos_entry_points) ...

  lep = __ pc(); __ push(ltos); __ b(L);

  if (VerifyOops) {  // can't share atos entry if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();   // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();     // fall through
#endif // __SOFTFP__

  bep = cep = sep =            // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}
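
// Note on the entry points above: byte, char, short and int states share a
// single entry that pushes the 32-bit tos register; with __SOFTFP__ the float
// state shares it as well, and so does atos unless VerifyOops requires a
// separate entry to verify the reference. The long state pushes both halves.
// All paths join at the vtos entry (label L) before generate_and_dispatch.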

//------------------------------------------------------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
  __ mov(R3, R1_tos_hi);
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);
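  // InterpreterRuntime::trace_bytecode returns its first argument, so the
  // saved return address comes back in R0 and survives the VM call.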

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
  // (_index >> log2_number_of_codes) is previous bytecode
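  //
  // For example, with log2_number_of_codes == 8: if the previous update left
  // 0x12 in the high byte and the current bytecode is 0x34, the new index is
  // (old >> 8) | (0x34 << 8) = 0x3412, i.e. the counter for the pair
  // (previous = 0x12, current = 0x34).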

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter