  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

// Not supported
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
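// (A nullptr from these generators means there is no specialized intrinsic entry
// on this platform; the interpreter falls back to the normal method entry.)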

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = R6;
  const Register Rsig_handler    = Rtmp_save0;   // R4
  const Register Rnative_code    = Rtmp_save1;   // R5
  const Register Rresult_handler = R6;

  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
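
  // Note: Rtmp_save0/Rtmp_save1 (R4/R5) are reused across phases: they hold the
  // signature handler and native code pointers up to the native call, and the
  // saved integer result afterwards (an FP result goes to saved_result_fp).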

  // ... [body of generate_native_entry elided] ...

  // Restore FP/LR, sender_sp and return
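  // FP is copied to Rtemp first: the ldmia below reloads FP (and LR) from the
  // current frame, so the sender SP slot must be read via the old FP value.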
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = R3;

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // set up Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));
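  // Net effect (illustrative, with R2 == size_of_parameters):
  //   Rlocals = Rparams + (size_of_parameters - 1) * wordSize;
  // i.e. Rlocals points at parameter 0 == local slot 0, the highest-addressed slot.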

// ... [rest of generate_normal_entry and start of set_vtos_entry_points elided] ...

  lep = __ pc(); __ push(ltos); __ b(L);

  if (VerifyOops) {  // can't share atos entry if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc(); // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc(); // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}
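
// (All non-vtos entries above push their tos value and then branch or fall
// through to the vtos entry bound at label L, so the template body emitted by
// generate_and_dispatch is shared by every tos state.)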

//------------------------------------------------------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}
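
// (Semantically, each call above emits a load/increment/store of a global
// counter at counter_address + offset, using the two registers as scratch;
// a sketch of the effect, not of the exact instructions emitted.)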

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
  __ mov(R3, R1_tos_hi);
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

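  // (InterpreterRuntime::trace_bytecode passes its "preserve_this_value" argument
  // back in R0, which is how the saved return address survives the VM call.)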
  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate the new counter index:
  //   index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes)
  // where (_index >> log2_number_of_codes) is the previous bytecode
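  // Equivalent C sketch (illustrative):
  //   _index = (_index >> log2_number_of_codes) | (t->bytecode() << log2_number_of_codes);
  // low bits: previous bytecode; high bits: current bytecode.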

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter

  // ... [rest of histogram_bytecode_pair elided] ...