791 }
792
// Not supported on this platform. Returning nullptr from a generate_* entry
// tells the shared interpreter setup that no specialized (intrinsic) entry
// exists for that method kind, so the normal method entry is used instead.
// NOTE(review): the nullptr-means-fallback convention comes from the shared
// AbstractInterpreter code, which is outside this excerpt — confirm there.
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
804
805 //
806 // Interpreter stub for calling a native method. (asm interpreter)
807 // This sets up a somewhat different looking stack for calling the native method
808 // than the typical interpreter frame setup.
809 //
810
// Generates the interpreter entry stub used when the callee is a native method.
// Returns the code address of the generated entry.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation.
  // Note that R6 is deliberately shared between Rsize_of_params and
  // Rresult_handler, and R4/R5 (Rtmp_save0/1) are shared between the
  // sig-handler/native-code pointers and the saved result halves —
  // presumably each earlier use is dead before the later one begins;
  // confirm in the elided body.
  const Register Rsize_of_params = R6;
  const Register Rsig_handler = Rtmp_save0; // R4
  const Register Rnative_code = Rtmp_save1; // R5
  const Register Rresult_handler = R6;

  const Register Rsaved_result_lo = Rtmp_save0; // R4
  const Register Rsaved_result_hi = Rtmp_save1; // R5

  // NOTE(review): original lines 832-1124 (frame setup, the native call itself,
  // result handling) are elided in this excerpt; the code below is the tail of
  // the method: the epilogue and the counter-overflow stub.

  // Restore FP/LR, sender_sp and return
  __ mov(Rtemp, FP);                                // keep current FP: the ldmia below clobbers it
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));  // restore caller's FP and LR from the frame
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize)); // pop frame: SP = saved sender sp

  __ ret();                                         // return to caller via the restored LR

  if (inc_counter) {
    // Handle overflow of counter and compile method.
    // invocation_counter_overflow and continue_after_compile are Labels
    // declared/bound in the elided portion of this method.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
1141
1142 //
1143 // Generic interpreted method entry to (asm) interpreter
1144 //
// Generates the generic (non-native, non-intrinsic) interpreted method entry.
// Returns the code address of the generated entry.
// NOTE(review): only the head of this method is visible in this excerpt.
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  // R3 doubles as RconstMethod and as the size_of_locals destination below;
  // the ldrh reads the base register before overwriting the destination, so
  // RconstMethod's last use is that load.
  const Register RconstMethod = R3;


  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); // R2 = number of parameter words
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));     // R3 = number of local words (clobbers RconstMethod)

  // setup Rlocals: locals base is one word off the last-parameter pointer
  __ sub(Rlocals, Rparams, wordSize);
  // Tail of the per-TosState entry-point setup for a template (the head of this
  // function, including the declarations of L, lep/aep/fep/bep/cep/sep/iep/vep
  // and the template t, is not part of this excerpt). Each Xep records the
  // current pc; states whose tos-cached value must be spilled push it and then
  // branch or fall through to the common dispatch label L.
  lep = __ pc(); __ push(ltos); __ b(L);          // long: push 64-bit tos, jump to dispatch

  if (VerifyOops) { // can't share atos entry if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc(); // fall through
  }

#ifdef __SOFTFP__
  // Soft-float: float tos lives in the integer registers, so the float entry
  // can share the itos push path below — presumably; confirm against the
  // hard-float branch in the elided head.
  fep = __ pc(); // fall through
#endif // __SOFTFP__

  bep = cep = sep = // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L); // fall through
  generate_and_dispatch(t);
}
1566
1567 //------------------------------------------------------------------------------------------------------------------------
1568
1569 // Non-product code
1570 #ifndef PRODUCT
// Generates a stub that calls InterpreterRuntime::trace_bytecode for the given
// tos state. The tos-cached value is preserved across the call by pushing it
// onto the expression stack, and the return address is threaded through the
// call in R1 and restored from R0 afterwards — presumably trace_bytecode
// returns its second argument; confirm against InterpreterRuntime.
// Returns the code address of the generated stub.
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state); // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
  __ mov(R3, R1_tos_hi);  // must read R1_tos_hi (R1) before R1 is clobbered by the next move
  __ mov(R1, LR); // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0); // restore return address
  __ pop(state); // restore tosca

  // return
  __ ret();

  return entry;
}
1593
1594
// Emits code that increments the global interpreted-bytecode counter
// (BytecodeCounter::_counter_value). Rtemp and R2_tmp are scratch registers;
// the trailing 'true' flag's meaning is defined by inc_global_counter —
// TODO confirm against macroAssembler_arm.
void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}
1598
1599
// Emits code that bumps this template's slot in BytecodeHistogram::_counters:
// the slot offset is element-size * bytecode index. Rtemp/R2_tmp are scratch.
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}
1603
1604
// Emits code that counts executed bytecode *pairs* (previous, current) in
// BytecodePairHistogram. NOTE(review): this method is cut off at the end of
// the excerpt — the actual counter increment (using Lcounters, Rcounters_addr
// and Lcontinue) is in the elided remainder.
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;   // used in the elided remainder
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);  // counter-table base (used in elided code)
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;  // reuses R2_tmp once Rindex_addr is dead
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
  // (_index >> log2_number_of_codes) is previous bytecode

  __ ldr_literal(Rindex_addr, Lindex);        // Rindex_addr = &_index
  __ ldr_s32(Rindex, Address(Rindex_addr));   // Rindex = previous pair index
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));    // publish new _index for the next bytecode

  // Rindex (R4) contains index of counter
|
791 }
792
// Not supported on this platform. Returning nullptr from a generate_* entry
// tells the shared interpreter setup that no specialized (intrinsic) entry
// exists for that method kind, so the normal method entry is used instead.
// NOTE(review): the nullptr-means-fallback convention comes from the shared
// AbstractInterpreter code, which is outside this excerpt — confirm there.
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
804
805 //
806 // Interpreter stub for calling a native method. (asm interpreter)
807 // This sets up a somewhat different looking stack for calling the native method
808 // than the typical interpreter frame setup.
809 //
810
// Generates the interpreter entry stub used when the callee is a native method.
// Returns the code address of the generated entry.
// runtime_upcalls is not referenced in the visible portion of this method —
// TODO confirm its use in the elided body.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation.
  // Note that R6 is deliberately shared between Rsize_of_params and
  // Rresult_handler, and R4/R5 (Rtmp_save0/1) are shared between the
  // sig-handler/native-code pointers and the saved result halves —
  // presumably each earlier use is dead before the later one begins;
  // confirm in the elided body.
  const Register Rsize_of_params = R6;
  const Register Rsig_handler = Rtmp_save0; // R4
  const Register Rnative_code = Rtmp_save1; // R5
  const Register Rresult_handler = R6;

  const Register Rsaved_result_lo = Rtmp_save0; // R4
  const Register Rsaved_result_hi = Rtmp_save1; // R5

  // NOTE(review): original lines 832-1124 (frame setup, the native call itself,
  // result handling) are elided in this excerpt; the code below is the tail of
  // the method: the epilogue and the counter-overflow stub.

  // Restore FP/LR, sender_sp and return
  __ mov(Rtemp, FP);                                // keep current FP: the ldmia below clobbers it
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));  // restore caller's FP and LR from the frame
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize)); // pop frame: SP = saved sender sp

  __ ret();                                         // return to caller via the restored LR

  if (inc_counter) {
    // Handle overflow of counter and compile method.
    // invocation_counter_overflow and continue_after_compile are Labels
    // declared/bound in the elided portion of this method.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
1141
1142 //
1143 // Generic interpreted method entry to (asm) interpreter
1144 //
// Generates the generic (non-native, non-intrinsic) interpreted method entry.
// Returns the code address of the generated entry.
// runtime_upcalls is not referenced in the visible portion of this method —
// TODO confirm its use in the elided body.
// NOTE(review): only the head of this method is visible in this excerpt.
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  // R3 doubles as RconstMethod and as the size_of_locals destination below;
  // the ldrh reads the base register before overwriting the destination, so
  // RconstMethod's last use is that load.
  const Register RconstMethod = R3;


  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); // R2 = number of parameter words
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));     // R3 = number of local words (clobbers RconstMethod)

  // setup Rlocals: locals base is one word off the last-parameter pointer
  __ sub(Rlocals, Rparams, wordSize);
  // Tail of the per-TosState entry-point setup for a template (the head of this
  // function, including the declarations of L, lep/aep/fep/bep/cep/sep/iep/vep
  // and the template t, is not part of this excerpt). Each Xep records the
  // current pc; states whose tos-cached value must be spilled push it and then
  // branch or fall through to the common dispatch label L.
  lep = __ pc(); __ push(ltos); __ b(L);          // long: push 64-bit tos, jump to dispatch

  if (VerifyOops) { // can't share atos entry if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc(); // fall through
  }

#ifdef __SOFTFP__
  // Soft-float: float tos lives in the integer registers, so the float entry
  // can share the itos push path below — presumably; confirm against the
  // hard-float branch in the elided head.
  fep = __ pc(); // fall through
#endif // __SOFTFP__

  bep = cep = sep = // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L); // fall through
  generate_and_dispatch(t);
}
1566
1567 //------------------------------------------------------------------------------------------------------------------------
1568
// Emits code that increments the global interpreted-bytecode counter
// (BytecodeCounter::_counter_value). Rtemp and R2_tmp are scratch registers;
// the trailing 'true' flag's meaning is defined by inc_global_counter —
// TODO confirm against macroAssembler_arm.
void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}
1572
1573
1574 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1575 __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
1576 }
1577
1578 // Non-product code
1579 #ifndef PRODUCT
// Generates a stub that calls InterpreterRuntime::trace_bytecode for the given
// tos state. The tos-cached value is preserved across the call by pushing it
// onto the expression stack, and the return address is threaded through the
// call in R1 and restored from R0 afterwards — presumably trace_bytecode
// returns its second argument; confirm against InterpreterRuntime.
// Returns the code address of the generated stub.
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state); // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
  __ mov(R3, R1_tos_hi);  // must read R1_tos_hi (R1) before R1 is clobbered by the next move
  __ mov(R1, LR); // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0); // restore return address
  __ pop(state); // restore tosca

  // return
  __ ret();

  return entry;
}
1602
// Emits code that counts executed bytecode *pairs* (previous, current) in
// BytecodePairHistogram. NOTE(review): this method is cut off at the end of
// the excerpt — the actual counter increment (using Lcounters, Rcounters_addr
// and Lcontinue) is in the elided remainder.
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;   // used in the elided remainder
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);  // counter-table base (used in elided code)
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;  // reuses R2_tmp once Rindex_addr is dead
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
  // (_index >> log2_number_of_codes) is previous bytecode

  __ ldr_literal(Rindex_addr, Lindex);        // Rindex_addr = &_index
  __ ldr_s32(Rindex, Address(Rindex_addr));   // Rindex = previous pair index
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));    // publish new _index for the next bytecode

  // Rindex (R4) contains index of counter
|