  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}
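
// A rough C-like sketch of the banging logic above (an illustration, not
// generated code; assumes the stack grows down, so "above" means numerically
// greater):
//
//   if (sp > growth_watermark) return;         // already banged this far down
//   for (int p = 1; p <= n_shadow_pages; p++) {
//     touch(sp - p * page_size);               // bang one shadow page
//   }
//   if (sp > safe_limit) {
//     growth_watermark = sp;                   // never record past the safe limit
//   }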

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the native
// method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
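  // Note: x12 does double duty here: it first receives the ConstMethod*, and
  // is then overwritten with the u2 parameter count loaded from it.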
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to the stack.

  // xmethod: Method*

  // ... (frame setup, the native call itself, and result handling elided) ...

  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {

  // determine code generation flags
  const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12); // x13 = no. of additional locals
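
  // Worked example (hypothetical method, for illustration only): for
  //   static int f(int a, long b) { int c; double d; ... }
  // the parameter area is 3 slots (int + long), size_of_locals would be
  // 6 slots (a, b, c, d), and x13 ends up as 6 - 3 = 3 additional local
  // slots that the frame still has to allocate.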

  // ... (the remainder of generate_normal_entry is elided; the lines below
  // are the tail of the TosState entry-point generator) ...

  __ push_ptr();
  __ j(L);
  fep = __ pc();  // ftos entry point
  __ push_f();
  __ j(L);
  dep = __ pc();  // dtos entry point
  __ push_d();
  __ j(L);
  lep = __ pc();  // ltos entry point
  __ push_l();
  __ j(L);
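  // byte, char, short, and int values all travel in the tos register as an
  // int (itos), so they share one entry point that pushes a single word.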
  bep = cep = sep = iep = __ pc();  // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}
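
// Conceptually this is just (illustration only, not the generated code):
//   BytecodeCounter::_counter_value++;   // performed as one atomic add-word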

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index]++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}
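
// Worked example (assuming log2_number_of_codes == 8): after executing
// iload (0x15) followed by iadd (0x60), _index becomes
// (0x60 << 8) | 0x15 == 0x6015, so the bucket for the ordered bytecode
// pair (iload, iadd) is the one that gets bumped.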