  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}
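
// A rough C++ equivalent of the generated watermark logic above (a sketch
// only; 'touch' stands in for the store performed by bang_stack_with_offset):
//
//   if (sp > thread->shadow_zone_growth_watermark()) goto done;
//   for (int p = 1; p <= n_shadow_pages; p++)
//     touch(sp - p * page_size);                   // pre-fault shadow pages
//   if (sp > thread->shadow_zone_safe_limit())
//     thread->set_shadow_zone_growth_watermark(sp);
// done: ;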

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the native
// method than a typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                        size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);
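
  // In rough C++ terms the two loads above amount to (sketch; accessor
  // names assumed): x12 = xmethod->constMethod()->size_of_parameters();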

  // Native calls don't need the stack size check since they have no
  // expression stack; the arguments are already on the stack, and we only
  // add a handful of words to it.

  // ...

  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }
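
  // Note (sketch of intent): generate_counter_overflow emits a runtime
  // upcall that requests compilation of this method, then resumes the
  // interpreted call at continue_after_compile.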

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

  // determine code generation flags
  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  // ...

  __ push_ptr();
  __ j(L);
  fep = __ pc();  // ftos entry point
  __ push_f();
  __ j(L);
  dep = __ pc();  // dtos entry point
  __ push_d();
  __ j(L);
  lep = __ pc();  // ltos entry point
  __ push_l();
  __ j(L);
  bep = cep = sep = iep = __ pc();  // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}
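
// Each TosState entry point above pushes the value currently cached in the
// tos register(s) onto the expression stack and then jumps to the shared
// label L; vtos has nothing to push, so vep falls straight through to L and
// the common dispatch in generate_and_dispatch.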

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();  // return from result handler

  return entry;
}
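
// Note: the trace stub must be transparent to the interpreter's register
// state, so ra, the tos value, and the full caller-saved set are preserved
// around the call_VM above.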

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}
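
// Roughly equivalent C++ (sketch): Atomic::add(&BytecodeCounter::_counter_value, 1);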

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index]++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}
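
// A rough C++ equivalent of the generated pair-histogram code (sketch only):
//
//   int index = BytecodePairHistogram::_index;
//   index = (index >> BytecodePairHistogram::log2_number_of_codes) |
//           ((int)bytecode << BytecodePairHistogram::log2_number_of_codes);
//   BytecodePairHistogram::_index = index;
//   Atomic::add(&BytecodePairHistogram::_counters[index], 1);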