  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

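  // Bang each page of the shadow zone in turn; a probe that reaches the
  // guard pages faults and triggers the usual stack-overflow handling.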
  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);
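  // x12 held the ConstMethod* for one instruction; it now holds the
  // unsigned parameter count loaded from that ConstMethod.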

  // Native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to the stack.

  // xmethod: Method*

  // ...
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {

  // determine code generation flags
  const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
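  // x13 now holds the ConstMethod*, the base of the size_of_parameters and
  // size_of_locals addresses above; it is clobbered once size_of_locals is read.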
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12); // x13 = no. of additional locals
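  // The parameters are already on the stack, so only these additional locals
  // still need stack space; the entry code that follows (elided below)
  // allocates and zero-initializes them.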

  // ...

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
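  // Save all caller-saved registers around the VM call: trace_bytecode is a
  // full runtime call and must not disturb the interpreter's live state.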
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10); // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret(); // return from result handler

  return entry;
}
#endif // PRODUCT

void TemplateInterpreterGenerator::count_bytecode() {
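  // Atomically increment the global bytecode counter; x7 holds its address.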
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
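  // Atomically increment this bytecode's 32-bit slot in the histogram.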
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

#ifndef PRODUCT
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index]++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
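  // counter_addr = &_counters[index]; shadd forms x7 + (index << LogBytesPerInt).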
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}