  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

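  // sp has moved below the growth watermark: touch each page of the shadow
  // zone in turn, so that running out of stack traps here, at a well-defined
  // point, rather than somewhere inside code that does not check for overflow.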
  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);
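  // x12 now holds the parameter count in words; it previously held the
  // ConstMethod* that the size_of_parameters address used as its base.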

  // Native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and
  // we only add a handful of words to the stack.

  // xmethod: Method*

  // ... (bulk of the native entry body elided) ...

  JFR_ONLY(__ leave_jfr_critical_section();)

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {

  // determine code generation flags
  const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12); // x13 = no. of additional locals
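  // The caller has already pushed the parameters; space for the additional
  // locals is allocated and zero-initialized by the frame setup that
  // follows (elided here).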

  // ... (rest of the normal entry and subsequent code elided) ...

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
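  // The argument and temporary registers are saved around the VM call so
  // that tracing does not disturb the interpreter's register state.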
  __ mv(c_rarg2, x10); // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret(); // return from result handler

  return entry;
}
#endif // PRODUCT

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
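  // Note: a 64-bit atomic add; a 32-bit count of all executed bytecodes
  // would overflow quickly (the histogram counters below use atomic_addw).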
  __ atomic_add(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

#ifndef PRODUCT
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
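  // With log2_number_of_codes == 8, this packs the pair into a 16-bit index:
  // the previous bytecode shifts down into the low byte and the current
  // bytecode lands in the high byte.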
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index] ++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
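  // counter_addr = &_counters[index]: shadd scales index by sizeof(jint)
  // (1 << LogBytesPerInt) and adds the array base address.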
  __ atomic_addw(noreg, 1, counter_addr);
}