src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
{
if (!RewriteBytecodes) return;
Label L_patch_done;
switch (bc) {
+ case Bytecodes::_fast_vputfield:
case Bytecodes::_fast_aputfield:
case Bytecodes::_fast_bputfield:
case Bytecodes::_fast_zputfield:
case Bytecodes::_fast_cputfield:
case Bytecodes::_fast_dputfield:
assert(r1 != array, "different registers");
__ mov(r1, index);
}
Label ok;
__ br(Assembler::LO, ok);
! // Convention: the exception-message code expects the array in r3
! __ mov(r3, array);
! __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
! __ br(rscratch1);
__ bind(ok);
}
void TemplateTable::iaload()
{
assert(r1 != array, "different registers");
__ mov(r1, index);
}
Label ok;
__ br(Assembler::LO, ok);
! // Convention: the exception-message code expects the array in r3
! __ mov(r3, array);
! __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
! __ br(rscratch1);
__ bind(ok);
}
void TemplateTable::iaload()
{
__ mov(r1, r0);
__ pop_ptr(r0);
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
! __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
! do_oop_load(_masm,
! Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
! r0,
! IS_ARRAY);
}
void TemplateTable::baload()
{
transition(itos, itos);
__ mov(r1, r0);
__ pop_ptr(r0);
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
! __ profile_array_type<ArrayLoadData>(r2, r0, r4);
! if (UseFlatArray) {
! Label is_flat_array, done;
!
! __ test_flat_array_oop(r0, r8 /*temp*/, is_flat_array);
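+ // Not a flat array: fall through and load the element as a regular oop.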
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+ do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
+
+ __ b(done);
+ __ bind(is_flat_array);
+ __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
+ // Ensure the stores to copy the inline field contents are visible
+ // before any subsequent store that publishes this reference.
+ __ membar(Assembler::StoreStore);
+ __ bind(done);
+ } else {
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+ do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
+ }
+ __ profile_element_type(r2, r0, r4);
}
void TemplateTable::baload()
{
transition(itos, itos);
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}
void TemplateTable::aastore() {
! Label is_null, ok_is_subtype, done;
transition(vtos, vtos);
// stack: ..., array, index, value
__ ldr(r0, at_tos()); // value
__ ldr(r2, at_tos_p1()); // index
__ ldr(r3, at_tos_p2()); // array
- Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
-
index_check(r3, r2); // kills r1
__ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
// do array store check - check for null value first
__ cbz(r0, is_null);
// Move subklass into r1
__ load_klass(r1, r0);
! // Move superklass into r0
! __ load_klass(r0, r3);
! __ ldr(r0, Address(r0,
- ObjArrayKlass::element_klass_offset()));
// The element address was formed above from r3 (array) and r4 (scaled index plus header); r2 is now free.
// Generate subtype check. Blows r2, r5
// Superklass in r0. Subklass in r1.
! __ gen_subtype_check(r1, ok_is_subtype);
// Come here on failure
// object is at TOS
__ b(Interpreter::_throw_ArrayStoreException_entry);
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}
void TemplateTable::aastore() {
! Label is_null, is_flat_array, ok_is_subtype, done;
transition(vtos, vtos);
// stack: ..., array, index, value
__ ldr(r0, at_tos()); // value
__ ldr(r2, at_tos_p1()); // index
__ ldr(r3, at_tos_p2()); // array
index_check(r3, r2); // kills r1
+
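+ // Profile the array klass and the klasses of stored elements; this feeds the
+ // ArrayStoreData entry in the method's profile, consumed by the JIT compilers.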
+ __ profile_array_type<ArrayStoreData>(r4, r3, r5);
+ __ profile_multiple_element_types(r4, r0, r5, r6);
+
__ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+ Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
+ // Be careful not to clobber r4 below
// do array store check - check for null value first
__ cbz(r0, is_null);
+ // Move array class to r5
+ __ load_klass(r5, r3);
+
+ if (UseFlatArray) {
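+ // Klass::layout_helper encodes (among other things) whether this is a flat
+ // array; branch to the flat-array store path if so.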
+ __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
+ __ test_flat_array_layout(r6, is_flat_array);
+ }
+
// Move subklass into r1
__ load_klass(r1, r0);
!
! // Move array element superklass into r0
! __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));
// The element address was formed above from r3 (array) and r4 (scaled index plus header); r2 is now free.
// Generate subtype check. Blows r2, r5
// Superklass in r0. Subklass in r1.
!
+ // is "r1 <: r0" ? (value subclass <: array element superclass)
+ __ gen_subtype_check(r1, ok_is_subtype, false);
// Come here on failure
// object is at TOS
__ b(Interpreter::_throw_ArrayStoreException_entry);
do_oop_store(_masm, element_address, r0, IS_ARRAY);
__ b(done);
// Have a null in r0, r3=array, r2=index. Store null at ary[idx]
__ bind(is_null);
! __ profile_null_seen(r2);
// Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments
__ bind(done);
__ add(esp, esp, 3 * Interpreter::stackElementSize);
}
do_oop_store(_masm, element_address, r0, IS_ARRAY);
__ b(done);
// Have a null in r0, r3=array, r2=index. Store null at ary[idx]
__ bind(is_null);
! if (EnableValhalla) {
+ Label is_null_into_value_array_npe, store_null;
+
+ // Null can never be stored into a null-free array; throw NullPointerException.
+ __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe);
+ __ b(store_null);
+
+ __ bind(is_null_into_value_array_npe);
+ __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
+
+ __ bind(store_null);
+ }
// Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
+ __ b(done);
+
+ if (UseFlatArray) {
+ Label is_type_ok;
+ __ bind(is_flat_array); // Store non-null value to flat
+
+ // Simplistic type check...
+ // r0 - value, r2 - index, r3 - array.
+
+ // Profile the not-null value's klass.
+ // Load value class
+ __ load_klass(r1, r0);
+
+ // Move element klass into r7
+ __ ldr(r7, Address(r5, ArrayKlass::element_klass_offset()));
+
+ // flat value array needs exact type match
+ // is "r1 == r7" (value subclass == array element superclass)
+
+ __ cmp(r7, r1);
+ __ br(Assembler::EQ, is_type_ok);
+
+ __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
+
+ __ bind(is_type_ok);
+ // r1: value's klass
+ // r3: array
+ // r5: array klass
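+ // An empty inline type has no payload; there is nothing to copy.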
+ __ test_klass_is_empty_inline_type(r1, r7, done);
+
+ // calc dst for copy
+ __ ldrw(r7, at_tos_p1()); // index
+ __ data_for_value_array_index(r3, r5, r7, r7);
+
+ // ...and src for copy
+ __ ldr(r6, at_tos()); // value
+ __ data_for_oop(r6, r6, r1);
+
+ __ mov(r4, r1); // Shuffle arguments to avoid conflict with c_rarg1
+ __ access_value_copy(IN_HEAP, r6, r7, r4);
+ }
// Pop stack arguments
__ bind(done);
__ add(esp, esp, 3 * Interpreter::stackElementSize);
}
branch(false, false);
__ bind(not_taken);
__ profile_not_taken_branch(r0);
}
! void TemplateTable::if_acmp(Condition cc)
- {
transition(atos, vtos);
// assume branch is more often taken than not (loops use backward branches)
! Label not_taken;
__ pop_ptr(r1);
__ cmpoop(r1, r0);
__ br(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
! __ profile_not_taken_branch(r0);
}
void TemplateTable::ret() {
transition(vtos, vtos);
locals_index(r1);
__ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
__ profile_ret(r1, r2);
branch(false, false);
__ bind(not_taken);
__ profile_not_taken_branch(r0);
}
! void TemplateTable::if_acmp(Condition cc) {
transition(atos, vtos);
// assume branch is more often taken than not (loops use backward branches)
! Label taken, not_taken;
__ pop_ptr(r1);
+
+ __ profile_acmp(r2, r1, r0, r4);
+
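+ // Valhalla: value objects are tagged in the mark word with
+ // markWord::inline_type_pattern. acmp on two value objects of the same class
+ // cannot be decided by reference comparison; it must test substitutability.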
+ Register is_inline_type_mask = rscratch1;
+ __ mov(is_inline_type_mask, markWord::inline_type_pattern);
+
+ if (EnableValhalla) {
+ __ cmp(r1, r0);
+ __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);
+
+ // might be substitutable, test if either r0 or r1 is null
+ __ andr(r2, r0, r1);
+ __ cbz(r2, (cc == equal) ? not_taken : taken);
+
+ // and both are values ?
+ __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
+ __ andr(r2, r2, is_inline_type_mask);
+ __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
+ __ andr(r4, r4, is_inline_type_mask);
+ __ andr(r2, r2, r4);
+ __ cmp(r2, is_inline_type_mask);
+ __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
+
+ // same value klass ?
+ __ load_metadata(r2, r1);
+ __ load_metadata(r4, r0);
+ __ cmp(r2, r4);
+ __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
+
+ // Know both are the same type, let's test for substitutability...
+ if (cc == equal) {
+ invoke_is_substitutable(r0, r1, taken, not_taken);
+ } else {
+ invoke_is_substitutable(r0, r1, not_taken, taken);
+ }
+ __ stop("Not reachable");
+ }
+
__ cmpoop(r1, r0);
__ br(j_not(cc), not_taken);
+ __ bind(taken);
branch(false, false);
__ bind(not_taken);
! __ profile_not_taken_branch(r0, true);
+ }
+
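+ // Slow path for acmp on two value objects of the same class: ask the runtime
+ // whether the operands are substitutable (field-wise equal) and branch to the
+ // matching outcome.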
+ void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
+ Label& is_subst, Label& not_subst) {
+
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
+ // On return, r0 holds the answer; branch to the matching outcome.
+ __ cbz(r0, not_subst);
+ __ b(is_subst);
}
+
void TemplateTable::ret() {
transition(vtos, vtos);
locals_index(r1);
__ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
__ profile_ret(r1, r2);
__ verify_oop(r);
}
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
! const Register cache = r4;
const Register obj = r4;
const Register index = r3;
const Register tos_state = r3;
const Register off = r19;
const Register flags = r6;
const Register bc = r4; // uses same reg as obj, so don't mix them
resolve_cache_and_index_for_field(byte_no, cache, index);
jvmti_post_field_access(cache, index, is_static, false);
load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
if (!is_static) {
// obj is on the stack
pop_and_check_object(obj);
__ verify_oop(r);
}
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
! const Register cache = r2;
const Register obj = r4;
+ const Register klass = r5;
+ const Register inline_klass = r7;
+ const Register field_index = r23;
const Register index = r3;
const Register tos_state = r3;
const Register off = r19;
const Register flags = r6;
const Register bc = r4; // uses same reg as obj, so don't mix them
resolve_cache_and_index_for_field(byte_no, cache, index);
jvmti_post_field_access(cache, index, is_static, false);
+
+ // Valhalla: fetch the field index and holder klass needed by the inline-type paths below.
+ __ load_unsigned_short(field_index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
+ __ ldr(klass, Address(cache, ResolvedFieldEntry::field_holder_offset()));
+
load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
if (!is_static) {
// obj is on the stack
pop_and_check_object(obj);
__ bind(notBool);
__ cmp(tos_state, (u1)atos);
__ br(Assembler::NE, notObj);
// atos
! do_oop_load(_masm, field, r0, IN_HEAP);
! __ push(atos);
! if (rc == may_rewrite) {
! patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
}
- __ b(Done);
__ bind(notObj);
__ cmp(tos_state, (u1)itos);
__ br(Assembler::NE, notInt);
// itos
__ bind(notBool);
__ cmp(tos_state, (u1)atos);
__ br(Assembler::NE, notObj);
// atos
! if (!EnableValhalla) {
! do_oop_load(_masm, field, r0, IN_HEAP);
! __ push(atos);
! if (rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
+ }
+ __ b(Done);
+ } else { // Valhalla
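+ // Valhalla getfield shapes: ordinary reference fields are unchanged; null-free
+ // inline fields may need the klass's default value when unset; flat fields read
+ // the payload out of the holder; nullable flat fields ("null marker") go to the runtime.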
+ if (is_static) {
+ __ load_heap_oop(r0, field, rscratch1, rscratch2);
+ Label is_null_free_inline_type, uninitialized;
+ // The static field may not have been initialized yet; the uninitialized case is handled below.
+ __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_null_free_inline_type);
+ // field is not a null free inline type
+ __ push(atos);
+ __ b(Done);
+ // field is a null free inline type, must not return null even if uninitialized
+ __ bind(is_null_free_inline_type);
+ __ cbz(r0, uninitialized);
+ __ push(atos);
+ __ b(Done);
+ __ bind(uninitialized);
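+ // If the holder is fully initialized the default value can be fetched
+ // directly; otherwise let the runtime initialize the field.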
+ Label slow_case, finish;
+ __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
+ __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
+ __ br(Assembler::NE, slow_case);
+ __ get_default_value_oop(klass, off /* temp */, r0);
+ __ b(finish);
+ __ bind(slow_case);
+ __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_inline_type_field), obj, cache);
+ __ bind(finish);
+ __ verify_oop(r0);
+ __ push(atos);
+ __ b(Done);
+ } else {
+ Label is_flat, nonnull, is_inline_type, has_null_marker, rewrite_inline;
+ __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
+ __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
+ // Non-inline field case
+ __ load_heap_oop(r0, field, rscratch1, rscratch2);
+ __ push(atos);
+ if (rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
+ }
+ __ b(Done);
+ __ bind(is_inline_type);
+ __ test_field_is_flat(flags, noreg /* temp */, is_flat);
+ // field is not flat
+ __ load_heap_oop(r0, field, rscratch1, rscratch2);
+ __ cbnz(r0, nonnull);
+ __ get_inline_type_field_klass(klass, field_index, inline_klass);
+ __ get_default_value_oop(inline_klass, klass /* temp */, r0);
+ __ bind(nonnull);
+ __ verify_oop(r0);
+ __ push(atos);
+ __ b(rewrite_inline);
+ __ bind(is_flat);
+ // field is flat
+ __ mov(r0, obj);
+ __ read_flat_field(klass, field_index, off, inline_klass /* temp */, r0);
+ __ verify_oop(r0);
+ __ push(atos);
+ __ b(rewrite_inline);
+ __ bind(has_null_marker);
+ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), obj, cache);
+ __ verify_oop(r0);
+ __ push(atos);
+ __ bind(rewrite_inline);
+ if (rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_vgetfield, bc, r1);
+ }
+ __ b(Done);
+ }
}
__ bind(notObj);
__ cmp(tos_state, (u1)itos);
__ br(Assembler::NE, notInt);
// itos
const Register cache = r2;
const Register index = r3;
const Register tos_state = r3;
const Register obj = r2;
const Register off = r19;
! const Register flags = r0;
const Register bc = r4;
resolve_cache_and_index_for_field(byte_no, cache, index);
jvmti_post_field_mod(cache, index, is_static);
load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
Label Done;
- __ mov(r5, flags);
-
{
Label notVolatile;
! __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
__ bind(notVolatile);
}
// field address
const Register cache = r2;
const Register index = r3;
const Register tos_state = r3;
const Register obj = r2;
const Register off = r19;
! const Register flags = r6;
const Register bc = r4;
+ const Register inline_klass = r5;
resolve_cache_and_index_for_field(byte_no, cache, index);
jvmti_post_field_mod(cache, index, is_static);
load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
Label Done;
{
Label notVolatile;
! __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
__ bind(notVolatile);
}
// field address
__ cmp(tos_state, (u1)atos);
__ br(Assembler::NE, notObj);
// atos
{
! __ pop(atos);
! if (!is_static) pop_and_check_object(obj);
! // Store into the field
! do_oop_store(_masm, field, r0, IN_HEAP);
! if (rc == may_rewrite) {
! patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
! }
! __ b(Done);
}
__ bind(notObj);
__ cmp(tos_state, (u1)itos);
__ br(Assembler::NE, notInt);
__ cmp(tos_state, (u1)atos);
__ br(Assembler::NE, notObj);
// atos
{
! if (!EnableValhalla) {
! __ pop(atos);
! if (!is_static) pop_and_check_object(obj);
! // Store into the field
! do_oop_store(_masm, field, r0, IN_HEAP);
! if (rc == may_rewrite) {
! patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
! }
+ __ b(Done);
+ } else { // Valhalla
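+ // Mirrors getfield: null-free fields reject null stores, flat fields copy the
+ // value payload into the holder, and nullable flat fields are written via the runtime.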
+ __ pop(atos);
+ if (is_static) {
+ Label is_inline_type;
+ __ test_field_is_not_null_free_inline_type(flags, noreg /* temp */, is_inline_type);
+ __ null_check(r0);
+ __ bind(is_inline_type);
+ do_oop_store(_masm, field, r0, IN_HEAP);
+ __ b(Done);
+ } else {
+ Label is_inline_type, is_flat, has_null_marker, rewrite_not_inline, rewrite_inline;
+ __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
+ __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
+ // Not an inline type
+ pop_and_check_object(obj);
+ // Store into the field
+ do_oop_store(_masm, field, r0, IN_HEAP);
+ __ bind(rewrite_not_inline);
+ if (rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
+ }
+ __ b(Done);
+ // Implementation of the inline-type semantics
+ __ bind(is_inline_type);
+ __ null_check(r0);
+ __ test_field_is_flat(flags, noreg /*temp*/, is_flat);
+ // field is not flat
+ pop_and_check_object(obj);
+ // Store into the field
+ do_oop_store(_masm, field, r0, IN_HEAP);
+ __ b(rewrite_inline);
+ __ bind(is_flat);
+ // field is flat
+ pop_and_check_object(obj);
+ assert_different_registers(r0, inline_klass, obj, off);
+ __ load_klass(inline_klass, r0);
+ __ data_for_oop(r0, r0, inline_klass);
+ __ add(obj, obj, off);
+ __ access_value_copy(IN_HEAP, r0, obj, inline_klass);
+ __ b(rewrite_inline);
+ __ bind(has_null_marker);
+ assert_different_registers(r0, cache, r19);
+ pop_and_check_object(r19);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r19, r0, cache);
+ __ bind(rewrite_inline);
+ if (rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_vputfield, bc, r19, true, byte_no);
+ }
+ __ b(Done);
+ }
+ } // Valhalla
}
__ bind(notObj);
__ cmp(tos_state, (u1)itos);
__ br(Assembler::NE, notInt);
__ bind(Done);
{
Label notVolatile;
! __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
__ bind(notVolatile);
}
}
__ bind(Done);
{
Label notVolatile;
! __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
__ bind(notVolatile);
}
}
__ push_ptr(r19); // put the object pointer back on tos
// Save tos values before call_VM() clobbers them. Since we have
// to do it for every data type, we use the saved values as the
// jvalue object.
switch (bytecode()) { // load values into the jvalue object
+ case Bytecodes::_fast_vputfield: // fall through
case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
case Bytecodes::_fast_bputfield: // fall through
case Bytecodes::_fast_zputfield: // fall through
case Bytecodes::_fast_sputfield: // fall through
case Bytecodes::_fast_cputfield: // fall through
CAST_FROM_FN_PTR(address,
InterpreterRuntime::post_field_modification),
r19, c_rarg2, c_rarg3);
switch (bytecode()) { // restore tos values
+ case Bytecodes::_fast_vputfield: // fall through
case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
case Bytecodes::_fast_bputfield: // fall through
case Bytecodes::_fast_zputfield: // fall through
case Bytecodes::_fast_sputfield: // fall through
case Bytecodes::_fast_cputfield: // fall through
// field address
const Address field(r2, r1);
// access field
switch (bytecode()) {
+ case Bytecodes::_fast_vputfield:
+ {
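+ // Same cases as putfield's Valhalla path: nullable flat fields call into the
+ // runtime, flat fields copy the value payload into the holder, and everything
+ // else stores the oop directly.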
+ Label is_flat, has_null_marker, done;
+ __ test_field_has_null_marker(r3, noreg /* temp */, has_null_marker);
+ __ null_check(r0);
+ __ test_field_is_flat(r3, noreg /* temp */, is_flat);
+ // field is not flat
+ do_oop_store(_masm, field, r0, IN_HEAP);
+ __ b(done);
+ __ bind(is_flat);
+ // field is flat
+ __ load_klass(r4, r0);
+ __ data_for_oop(r0, r0, r4);
+ __ lea(rscratch1, field);
+ __ access_value_copy(IN_HEAP, r0, rscratch1, r4);
+ __ b(done);
+ __ bind(has_null_marker);
+ __ load_field_entry(r4, r1);
+ __ mov(r1, r2);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r1, r0, r4);
+ __ bind(done);
+ }
+ break;
case Bytecodes::_fast_aputfield:
do_oop_store(_masm, field, r0, IN_HEAP);
break;
case Bytecodes::_fast_lputfield:
__ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
__ bind(notVolatile);
}
// access field
switch (bytecode()) {
+ case Bytecodes::_fast_vgetfield:
+ {
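+ // Mirrors getfield's inline-type paths, driven by the cached field flags in r3:
+ // not flat (load the oop, substituting the default value for null), flat
+ // (read_flat_field), nullable flat (runtime call).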
+ Register index = r4, klass = r5, inline_klass = r6, tmp = r7;
+ Label is_flat, has_null_marker, nonnull, Done;
+ __ test_field_has_null_marker(r3, noreg /*temp*/, has_null_marker);
+ __ test_field_is_flat(r3, noreg /* temp */, is_flat);
+ // field is not flat
+ __ load_heap_oop(r0, field, rscratch1, rscratch2);
+ __ cbnz(r0, nonnull);
+ __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
+ __ ldr(klass, Address(r2, in_bytes(ResolvedFieldEntry::field_holder_offset())));
+ __ get_inline_type_field_klass(klass, index, inline_klass);
+ __ get_default_value_oop(inline_klass, tmp /* temp */, r0);
+ __ bind(nonnull);
+ __ verify_oop(r0);
+ __ b(Done);
+ __ bind(is_flat);
+ // field is flat
+ __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
+ __ ldr(klass, Address(r2, in_bytes(ResolvedFieldEntry::field_holder_offset())));
+ __ read_flat_field(klass, index, r1, tmp /* temp */, r0);
+ __ verify_oop(r0);
+ __ b(Done);
+ __ bind(has_null_marker);
+ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), r0, r2);
+ __ verify_oop(r0);
+ __ bind(Done);
+ }
+ break;
case Bytecodes::_fast_agetfield:
do_oop_load(_masm, field, r0, IN_HEAP);
__ verify_oop(r0);
break;
case Bytecodes::_fast_lgetfield:
// make sure klass is initialized
assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
__ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);
! // get instance_size in InstanceKlass (scaled to a count of bytes)
! __ ldrw(r3,
- Address(r4,
- Klass::layout_helper_offset()));
- // test to see if it is malformed in some way
- __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
-
- // Allocate the instance:
- // If TLAB is enabled:
- // Try to allocate in the TLAB.
- // If fails, go to the slow path.
- // Initialize the allocation.
- // Exit.
- //
- // Go to slow path.
-
- if (UseTLAB) {
- __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
-
- if (ZeroTLAB) {
- // the fields have been already cleared
- __ b(initialize_header);
- }
-
- // The object is initialized before the header. If the object size is
- // zero, go directly to the header initialization.
- __ sub(r3, r3, sizeof(oopDesc));
- __ cbz(r3, initialize_header);
-
- // Initialize object fields
- {
- __ add(r2, r0, sizeof(oopDesc));
- Label loop;
- __ bind(loop);
- __ str(zr, Address(__ post(r2, BytesPerLong)));
- __ sub(r3, r3, BytesPerLong);
- __ cbnz(r3, loop);
- }
-
- // initialize object header only.
- __ bind(initialize_header);
- __ mov(rscratch1, (intptr_t)markWord::prototype().value());
- __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
- __ store_klass_gap(r0, zr); // zero klass gap for compressed oops
- __ store_klass(r0, r4); // store klass last
-
- {
- SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
- // Trigger dtrace event for fastpath
- __ push(atos); // save the return value
- __ call_VM_leaf(
- CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), r0);
- __ pop(atos); // restore the return value
-
- }
- __ b(done);
- }
// slow case
__ bind(slow_case);
__ get_constant_pool(c_rarg1);
__ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
// make sure klass is initialized
assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
__ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);
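+ // The open-coded TLAB allocation and object initialization (see the removed
+ // lines above) are presumably folded into the allocate_instance() helper called here.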
! __ allocate_instance(r4, r0, r3, r1, true, slow_case);
! __ b(done);
// slow case
__ bind(slow_case);
__ get_constant_pool(c_rarg1);
__ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
// Come here on success
__ bind(ok_is_subtype);
__ mov(r0, r3); // Restore the object from r3 back into r0
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
- __ b(done);
- __ bind(is_null);
__ profile_null_seen(r2);
- } else {
- __ bind(is_null); // same as 'done'
}
__ bind(done);
}
void TemplateTable::instanceof() {
transition(atos, itos);
// Come here on success
__ bind(ok_is_subtype);
__ mov(r0, r3); // Restore the object from r3 back into r0
+ __ b(done);
+ __ bind(is_null);
+
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ profile_null_seen(r2);
}
+
__ bind(done);
}
void TemplateTable::instanceof() {
transition(atos, itos);
transition(atos, vtos);
// check for null object
__ null_check(r0);
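+ // Value objects have no identity and cannot be locked; test the mark word
+ // pattern and throw (via InterpreterRuntime::throw_identity_exception) below.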
+ Label is_inline_type;
+ __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
+ __ test_markword_is_inline_type(rscratch1, is_inline_type);
+
const Address monitor_block_top(
rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
__ generate_stack_overflow_check(0);
// The bcp has already been incremented. Just need to dispatch to
// next instruction.
__ dispatch_next(vtos);
+
+ __ bind(is_inline_type);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_identity_exception), r0);
+ __ should_not_reach_here();
}
void TemplateTable::monitorexit()
{
transition(atos, vtos);
// check for null object
__ null_check(r0);
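+ // monitorexit on a value object is malformed bytecode: if the mark word
+ // carries the inline-type pattern, throw IllegalMonitorStateException.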
+ const int is_inline_type_mask = markWord::inline_type_pattern;
+ Label has_identity;
+ __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
+ __ mov(rscratch2, is_inline_type_mask);
+ __ andr(rscratch1, rscratch1, rscratch2);
+ __ cmp(rscratch1, rscratch2);
+ __ br(Assembler::NE, has_identity);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_illegal_monitor_state_exception));
+ __ should_not_reach_here();
+ __ bind(has_identity);
+
const Address monitor_block_top(
rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();