src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -34,10 +34,14 @@
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
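+// ShenandoahBarrierSetAssembler supplies the storeval barrier used in do_oop_store below.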
+#include "shenandoahBarrierSetAssembler_x86.hpp"
+#endif
#ifndef CC_INTERP
#define __ _masm->
//----------------------------------------------------------------------------------------------------
@@ -163,10 +166,50 @@
        }
        __ restore_bcp();
      }
      break;
+    case BarrierSet::ShenandoahBarrierSet:
+      {
+        // Flatten the object address if needed.
+        // We do this regardless of precise because we need the registers.
+        if (obj.index() == noreg && obj.disp() == 0) {
+          if (obj.base() != rdx) {
+            __ movl(rdx, obj.base());
+          }
+        } else {
+          __ leal(rdx, obj);
+        }
+        __ get_thread(rcx);
+        __ save_bcp();
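+        // Shenandoah marking keeps a snapshot-at-the-beginning (SATB) invariant,
+        // as G1 does, so G1's pre-barrier is reused to record the previous value.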
+        if (ShenandoahSATBBarrier) {
+          __ g1_write_barrier_pre(rdx /* obj */,
+                                  rbx /* pre_val */,
+                                  rcx /* thread */,
+                                  rsi /* tmp */,
+                                  val != noreg /* tosca_live */,
+                                  false /* expand_call */);
+        }
+
+        // Do the actual store
+        // noreg means NULL
+        if (val == noreg) {
+          __ movptr(Address(rdx, 0), NULL_WORD);
+          // No storeval barrier needed for a NULL store
+        } else {
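+          // Storeval barrier first: make sure val points into to-space before
+          // the reference is published to the heap.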
+          ShenandoahBarrierSetAssembler::bsasm()->storeval_barrier(_masm, val, rsi);
+          __ movl(Address(rdx, 0), val);
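+          // Unlike G1, no post barrier follows: Shenandoah keeps no remembered set.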
+        }
+        __ restore_bcp();
+
+      }
+      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
@@ -666,11 +704,18 @@
void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax); // kills rbx
  // rax: index
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC) {
+    // Needs GC barriers: load the oop through load_heap_oop so the Shenandoah barrier is applied.
+    __ load_heap_oop(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  } else
+#endif
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+
}
void TemplateTable::baload() {
  transition(itos, itos);
@@ -2301,10 +2346,16 @@
  __ bind(notInt);
  // atos
  __ cmpl(flags, atos );
  __ jcc(Assembler::notEqual, notObj);
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC) {
+    // Needs GC barriers
+    __ load_heap_oop(rax, lo);
+  } else
+#endif
  __ movl(rax, lo );
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
  }
@@ -2869,11 +2920,20 @@
    case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
    case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
    case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
    case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
    case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
-    case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
+    case Bytecodes::_fast_agetfield:
+#if INCLUDE_ALL_GCS
+      if (UseShenandoahGC) {
+        // Needs GC barriers
+        __ load_heap_oop(rax, lo);
+      } else
+#endif
+      __ movptr(rax, lo);
+      __ verify_oop(rax);
+      break;
    default:
      ShouldNotReachHere();
  }
  // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
@@ -2895,10 +2955,16 @@
  __ null_check(rax);
  const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
  if (state == itos) {
    __ movl(rax, lo);
  } else if (state == atos) {
+#if INCLUDE_ALL_GCS
+    if (UseShenandoahGC) {
+      // Needs GC barriers
+      __ load_heap_oop(rax, lo);
+    } else
+#endif
    __ movptr(rax, lo);
    __ verify_oop(rax);
  } else if (state == ftos) {
    __ fld_s(lo);
  } else {
@@ -2950,10 +3016,16 @@
  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
+#if INCLUDE_ALL_GCS
+    if (UseShenandoahGC) {
+      // The Shenandoah barrier in the skipped code is too large for a short (rel8) jump.
+      __ jcc(Assembler::zero, L_no_push);
+    } else
+#endif
    __ jccb(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);