src/hotspot/cpu/aarch64/aarch64.ad
_NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
// r27 is not allocatable when compressed oops is on and heapbase is not
// zero; compressed klass pointers don't use r27 after JDK-8234794
- if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
+ if (UseCompressedOops && (CompressedOops::ptrs_base() != nullptr)) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
}
bool needs_releasing_store(const Node *n)
{
// assert n->is_Store();
StoreNode *st = n->as_Store();
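+ // A store with a trailing MemBarVolatile is translated to stlr (the membar itself is then elided).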
- return st->trailing_membar() != NULL;
+ return st->trailing_membar() != nullptr;
}
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false
bool needs_acquiring_load_exclusive(const Node *n)
{
assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
LoadStoreNode* ldst = n->as_LoadStore();
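+ // A strong CAS always generates a trailing MemBarAcquire; a weak CAS may not.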
if (is_CAS(n->Opcode(), false)) {
- assert(ldst->trailing_membar() != NULL, "expected trailing membar");
+ assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
} else {
- return ldst->trailing_membar() != NULL;
+ return ldst->trailing_membar() != nullptr;
}
// so we can just return true here
return true;
}
// stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
// blr(rscratch1)
CodeBlob *cb = CodeCache::find_blob(_entry_point);
if (cb) {
return 1 * NativeInstruction::instruction_size;
+ } else if (_entry_point == nullptr) {
+ // See CallLeafNoFPIndirect
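+ // (an indirect call through blr is a single instruction)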
+ return 1 * NativeInstruction::instruction_size;
} else {
return 6 * NativeInstruction::instruction_size;
}
}
st->print("stp lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
if (PreserveFramePointer) st->print("mov rfp, sp\n\t");
st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
st->print("sub sp, sp, rscratch1");
}
- if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
+ if (C->stub_function() == nullptr && BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
st->print("\n\t");
st->print("ldr rscratch1, [guard]\n\t");
st->print("dmb ishld\n\t");
st->print("ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
st->print("cmp rscratch1, rscratch2\n\t");
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
- // n.b. frame size includes space for return pc and rfp
- const int framesize = C->output()->frame_size_in_bytes();
-
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
__ nop();
- if (C->clinit_barrier_on_entry()) {
- assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
-
- Label L_skip_barrier;
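+ // verified_entry() now emits the prolog previously inlined here (clinit barrier,
+ // stack bang and frame build); entry_barrier() emits the nmethod entry barrier.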
+ __ verified_entry(C, 0);
- __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
- __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
+ if (C->stub_function() == nullptr) {
+ __ entry_barrier();
}
- if (C->max_vector_size() > 0) {
- __ reinitialize_ptrue();
- }
-
- int bangsize = C->output()->bang_size_in_bytes();
- if (C->output()->need_stack_bang(bangsize))
- __ generate_stack_overflow_check(bangsize);
-
- __ build_frame(framesize);
-
- if (C->stub_function() == NULL) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
- // Dummy labels for just measuring the code size
- Label dummy_slow_path;
- Label dummy_continuation;
- Label dummy_guard;
- Label* slow_path = &dummy_slow_path;
- Label* continuation = &dummy_continuation;
- Label* guard = &dummy_guard;
- if (!Compile::current()->output()->in_scratch_emit_size()) {
- // Use real labels from actual stub when not emitting code for the purpose of measuring its size
- C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
- Compile::current()->output()->add_stub(stub);
- slow_path = &stub->entry();
- continuation = &stub->continuation();
- guard = &stub->guard();
- }
- // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
- bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
- }
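+ // Bind the verified-entry label; MachVEPNode branches here after unpacking scalarized inline-type args.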
+ if (!Compile::current()->output()->in_scratch_emit_size()) {
+ __ bind(*_verified_entry);
}
if (VerifyStackAtCalls) {
Unimplemented();
}
ConstantTable& constant_table = C->output()->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
- uint MachPrologNode::size(PhaseRegAlloc* ra_) const
- {
- return MachNode::size(ra_); // too many variables; just compute it
- // the hard way
- }
-
int MachPrologNode::reloc() const
{
return 0;
}
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_slots() << LogBytesPerInt;
- __ remove_frame(framesize);
+ __ remove_frame(framesize, C->needs_stack_repair());
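+ // needs_stack_repair() means the frame may have been extended at entry (see verified_entry),
+ // so its true size must be restored from the sp increment saved in the frame.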
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check();
}
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
}
}
- uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
- // Variable size. Determine dynamically.
- return MachNode::size(ra_);
- }
-
int MachEpilogNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // 1 for polling page.
}
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
if (!ra_)
st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
else
- implementation(NULL, ra_, false, st);
+ implementation(nullptr, ra_, false, st);
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, NULL);
+ implementation(&cbuf, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
} else {
return 2 * NativeInstruction::instruction_size;
}
}
//=============================================================================
+ #ifndef PRODUCT
+ void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
+ {
+ st->print_cr("# MachVEPNode");
+ if (!_verified) {
+ st->print_cr("\t load_class");
+ } else {
+ st->print_cr("\t unpack_inline_arg");
+ }
+ }
+ #endif
+
+ void MachVEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+ {
+ C2_MacroAssembler _masm(&cbuf);
+
+ if (!_verified) {
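+ // Unverified entry: inline cache check, identical to MachUEPNode::emit below.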
+ Label skip;
+ __ cmp_klass(j_rarg0, rscratch2, rscratch1);
+ __ br(Assembler::EQ, skip);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ __ bind(skip);
+ } else {
+ // insert a nop at the start of the prolog so we can patch in a
+ // branch if we need to invalidate the method later
+ __ nop();
+
+ // TODO 8284443 Avoid creation of temporary frame
+ if (ra_->C->stub_function() == nullptr) {
+ __ verified_entry(ra_->C, 0);
+ __ entry_barrier();
+ int framesize = ra_->C->output()->frame_slots() << LogBytesPerInt;
+ __ remove_frame(framesize, false);
+ }
+ // Unpack inline type args passed as oop and then jump to
+ // the verified entry point (skipping the unverified entry).
+ int sp_inc = __ unpack_inline_args(ra_->C, _receiver_only);
+ // Emit code for verified entry and save increment for stack repair on return
+ __ verified_entry(ra_->C, sp_inc);
+ if (Compile::current()->output()->in_scratch_emit_size()) {
+ Label dummy_verified_entry;
+ __ b(dummy_verified_entry);
+ } else {
+ __ b(*_verified_entry);
+ }
+ }
+ }
+
+ //=============================================================================
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
+ Label skip;
+ // The UseCompressedClassPointers logic is handled inside cmp_klass.
__ cmp_klass(j_rarg0, rscratch2, rscratch1);
- Label skip;
+
// TODO
// can we avoid this skip and still use a reloc?
__ br(Assembler::EQ, skip);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(skip);
}
- uint MachUEPNode::size(PhaseRegAlloc* ra_) const
- {
- return MachNode::size(ra_);
- }
-
// REQUIRED EMIT CODE
//=============================================================================
// Emit exception handler code.
// br rscratch1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
- if (base == NULL) {
+ if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
- if (base == NULL) {
+ if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
case Op_VecA: return new vecAOper();
case Op_VecD: return new vecDOper();
case Op_VecX: return new vecXOper();
}
ShouldNotReachHere();
- return NULL;
+ return nullptr;
}
bool Matcher::is_reg2reg_move(MachNode* m) {
return false;
}
return result;
}
// Binary src (Replicate con)
bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
- if (n == NULL || m == NULL) {
+ if (n == nullptr || m == nullptr) {
return false;
}
if (UseSVE == 0 || !VectorNode::is_invariant_vector(m)) {
return false;
}
// (XorV src (Replicate m1))
// (XorVMask src (MaskAll m1))
bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
- if (n != NULL && m != NULL) {
+ if (n != nullptr && m != nullptr) {
return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
VectorNode::is_all_ones_vector(m);
}
return false;
}
enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
- if (con == NULL || con == (address)1) {
+ if (con == nullptr || con == (address)1) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
if (rtype == relocInfo::oop_type) {
__ movoop(dst_reg, (jobject)con);
enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
- if (con == NULL) {
+ if (con == nullptr) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::oop_type, "unexpected reloc type");
__ set_narrow_oop(dst_reg, (jobject)con);
enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
- if (con == NULL) {
+ if (con == nullptr) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
__ set_narrow_klass(dst_reg, (Klass *)con);
Register result_reg = as_Register($result$$reg);
Label miss;
C2_MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
- NULL, &miss,
+ nullptr, &miss,
/*set_cond_codes:*/ true);
if ($primary) {
__ mov(result_reg, zr);
}
__ bind(miss);
address addr = (address)$meth$$method;
address call;
if (!_method) {
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
- if (call == NULL) {
+ if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
} else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
// The NOP here is purely to ensure that eliding a call to
} else {
int method_index = resolved_method_index(cbuf);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
- if (call == NULL) {
+ if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
- if (stub == NULL) {
+ if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
}
enc_class aarch64_enc_java_dynamic_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
int method_index = resolved_method_index(cbuf);
address call = __ ic_call((address)$meth$$method, method_index);
- if (call == NULL) {
+ if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
__ post_call_nop();
if (Compile::current()->max_vector_size() > 0) {
C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
__ call_Unimplemented();
}
+ if (tf()->returns_inline_type_as_fields() && !_method->is_method_handle_intrinsic()) {
+ if (!_method->signature()->returns_null_free_inline_type()) {
+ // The last return value is not set by the callee but used to pass IsInit information to compiled code.
+ // Search for the corresponding projection, get the register and emit code that initialized it.
+ uint con = (tf()->range_cc()->cnt() - 1);
+ for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+ ProjNode* proj = fast_out(i)->as_Proj();
+ if (proj->_con == con) {
+ // Set IsInit if r0 is non-null (a non-null value is returned buffered or scalarized)
+ OptoReg::Name optoReg = ra_->get_reg_first(proj);
+ VMReg reg = OptoReg::as_VMReg(optoReg, ra_->_framesize, OptoReg::reg2stack(ra_->_matcher._new_SP));
+ Register toReg = reg->is_reg() ? reg->as_Register() : rscratch1;
+ __ cmp(r0, zr);
+ __ cset(toReg, Assembler::NE);
+ if (reg->is_stack()) {
+ int st_off = reg->reg2stack() * VMRegImpl::stack_slot_size;
+ __ str(toReg, Address(sp, st_off));
+ }
+ break;
+ }
+ }
+ }
+ if (return_value_is_used()) {
+ // An inline type is returned as fields in multiple registers.
+ // R0 either contains an oop if the inline type is buffered or a pointer
+ // to the corresponding InlineKlass with the lowest bit set to 1. Zero r0
+ // if the lowest bit is set to allow C2 to use the oop after null checking.
+ // r0 &= (r0 & 1) - 1
+ __ andr(rscratch1, r0, 0x1);
+ __ sub(rscratch1, rscratch1, 0x1);
+ __ andr(r0, r0, rscratch1);
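+ // Worked example: tagged InlineKlass pointer (low bit 1): rscratch1 = 1 - 1 = 0, so r0 := 0;
+ // buffered oop (low bit 0): rscratch1 = 0 - 1 = all ones, so r0 is left unchanged.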
+ }
+ }
%}
enc_class aarch64_enc_java_to_runtime(method meth) %{
C2_MacroAssembler _masm(&cbuf);
// which loads the absolute address into a register.
address entry = (address)$meth$$method;
CodeBlob *cb = CodeCache::find_blob(entry);
if (cb) {
address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
- if (call == NULL) {
+ if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
__ post_call_nop();
} else {
__ b(cont);
} else if (LockingMode == LM_LEGACY) {
// Set tmp to be (markWord of object | UNLOCK_VALUE).
__ orr(tmp, disp_hdr, markWord::unlocked_value);
+ if (EnableValhalla) {
+ // Mask the inline_type bit so that we take the slow path if the object is an inline type.
+ __ andr(tmp, tmp, ~((int) markWord::inline_type_bit_in_place));
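+ // (an inline type's markWord has this bit set, so the CAS below fails and falls through to the slow path)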
+ }
+
// Initialize the box. (Must happen before we update the object mark!)
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
// Compare object markWord with an unlocked value (tmp) and if
// equal exchange the stack address of our box with object markWord.
}
// Handle existing monitor.
__ bind(object_has_monitor);
- // The object's monitor m is unlocked iff m->owner == NULL,
+ // The object's monitor m is unlocked iff m->owner == nullptr,
// otherwise m->owner may contain a thread or a stack address.
//
- // Try to CAS m->owner from NULL to current thread.
+ // Try to CAS m->owner from nullptr to current thread.
__ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
__ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
if (LockingMode != LM_LIGHTWEIGHT) {
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
- // NULL Pointer Immediate
+ // Null Pointer Immediate
operand immP0()
%{
predicate(n->get_ptr() == 0);
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
- // Narrow NULL Pointer Immediate
+ // Narrow Null Pointer Immediate
operand immN0()
%{
predicate(n->get_narrowcon() == 0);
match(ConN);
%{
match(Set dst con);
ins_cost(INSN_COST * 4);
format %{
- "mov $dst, $con\t# ptr\n\t"
+ "mov $dst, $con\t# ptr"
%}
ins_encode(aarch64_enc_mov_p(dst, con));
ins_pipe(ialu_imm);
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# NULL ptr" %}
+ format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p0(dst, con));
ins_pipe(ialu_imm);
%}
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# NULL ptr" %}
+ format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p1(dst, con));
ins_pipe(ialu_imm);
%}
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# compressed NULL ptr" %}
+ format %{ "mov $dst, $con\t# compressed nullptr ptr" %}
ins_encode(aarch64_enc_mov_n0(dst, con));
ins_pipe(ialu_imm);
%}
%}
ins_pipe(ialu_reg);
%}
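+ // castN2X: reinterpret a compressed pointer's bits as a long without decoding it.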
+ instruct castN2X(iRegLNoSp dst, iRegN src) %{
+ match(Set dst (CastP2X src));
+
+ ins_cost(INSN_COST);
+ format %{ "mov $dst, $src\t# ptr -> long" %}
+
+ ins_encode %{
+ if ($dst$$reg != $src$$reg) {
+ __ mov(as_Register($dst$$reg), as_Register($src$$reg));
+ }
+ %}
+
+ ins_pipe(ialu_reg);
+ %}
+
instruct castP2X(iRegLNoSp dst, iRegP src) %{
match(Set dst (CastP2X src));
ins_cost(INSN_COST);
format %{ "mov $dst, $src\t# ptr -> long" %}
%}
// ============================================================================
// clearing of an array
- instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
+ instruct clearArray_reg_reg_immL0(iRegL_R11 cnt, iRegP_R10 base, immL0 zero, Universe dummy, rFlagsReg cr)
%{
- match(Set dummy (ClearArray cnt base));
+ match(Set dummy (ClearArray (Binary cnt base) zero));
effect(USE_KILL cnt, USE_KILL base, KILL cr);
ins_cost(4 * INSN_COST);
format %{ "ClearArray $cnt, $base" %}
ins_encode %{
address tpc = __ zero_words($base$$Register, $cnt$$Register);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
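+ // ClearArray with a variable init value: fill_words stores $val into each word instead of
+ // zeroing; matched when the ClearArrayNode requires word-at-a-time copying (word_copy_only()).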
+ instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, iRegL val, Universe dummy, rFlagsReg cr)
+ %{
+ predicate(((ClearArrayNode*)n)->word_copy_only());
+ match(Set dummy (ClearArray (Binary cnt base) val));
+ effect(USE_KILL cnt, USE_KILL base, KILL cr);
+
+ ins_cost(4 * INSN_COST);
+ format %{ "ClearArray $cnt, $base, $val" %}
+
+ ins_encode %{
+ __ fill_words($base$$Register, $cnt$$Register, $val$$Register);
+ %}
+
+ ins_pipe(pipe_class_memory);
+ %}
+
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
predicate((uint64_t)n->in(2)->get_long()
- < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
+ < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord)
+ && !((ClearArrayNode*)n)->word_copy_only());
match(Set dummy (ClearArray cnt base));
effect(TEMP temp, USE_KILL base, KILL cr);
ins_cost(4 * INSN_COST);
format %{ "ClearArray $cnt, $base" %}
ins_encode %{
address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_call);
%}
// Call Runtime Instruction
+ // entry point is null, target holds the address to call
+ instruct CallLeafNoFPIndirect(iRegP target)
+ %{
+ predicate(n->as_Call()->entry_point() == nullptr);
+
+ match(CallLeafNoFP target);
+
+ ins_cost(CALL_COST);
+
+ format %{ "CALL, runtime leaf nofp indirect $target" %}
+
+ ins_encode %{
+ __ blr($target$$Register);
+ %}
+
+ ins_pipe(pipe_class_call);
+ %}
+
instruct CallLeafNoFPDirect(method meth)
%{
+ predicate(n->as_Call()->entry_point() != nullptr);
+
match(CallLeafNoFP);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
ins_encode %{
address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
$result$$Register, $tmp$$Register, 1);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
ins_encode %{
address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
$result$$Register, $tmp$$Register, 2);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
match(Set result (CountPositives ary1 len));
effect(USE_KILL ary1, USE_KILL len, KILL cr);
format %{ "count positives byte[] $ary1,$len -> $result" %}
ins_encode %{
address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe( pipe_slow );
format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
ins_encode %{
address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
$vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
$vtmp2$$FloatRegister, $tmp$$Register);
- if (tpc == NULL) {
+ if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);