src/hotspot/cpu/x86/macroAssembler_x86.cpp

*** 26,10 ***
--- 26,11 ---
  #include "jvm.h"
  #include "asm/assembler.hpp"
  #include "asm/assembler.inline.hpp"
  #include "compiler/compiler_globals.hpp"
  #include "compiler/disassembler.hpp"
+ #include "ci/ciInlineKlass.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/barrierSetAssembler.hpp"
  #include "gc/shared/collectedHeap.inline.hpp"
  #include "gc/shared/tlab_globals.hpp"
  #include "interpreter/bytecodeHistogram.hpp"

*** 46,14 ***
--- 47,19 ---
  #include "runtime/objectMonitor.hpp"
  #include "runtime/os.hpp"
  #include "runtime/safepoint.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
+ #include "runtime/signature_cc.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.hpp"
  #include "utilities/macros.hpp"
+ #include "vmreg_x86.inline.hpp"
  #include "crc32c.h"
+ #ifdef COMPILER2
+ #include "opto/output.hpp"
+ #endif
  
  #ifdef PRODUCT
  #define BLOCK_COMMENT(str) /* nothing */
  #define STOP(error) stop(error)
  #else

*** 1648,10 ***
--- 1654,14 ---
    pass_arg1(this, arg_1);
    pass_arg0(this, arg_0);
    call_VM_leaf(entry_point, 3);
  }
  
+ void MacroAssembler::super_call_VM_leaf(address entry_point) {
+   MacroAssembler::call_VM_leaf_base(entry_point, 1);
+ }
+ 
  void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
    pass_arg0(this, arg_0);
    MacroAssembler::call_VM_leaf_base(entry_point, 1);
  }
  

*** 2724,10 ***
--- 2734,150 ---
      // nothing to do, (later) access of M[reg + offset]
      // will provoke OS NULL exception if reg = NULL
    }
  }
  
+ void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
+   andptr(markword, markWord::inline_type_mask_in_place);
+   cmpptr(markword, markWord::inline_type_pattern);
+   jcc(Assembler::equal, is_inline_type);
+ }
+ 
+ void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
+   movl(temp_reg, Address(klass, Klass::access_flags_offset()));
+   testl(temp_reg, JVM_ACC_INLINE);
+   jcc(Assembler::notZero, is_inline_type);
+ }
+ 
+ void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
+   testptr(object, object);
+   jcc(Assembler::equal, not_inline_type);
+   const int is_inline_type_mask = markWord::inline_type_pattern;
+   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
+   andptr(tmp, is_inline_type_mask);
+   cmpptr(tmp, is_inline_type_mask);
+   jcc(Assembler::notEqual, not_inline_type);
+ }
+ 
+ void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
+ #ifdef ASSERT
+   {
+     Label done_check;
+     test_klass_is_inline_type(klass, temp_reg, done_check);
+     stop("test_klass_is_empty_inline_type with non inline type klass");
+     bind(done_check);
+   }
+ #endif
+   movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
+   testl(temp_reg, InstanceKlass::misc_flags_is_empty_inline_type());
+   jcc(Assembler::notZero, is_empty_inline_type);
+ }
+ 
+ void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
+   movl(temp_reg, flags);
+   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
+   andl(temp_reg, 0x1);
+   testl(temp_reg, temp_reg);
+   jcc(Assembler::notZero, is_null_free_inline_type);
+ }
+ 
+ void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
+   movl(temp_reg, flags);
+   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
+   andl(temp_reg, 0x1);
+   testl(temp_reg, temp_reg);
+   jcc(Assembler::zero, not_null_free_inline_type);
+ }
+ 
+ void MacroAssembler::test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined) {
+   movl(temp_reg, flags);
+   shrl(temp_reg, ConstantPoolCacheEntry::is_inlined_shift);
+   andl(temp_reg, 0x1);
+   testl(temp_reg, temp_reg);
+   jcc(Assembler::notZero, is_inlined);
+ }
+ 
+ void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
+   Label test_mark_word;
+   // load mark word
+   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
+   // check whether the mark word is unlocked; if not, it may be displaced
+   testl(temp_reg, markWord::unlocked_value);
+   jccb(Assembler::notZero, test_mark_word);
+   // slow path: load the prototype header from the klass instead
+   push(rscratch1);
+   load_prototype_header(temp_reg, oop, rscratch1);
+   pop(rscratch1);
+ 
+   bind(test_mark_word);
+   testl(temp_reg, test_bit);
+   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
+ }
+ 
+ void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg,
+                                               Label& is_flattened_array) {
+ #ifdef _LP64
+   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flattened_array);
+ #else
+   load_klass(temp_reg, oop, noreg);
+   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
+   test_flattened_array_layout(temp_reg, is_flattened_array);
+ #endif
+ }
+ 
+ void MacroAssembler::test_non_flattened_array_oop(Register oop, Register temp_reg,
+                                                   Label& is_non_flattened_array) {
+ #ifdef _LP64
+   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flattened_array);
+ #else
+   load_klass(temp_reg, oop, noreg);
+   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
+   test_non_flattened_array_layout(temp_reg, is_non_flattened_array);
+ #endif
+ }
+ 
+ void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
+ #ifdef _LP64
+   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
+ #else
+   load_klass(temp_reg, oop, noreg);
+   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
+   test_null_free_array_layout(temp_reg, is_null_free_array);
+ #endif
+ }
+ 
+ void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
+ #ifdef _LP64
+   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
+ #else
+   load_klass(temp_reg, oop, noreg);
+   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
+   test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
+ #endif
+ }
+ 
+ void MacroAssembler::test_flattened_array_layout(Register lh, Label& is_flattened_array) {
+   testl(lh, Klass::_lh_array_tag_vt_value_bit_inplace);
+   jcc(Assembler::notZero, is_flattened_array);
+ }
+ 
+ void MacroAssembler::test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array) {
+   testl(lh, Klass::_lh_array_tag_vt_value_bit_inplace);
+   jcc(Assembler::zero, is_non_flattened_array);
+ }
+ 
+ void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
+   testl(lh, Klass::_lh_null_free_bit_inplace);
+   jcc(Assembler::notZero, is_null_free_array);
+ }
+ 
+ void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
+   testl(lh, Klass::_lh_null_free_bit_inplace);
+   jcc(Assembler::zero, is_non_null_free_array);
+ }
+ 
  void MacroAssembler::os_breakpoint() {
    // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
    // (e.g., MSVC can't call ps() otherwise)
    call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
  }

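Note: the inline-type tests added above all reduce to a single bit-pattern check on the mark word (or, for klasses, a flag test). A minimal C++ sketch of the mark-word predicate they emit, using only the markWord constants that appear in the patch:

    // Sketch (assumption: markWord exposes these constants as used above).
    static bool mark_is_inline_type(uintptr_t mark) {
      return (mark & markWord::inline_type_mask_in_place) == markWord::inline_type_pattern;
    }
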
*** 3550,10 ***
--- 3700,129 ---
  
  void MacroAssembler::testptr(Register dst, Register src) {
    LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
  }
  
+ // Object / value buffer allocation...
+ //
+ // Kills klass and rsi on LP64
+ void MacroAssembler::allocate_instance(Register klass, Register new_obj,
+                                        Register t1, Register t2,
+                                        bool clear_fields, Label& alloc_failed)
+ {
+   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
+   Register layout_size = t1;
+   assert(new_obj == rax, "needs to be rax, according to barrier asm eden_allocate");
+   assert_different_registers(klass, new_obj, t1, t2);
+ 
+   // get instance_size in InstanceKlass (scaled to a count of bytes)
+   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
+   // test to see if it has a finalizer or is malformed in some way
+   testl(layout_size, Klass::_lh_instance_slow_path_bit);
+   jcc(Assembler::notZero, slow_case_no_pop);
+ 
+   // Allocate the instance:
+   //  If TLAB is enabled:
+   //    Try to allocate in the TLAB.
+   //    If fails, go to the slow path.
+   //  Else If inline contiguous allocations are enabled:
+   //    Try to allocate in eden.
+   //    If fails due to heap end, go to slow path.
+   //
+   //  If TLAB is enabled OR inline contiguous is enabled:
+   //    Initialize the allocation.
+   //    Exit.
+   //
+   //  Go to slow path.
+   const bool allow_shared_alloc =
+     Universe::heap()->supports_inline_contig_alloc();
+ 
+   push(klass);
+   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
+ #ifndef _LP64
+   if (UseTLAB || allow_shared_alloc) {
+     get_thread(thread);
+   }
+ #endif // _LP64
+ 
+   if (UseTLAB) {
+     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
+     if (ZeroTLAB || (!clear_fields)) {
+       // the fields have been already cleared
+       jmp(initialize_header);
+     } else {
+       // initialize both the header and fields
+       jmp(initialize_object);
+     }
+   } else {
+     // Allocation in the shared Eden, if allowed.
+     //
+     eden_allocate(thread, new_obj, layout_size, 0, t2, slow_case);
+   }
+ 
+   // If UseTLAB or allow_shared_alloc is true, the object was created above and
+   // still needs to be initialized. Otherwise, skip and go to the slow path.
+   if (UseTLAB || allow_shared_alloc) {
+     if (clear_fields) {
+       // The object is initialized before the header.  If the object size is
+       // zero, go directly to the header initialization.
+       bind(initialize_object);
+       decrement(layout_size, sizeof(oopDesc));
+       jcc(Assembler::zero, initialize_header);
+ 
+       // Initialize topmost object field, divide size by 8, check if odd and
+       // test if zero.
+       Register zero = klass;
+       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
+       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
+ 
+   #ifdef ASSERT
+       // make sure instance_size was multiple of 8
+       Label L;
+       // Ignore partial flag stall after shrl() since it is debug VM
+       jcc(Assembler::carryClear, L);
+       stop("object size is not multiple of 2 - adjust this code");
+       bind(L);
+       // must be > 0, no extra check needed here
+   #endif
+ 
+       // initialize remaining object fields: instance_size was a multiple of 8
+       {
+         Label loop;
+         bind(loop);
+         movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
+         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
+         decrement(layout_size);
+         jcc(Assembler::notZero, loop);
+       }
+     } // clear_fields
+ 
+     // initialize object header only.
+     bind(initialize_header);
+     pop(klass);
+     Register mark_word = t2;
+     movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
+     movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
+ #ifdef _LP64
+     xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
+     store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
+ #endif
+     movptr(t2, klass);         // preserve klass
+     Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+     store_klass(new_obj, t2, tmp_store_klass);  // src klass reg is potentially compressed
+ 
+     jmp(done);
+   }
+ 
+   bind(slow_case);
+   pop(klass);
+   bind(slow_case_no_pop);
+   jmp(alloc_failed);
+ 
+   bind(done);
+ }
+ 
  // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
  void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                     Register var_size_in_bytes,
                                     int con_size_in_bytes,
                                     Register t1,

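Note: in allocate_instance, field clearing works in qword strides: layout_size starts as the instance size in bytes, the header is subtracted, and shrl(layout_size, LogBytesPerLong) converts the remainder to a count of 8-byte stores (the carry flag catches odd sizes in debug builds). A worked example, assuming LP64 with a 16-byte header and a 24-byte instance:

    // layout_size = 24 - sizeof(oopDesc) = 24 - 16 = 8
    // 8 >> LogBytesPerLong (3) = 1 qword to clear, carry clear (size was a multiple of 8)
    // loop (layout_size = 1): zero qword at new_obj + 1*8 + 16 - 8 = new_obj + 16
    // i.e. exactly the 8 bytes of fields behind the header.
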
*** 3627,10 ***
--- 3896,60 ---
    }
  
    bind(done);
  }
  
+ void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
+   movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
+ #ifdef ASSERT
+   {
+     Label done;
+     cmpptr(inline_klass, 0);
+     jcc(Assembler::notEqual, done);
+     stop("get_inline_type_field_klass contains no inline klass");
+     bind(done);
+   }
+ #endif
+   movptr(inline_klass, Address(inline_klass, index, Address::times_ptr));
+ }
+ 
+ void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
+ #ifdef ASSERT
+   {
+     Label done_check;
+     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
+     stop("get_default_value_oop from non inline type klass");
+     bind(done_check);
+   }
+ #endif
+   Register offset = temp_reg;
+   // Getting the offset of the pre-allocated default value
+   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
+   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
+ 
+   // Getting the mirror
+   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
+   resolve_oop_handle(obj, inline_klass);
+ 
+   // Getting the pre-allocated default value from the mirror
+   Address field(obj, offset, Address::times_1);
+   load_heap_oop(obj, field);
+ }
+ 
+ void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
+ #ifdef ASSERT
+   {
+     Label done_check;
+     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
+     stop("get_empty_value from non-empty inline klass");
+     bind(done_check);
+   }
+ #endif
+   get_default_value_oop(inline_klass, temp_reg, obj);
+ }
+ 
+ 
  // Look up the method for a megamorphic invokeinterface call.
  // The target method is determined by <intf_klass, itable_index>.
  // The receiver klass is in recv_klass.
  // On success, the result will be in method_result, and execution falls through.
  // On failure, execution transfers to the given label.

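Note: get_default_value_oop chases three links: the InlineKlass fixed block, the stored offset of the pre-allocated default value, and the klass mirror that holds it. The equivalent C++ chain, as a pseudocode sketch built only from the accessors named in the assembly (not the VM's actual helper):

    int off       = vk->default_value_offset();   // via adr_inlineklass_fixed_block
    oop mirror    = vk->java_mirror();            // Klass::java_mirror_offset, then resolve_oop_handle
    oop def_value = mirror->obj_field(off);       // the load_heap_oop above
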
*** 3975,11 ***
      bind(L);
    }
  }
  
  void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
!   if (!VerifyOops) return;
  
    // Pass register number to verify_oop_subroutine
    const char* b = NULL;
    {
      ResourceMark rm;
--- 4294,15 ---
      bind(L);
    }
  }
  
  void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
!   if (!VerifyOops || VerifyAdapterSharing) {
+     // The address of the code string embedded below confuses VerifyAdapterSharing
+     // because it may differ between otherwise equivalent adapters.
+     return;
+   }
  
    // Pass register number to verify_oop_subroutine
    const char* b = NULL;
    {
      ResourceMark rm;

*** 4034,11 ***
    offset += wordSize;           // return PC is on stack
    return Address(rsp, scale_reg, scale_factor, offset);
  }
  
  void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
!   if (!VerifyOops) return;
  
    // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
    // Pass register number to verify_oop_subroutine
    const char* b = NULL;
    {
--- 4357,15 ---
    offset += wordSize;           // return PC is on stack
    return Address(rsp, scale_reg, scale_factor, offset);
  }
  
  void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
!   if (!VerifyOops || VerifyAdapterSharing) {
+     // The address of the code string embedded below confuses VerifyAdapterSharing
+     // because it may differ between otherwise equivalent adapters.
+     return;
+   }
  
    // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
    // Pass register number to verify_oop_subroutine
    const char* b = NULL;
    {

*** 4536,20 ***
    movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
  }
  
  void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
    if (UseCompressedClassPointers) {
      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
      decode_klass_not_null(dst, tmp);
    } else
  #endif
!     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
  
  void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
--- 4863,33 ---
    movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
  }
  
+ void MacroAssembler::load_metadata(Register dst, Register src) {
+   if (UseCompressedClassPointers) {
+     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+   } else {
+     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+   }
+ }
+ 
  void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);
  #ifdef _LP64
    if (UseCompressedClassPointers) {
      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
      decode_klass_not_null(dst, tmp);
    } else
  #endif
!   movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ }
+ 
+ void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
+   load_klass(dst, src, tmp);
+   movptr(dst, Address(dst, Klass::prototype_header_offset()));
  }
  
  void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
    assert_different_registers(src, tmp);
    assert_different_registers(dst, tmp);

*** 4573,21 ***
      bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
    }
  }
  
  void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
!                                      Register tmp1, Register tmp2) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    decorators = AccessInternal::decorator_fixup(decorators);
    bool as_raw = (decorators & AS_RAW) != 0;
    if (as_raw) {
!     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2);
    } else {
!     bs->store_at(this, decorators, type, dst, src, tmp1, tmp2);
    }
  }
  
  void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                     Register thread_tmp, DecoratorSet decorators) {
    access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
  }
  
--- 4913,61 ---
      bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
    }
  }
  
  void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
!                                      Register tmp1, Register tmp2, Register tmp3) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    decorators = AccessInternal::decorator_fixup(decorators);
    bool as_raw = (decorators & AS_RAW) != 0;
    if (as_raw) {
!     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
    } else {
!     bs->store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
    }
  }
  
+ void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
+                                        Register inline_klass) {
+   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+   bs->value_copy(this, decorators, src, dst, inline_klass);
+ }
+ 
+ void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
+   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
+   movl(offset, Address(offset, InlineKlass::first_field_offset_offset()));
+ }
+ 
+ void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
+   // ((address) (void*) o) + vk->first_field_offset();
+   Register offset = (data == oop) ? rscratch1 : data;
+   first_field_offset(inline_klass, offset);
+   if (data == oop) {
+     addptr(data, offset);
+   } else {
+     lea(data, Address(oop, offset));
+   }
+ }
+ 
+ void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
+                                                 Register index, Register data) {
+   assert(index != rcx, "index needs to shift by rcx");
+   assert_different_registers(array, array_klass, index);
+   assert_different_registers(rcx, array, index);
+ 
+   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
+   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
+ 
+   // Klass::layout_helper_log2_element_size(lh)
+   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
+   shrl(rcx, Klass::_lh_log2_element_size_shift);
+   andl(rcx, Klass::_lh_log2_element_size_mask);
+   shlptr(index); // index << rcx
+ 
+   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_INLINE_TYPE)));
+ }
+ 
  void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                     Register thread_tmp, DecoratorSet decorators) {
    access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
  }
  

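Note: data_for_value_array_index decodes the log2 element size out of the layout helper and scales the index with a variable shift, which is why the shift count must live in rcx. A worked example, assuming a flattened element size of 16 bytes:

    // lh_log2 = (lh >> Klass::_lh_log2_element_size_shift) & Klass::_lh_log2_element_size_mask;  // = 4
    // data    = array + (index << 4) + arrayOopDesc::base_offset_in_bytes(T_INLINE_TYPE);
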
*** 4596,17 ***
                                              Register thread_tmp, DecoratorSet decorators) {
    access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
  }
  
  void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
!                                     Register tmp2, DecoratorSet decorators) {
!   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
  }
  
  // Used for storing NULLs.
  void MacroAssembler::store_heap_oop_null(Address dst) {
!   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
  }
  
  #ifdef _LP64
  void MacroAssembler::store_klass_gap(Register dst, Register src) {
    if (UseCompressedClassPointers) {
--- 4976,17 ---
                                              Register thread_tmp, DecoratorSet decorators) {
    access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
  }
  
  void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
!                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
!   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2, tmp3);
  }
  
  // Used for storing NULLs.
  void MacroAssembler::store_heap_oop_null(Address dst) {
!   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
  }
  
  #ifdef _LP64
  void MacroAssembler::store_klass_gap(Register dst, Register src) {
    if (UseCompressedClassPointers) {

*** 4916,12 ***
    }
  }
  
  #endif // _LP64
  
  // C2 compiled method's prolog code.
! void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub) {
  
    // WARNING: Initial instruction MUST be 5 bytes or longer so that
    // NativeJump::patch_verified_entry will be able to patch out the entry
    // code safely. The push to verify stack depth is ok at 5 bytes,
    // the frame allocation can be either 3 or 6 bytes. So if we don't do
--- 5296,17 ---
    }
  }
  
  #endif // _LP64
  
+ #ifdef COMPILER2
  // C2 compiled method's prolog code.
! void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
+   int framesize = C->output()->frame_size_in_bytes();
+   int bangsize = C->output()->bang_size_in_bytes();
+   bool fp_mode_24b = false;
+   int stack_bang_size = C->output()->need_stack_bang(bangsize) ? bangsize : 0;
  
    // WARNING: Initial instruction MUST be 5 bytes or longer so that
    // NativeJump::patch_verified_entry will be able to patch out the entry
    // code safely. The push to verify stack depth is ok at 5 bytes,
    // the frame allocation can be either 3 or 6 bytes. So if we don't do

*** 4970,10 ***
--- 5355,16 ---
          addptr(rbp, framesize);
        }
      }
    }
  
+   if (C->needs_stack_repair()) {
+     // Save stack increment just below the saved rbp (also account for fixed framesize and rbp)
+     assert((sp_inc & (StackAlignmentInBytes-1)) == 0, "stack increment not aligned");
+     movptr(Address(rsp, framesize - wordSize), sp_inc + framesize + wordSize);
+   }
+ 
    if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
      framesize -= wordSize;
      movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
    }
  

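Note: the word written here is what the new MacroAssembler::remove_frame (further down in this change) reads back: with stack repair the epilog cannot assume a fixed frame size, so the prolog records the total extension just below the saved rbp. A sketch of the resulting frame layout, assuming the standard push-rbp prolog:

    // [rsp + framesize]            saved rbp
    // [rsp + framesize - wordSize] sp_inc + framesize + wordSize   (total bytes to release)
    // ... spills / locals ...
    // remove_frame then restores rsp and rbp in one step:
    //   movq(rbp, Address(rsp, initial_framesize));
    //   addq(rsp, Address(rsp, initial_framesize - wordSize));
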
*** 4998,31 ***
      jcc(Assembler::equal, L);
      STOP("Stack is not properly aligned!");
      bind(L);
    }
  #endif
- 
-   if (!is_stub) {
-     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
-     bs->nmethod_entry_barrier(this);
-   }
  }
  
  #if COMPILER2_OR_JVMCI
  
  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
! void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
    // cnt - number of qwords (8-byte words).
    // base - start address, qword aligned.
    Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
    bool use64byteVector = MaxVectorSize == 64 && AVX3Threshold == 0;
    if (use64byteVector) {
!     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
    } else if (MaxVectorSize >= 32) {
!     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
    } else {
!     pxor(xtmp, xtmp);
    }
    jmp(L_zero_64_bytes);
  
    BIND(L_loop);
    if (MaxVectorSize >= 32) {
--- 5389,30 ---
      jcc(Assembler::equal, L);
      STOP("Stack is not properly aligned!");
      bind(L);
    }
  #endif
  }
+ #endif // COMPILER2
  
  #if COMPILER2_OR_JVMCI
  
  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
! void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
    // cnt - number of qwords (8-byte words).
    // base - start address, qword aligned.
    Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
    bool use64byteVector = MaxVectorSize == 64 && AVX3Threshold == 0;
    if (use64byteVector) {
!     evpbroadcastq(xtmp, val, AVX_512bit);
    } else if (MaxVectorSize >= 32) {
!     movdq(xtmp, val);
+     punpcklqdq(xtmp, xtmp);
+     vinserti128_high(xtmp, xtmp);
    } else {
!     movdq(xtmp, val);
+     punpcklqdq(xtmp, xtmp);
    }
    jmp(L_zero_64_bytes);
  
    BIND(L_loop);
    if (MaxVectorSize >= 32) {

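Note: xmm_clear_mem now fills memory with an arbitrary 64-bit pattern in val rather than zeroing, so each vector path splats the value instead of xor-ing the register with itself. What the three paths compute, sketched with the matching intrinsics (an illustration, not code from the patch):

    #include <immintrin.h>
    int64_t v = 0;                      // fill pattern, e.g. a default value's bit pattern
    __m512i z = _mm512_set1_epi64(v);   // evpbroadcastq                  (AVX-512 path)
    __m256i y = _mm256_set1_epi64x(v);  // movdq + punpcklqdq + vinserti128_high
    __m128i x = _mm_set1_epi64x(v);     // movdq + punpcklqdq
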
*** 5041,11 ***
  
    // Copy trailing 64 bytes
    if (use64byteVector) {
      addptr(cnt, 8);
      jccb(Assembler::equal, L_end);
!     fill64_masked_avx(3, base, 0, xtmp, mask, cnt, rtmp, true);
      jmp(L_end);
    } else {
      addptr(cnt, 4);
      jccb(Assembler::less, L_tail);
      if (MaxVectorSize >= 32) {
--- 5431,11 ---
  
    // Copy trailing 64 bytes
    if (use64byteVector) {
      addptr(cnt, 8);
      jccb(Assembler::equal, L_end);
!     fill64_masked_avx(3, base, 0, xtmp, mask, cnt, val, true);
      jmp(L_end);
    } else {
      addptr(cnt, 4);
      jccb(Assembler::less, L_tail);
      if (MaxVectorSize >= 32) {

*** 5060,11 ***
  
    BIND(L_tail);
    addptr(cnt, 4);
    jccb(Assembler::lessEqual, L_end);
    if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
!     fill32_masked_avx(3, base, 0, xtmp, mask, cnt, rtmp);
    } else {
      decrement(cnt);
  
      BIND(L_sloop);
      movq(Address(base, 0), xtmp);
--- 5450,11 ---
  
    BIND(L_tail);
    addptr(cnt, 4);
    jccb(Assembler::lessEqual, L_end);
    if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
!     fill32_masked_avx(3, base, 0, xtmp, mask, cnt, val);
    } else {
      decrement(cnt);
  
      BIND(L_sloop);
      movq(Address(base, 0), xtmp);

*** 5073,10 ***
--- 5463,328 ---
      jccb(Assembler::greaterEqual, L_sloop);
    }
    BIND(L_end);
  }
  
+ int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
+   // An inline type might be returned. If fields are in registers we
+   // need to allocate an inline type instance and initialize it with
+   // the values of the fields.
+   Label skip;
+   // We only need to allocate a new buffered inline type if one was not already returned
+   testptr(rax, 1);
+   jcc(Assembler::zero, skip);
+   int call_offset = -1;
+ 
+ #ifdef _LP64
+   // The following code is similar to allocate_instance but has some slight differences,
+   // e.g. the object size is never zero and is sometimes a compile-time constant, and storing
+   // the klass pointer after allocation is unnecessary if vk != NULL. allocate_instance is not aware of these.
+   Label slow_case;
+   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
+   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it if allocation fails
+   if (vk != NULL) {
+     // Called from C1, where the return type is statically known.
+     movptr(rbx, (intptr_t)vk->get_InlineKlass());
+     jint obj_size = vk->layout_helper();
+     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
+     if (UseTLAB) {
+       tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
+     } else {
+       eden_allocate(r15_thread, rax, noreg, obj_size, r13, slow_case);
+     }
+   } else {
+     // Called from the interpreter. RAX contains (the InlineKlass* of the return type) | 0x01
+     mov(rbx, rax);
+     andptr(rbx, -2);
+     movl(r14, Address(rbx, Klass::layout_helper_offset()));
+     if (UseTLAB) {
+       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
+     } else {
+       eden_allocate(r15_thread, rax, r14, 0, r13, slow_case);
+     }
+   }
+   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+     // 2. Initialize buffered inline instance header
+     Register buffer_obj = rax;
+     movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
+     xorl(r13, r13);
+     store_klass_gap(buffer_obj, r13);
+     if (vk == NULL) {
+       // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
+       mov(r13, rbx);
+     }
+     Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+     store_klass(buffer_obj, rbx, tmp_store_klass);
+     // 3. Initialize its fields with an inline class specific handler
+     if (vk != NULL) {
+       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
+     } else {
+       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
+       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
+       call(rbx);
+     }
+     jmp(skip);
+   }
+   bind(slow_case);
+   // We failed to allocate a new inline type; fall back to a runtime
+   // call. Some oop fields may be live in registers, but we can't
+   // tell. The runtime call will take care of preserving them
+   // across a GC if there is one.
+   mov(rax, rscratch1);
+ #endif
+ 
+   if (from_interpreter) {
+     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
+   } else {
+     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
+     call_offset = offset();
+   }
+ 
+   bind(skip);
+   return call_offset;
+ }
+ 
+ // Move a value between registers/stack slots and update the reg_state
+ bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
+   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
+   if (reg_state[to->value()] == reg_written) {
+     return true; // Already written
+   }
+   if (from != to && bt != T_VOID) {
+     if (reg_state[to->value()] == reg_readonly) {
+       return false; // Not yet writable
+     }
+     if (from->is_reg()) {
+       if (to->is_reg()) {
+         if (from->is_XMMRegister()) {
+           if (bt == T_DOUBLE) {
+             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
+           } else {
+             assert(bt == T_FLOAT, "must be float");
+             movflt(to->as_XMMRegister(), from->as_XMMRegister());
+           }
+         } else {
+           movq(to->as_Register(), from->as_Register());
+         }
+       } else {
+         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+         Address to_addr = Address(rsp, st_off);
+         if (from->is_XMMRegister()) {
+           if (bt == T_DOUBLE) {
+             movdbl(to_addr, from->as_XMMRegister());
+           } else {
+             assert(bt == T_FLOAT, "must be float");
+             movflt(to_addr, from->as_XMMRegister());
+           }
+         } else {
+           movq(to_addr, from->as_Register());
+         }
+       }
+     } else {
+       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
+       if (to->is_reg()) {
+         if (to->is_XMMRegister()) {
+           if (bt == T_DOUBLE) {
+             movdbl(to->as_XMMRegister(), from_addr);
+           } else {
+             assert(bt == T_FLOAT, "must be float");
+             movflt(to->as_XMMRegister(), from_addr);
+           }
+         } else {
+           movq(to->as_Register(), from_addr);
+         }
+       } else {
+         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+         movq(r13, from_addr);
+         movq(Address(rsp, st_off), r13);
+       }
+     }
+   }
+   // Update register states
+   reg_state[from->value()] = reg_writable;
+   reg_state[to->value()] = reg_written;
+   return true;
+ }
+ 
+ // Calculate the extra stack space required for packing or unpacking inline
+ // args and adjust the stack pointer
+ int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
+   // Two additional slots to account for the return address
+   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
+   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
+   // Save the return address, adjust the stack (make sure it is properly
+   // 16-byte aligned) and copy the return address to the new top of the stack.
+   // The stack will be repaired on return (see MacroAssembler::remove_frame).
+   assert(sp_inc > 0, "sanity");
+   pop(r13);
+   subptr(rsp, sp_inc);
+   push(r13);
+   return sp_inc;
+ }
+ 
+ // Read all fields from an inline type buffer and store the field values in registers/stack slots.
+ bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
+                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
+                                           RegState reg_state[]) {
+   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
+   assert(from->is_valid(), "source must be valid");
+   Register fromReg;
+   if (from->is_reg()) {
+     fromReg = from->as_Register();
+   } else {
+     int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+     movq(r10, Address(rsp, st_off));
+     fromReg = r10;
+   }
+ 
+   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
+   bool done = true;
+   bool mark_done = true;
+   VMReg toReg;
+   BasicType bt;
+   while (stream.next(toReg, bt)) {
+     assert(toReg->is_valid(), "destination must be valid");
+     int off = sig->at(stream.sig_index())._offset;
+     assert(off > 0, "offset in object should be positive");
+     Address fromAddr = Address(fromReg, off);
+ 
+     int idx = (int)toReg->value();
+     if (reg_state[idx] == reg_readonly) {
+       if (idx != from->value()) {
+         mark_done = false;
+       }
+       done = false;
+       continue;
+     } else if (reg_state[idx] == reg_written) {
+       continue;
+     } else {
+       assert(reg_state[idx] == reg_writable, "must be writable");
+       reg_state[idx] = reg_written;
+     }
+ 
+     if (!toReg->is_XMMRegister()) {
+       Register dst = toReg->is_stack() ? r13 : toReg->as_Register();
+       if (is_reference_type(bt)) {
+         load_heap_oop(dst, fromAddr);
+       } else {
+         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
+         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
+       }
+       if (toReg->is_stack()) {
+         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+         movq(Address(rsp, st_off), dst);
+       }
+     } else if (bt == T_DOUBLE) {
+       movdbl(toReg->as_XMMRegister(), fromAddr);
+     } else {
+       assert(bt == T_FLOAT, "must be float");
+       movflt(toReg->as_XMMRegister(), fromAddr);
+     }
+   }
+   sig_index = stream.sig_index();
+   to_index = stream.regs_index();
+ 
+   if (mark_done && reg_state[from->value()] != reg_written) {
+     // This is okay because no one else will write to that slot
+     reg_state[from->value()] = reg_writable;
+   }
+   from_index--;
+   return done;
+ }
+ 
+ bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
+                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
+                                         RegState reg_state[], Register val_array) {
+   assert(sig->at(sig_index)._bt == T_INLINE_TYPE, "should be at end delimiter");
+   assert(to->is_valid(), "destination must be valid");
+ 
+   if (reg_state[to->value()] == reg_written) {
+     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
+     return true; // Already written
+   }
+ 
+   Register val_obj_tmp = r11;
+   Register from_reg_tmp = r14; // Be careful with r14 because it's used for spilling
+   Register tmp1 = r10;
+   Register tmp2 = r13;
+   Register tmp3 = rbx;
+   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
+ 
+   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
+ 
+   if (reg_state[to->value()] == reg_readonly) {
+     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
+       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
+       return false; // Not yet writable
+     }
+     val_obj = val_obj_tmp;
+   }
+ 
+   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_INLINE_TYPE);
+   load_heap_oop(val_obj, Address(val_array, index));
+ 
+   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
+   VMReg fromReg;
+   BasicType bt;
+   while (stream.next(fromReg, bt)) {
+     assert(fromReg->is_valid(), "source must be valid");
+     int off = sig->at(stream.sig_index())._offset;
+     assert(off > 0, "offset in object should be positive");
+     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
+ 
+     Address dst(val_obj, off);
+     if (!fromReg->is_XMMRegister()) {
+       Register src;
+       if (fromReg->is_stack()) {
+         src = from_reg_tmp;
+         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
+       } else {
+         src = fromReg->as_Register();
+       }
+       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
+       if (is_reference_type(bt)) {
+         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
+       } else {
+         store_sized_value(dst, src, size_in_bytes);
+       }
+     } else if (bt == T_DOUBLE) {
+       movdbl(dst, fromReg->as_XMMRegister());
+     } else {
+       assert(bt == T_FLOAT, "must be float");
+       movflt(dst, fromReg->as_XMMRegister());
+     }
+     reg_state[fromReg->value()] = reg_writable;
+   }
+   sig_index = stream.sig_index();
+   from_index = stream.regs_index();
+ 
+   assert(reg_state[to->value()] == reg_writable, "must have already been read");
+   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
+   assert(success, "to register must be writeable");
+   return true;
+ }
+ 
+ VMReg MacroAssembler::spill_reg_for(VMReg reg) {
+   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
+ }
+ 
+ void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
+   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+   if (needs_stack_repair) {
+     movq(rbp, Address(rsp, initial_framesize));
+     // The stack increment resides just below the saved rbp
+     addq(rsp, Address(rsp, initial_framesize - wordSize));
+   } else {
+     if (initial_framesize > 0) {
+       addq(rsp, initial_framesize);
+     }
+     pop(rbp);
+   }
+ }
+ 
  // Clearing constant sized memory using YMM/ZMM registers.
  void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
    assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
    bool use64byteVector = MaxVectorSize > 32 && AVX3Threshold == 0;
  

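Note: move_helper, unpack_inline_helper and pack_inline_helper above coordinate through a small per-VMReg state machine so that shuffling scalarized arguments never clobbers a value that is still needed as a source; callers retry until every move reports done. A sketch of the states and transitions, using only the names that appear in the code above:

    // reg_readonly: still holds a pending source; a write must wait  -> reg_writable once read
    // reg_writable: free to receive a value                          -> reg_written on store
    // reg_written:  destination already satisfied; further moves skip
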
*** 5144,25 ***
          break;
      }
    }
  }
  
! void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
!                                bool is_large, KRegister mask) {
    // cnt      - number of qwords (8-byte words).
    // base     - start address, qword aligned.
    // is_large - if optimizers know cnt is larger than InitArrayShortSize
    assert(base==rdi, "base register must be edi for rep stos");
!   assert(tmp==rax,   "tmp register must be eax for rep stos");
    assert(cnt==rcx,   "cnt register must be ecx for rep stos");
    assert(InitArrayShortSize % BytesPerLong == 0,
      "InitArrayShortSize should be the multiple of BytesPerLong");
  
    Label DONE;
-   if (!is_large || !UseXMMForObjInit) {
-     xorptr(tmp, tmp);
-   }
  
    if (!is_large) {
      Label LOOP, LONG;
      cmpptr(cnt, InitArrayShortSize/BytesPerLong);
      jccb(Assembler::greater, LONG);
--- 5852,22 ---
          break;
      }
    }
  }
  
! void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
!                                bool is_large, bool word_copy_only, KRegister mask) {
    // cnt      - number of qwords (8-byte words).
    // base     - start address, qword aligned.
    // is_large - if optimizers know cnt is larger than InitArrayShortSize
    assert(base==rdi, "base register must be edi for rep stos");
!   assert(val==rax,   "val register must be eax for rep stos");
    assert(cnt==rcx,   "cnt register must be ecx for rep stos");
    assert(InitArrayShortSize % BytesPerLong == 0,
      "InitArrayShortSize should be the multiple of BytesPerLong");
  
    Label DONE;
  
    if (!is_large) {
      Label LOOP, LONG;
      cmpptr(cnt, InitArrayShortSize/BytesPerLong);
      jccb(Assembler::greater, LONG);

*** 5172,24 ***
      decrement(cnt);
      jccb(Assembler::negative, DONE); // Zero length
  
      // Use individual pointer-sized stores for small counts:
      BIND(LOOP);
!     movptr(Address(base, cnt, Address::times_ptr), tmp);
      decrement(cnt);
      jccb(Assembler::greaterEqual, LOOP);
      jmpb(DONE);
  
      BIND(LONG);
    }
  
    // Use longer rep-prefixed ops for non-small counts:
!   if (UseFastStosb) {
      shlptr(cnt, 3); // convert to number of bytes
      rep_stosb();
    } else if (UseXMMForObjInit) {
!     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
    } else {
      NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
      rep_stos();
    }
  
--- 5877,24 ---
      decrement(cnt);
      jccb(Assembler::negative, DONE); // Zero length
  
      // Use individual pointer-sized stores for small counts:
      BIND(LOOP);
!     movptr(Address(base, cnt, Address::times_ptr), val);
      decrement(cnt);
      jccb(Assembler::greaterEqual, LOOP);
      jmpb(DONE);
  
      BIND(LONG);
    }
  
    // Use longer rep-prefixed ops for non-small counts:
!   if (UseFastStosb && !word_copy_only) {
      shlptr(cnt, 3); // convert to number of bytes
      rep_stosb();
    } else if (UseXMMForObjInit) {
!     xmm_clear_mem(base, cnt, val, xtmp, mask);
    } else {
      NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
      rep_stos();
    }
  