< prev index next >

src/hotspot/cpu/arm/macroAssembler_arm.cpp

Print this page

   1 /*
   2  * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2023, Red Hat, Inc.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.

1871   Register tmp2 = R5;
1872 
1873   // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
1874   // before the inline cache check, so we don't have to execute any nop instructions when dispatching
1875   // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
1876   // before the inline cache check here, and not after
1877   align(end_alignment, offset() + ic_check_size());
1878 
1879   int uep_offset = offset();
1880 
1881   ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
1882   ldr(tmp2, Address(Ricklass, CompiledICData::speculated_klass_offset()));
1883   cmp(tmp1, tmp2);
1884 
1885   Label dont;
1886   b(dont, eq);
1887   jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1888   bind(dont);
1889   return uep_offset;
1890 }



































   1 /*
   2  * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2023, Red Hat, Inc.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.

1871   Register tmp2 = R5;
1872 
1873   // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
1874   // before the inline cache check, so we don't have to execute any nop instructions when dispatching
1875   // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
1876   // before the inline cache check here, and not after
1877   align(end_alignment, offset() + ic_check_size());
1878 
1879   int uep_offset = offset();
1880 
1881   ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
1882   ldr(tmp2, Address(Ricklass, CompiledICData::speculated_klass_offset()));
1883   cmp(tmp1, tmp2);
1884 
1885   Label dont;
1886   b(dont, eq);
1887   jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1888   bind(dont);
1889   return uep_offset;
1890 }
1891 
// Tears down the current activation frame before returning to the caller:
// releases the frame body from the stack, then restores FP and the return
// address (LR).
1892 void MacroAssembler::remove_frame(int frame_size_in_bytes) {
1893   add_slow(SP, SP, frame_size_in_bytes);  // pop the frame body; add_slow copes with immediates too large for one add
1894   raw_pop(FP, LR);                        // restore caller's FP and return address
1895 }
1896 
1897 // Unimplemented methods for inline types.
1898 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
1899    Unimplemented();
1900 }
1901 
1902 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
1903   Unimplemented();
1904 }
1905 
1906 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1907                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1908                             RegState reg_state[]) {
1909   Unimplemented();
1910 }
1911 
1912 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1913                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1914                           RegState reg_state[], Register val_array) {
1915   Unimplemented();
1916 }
1917 
1918 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
1919   Unimplemented();
1920 }
1921 
1922 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
1923   Unimplemented();
1924 }
< prev index next >