src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

@@ -25,11 +25,10 @@
 
 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
 
 #include "asm/assembler.hpp"
-#include "oops/compressedOops.hpp"
 
 // MacroAssembler extends Assembler by frequently used macros.
 //
 // Instructions for which a 'better' code sequence exists depending
 // on arguments should also go in here.

@@ -84,14 +83,14 @@
   bool use_XOR_for_compressed_class_base;
 
  public:
   MacroAssembler(CodeBuffer* code) : Assembler(code) {
     use_XOR_for_compressed_class_base
-      = operand_valid_for_logical_immediate
-           (/*is32*/false, (uint64_t)CompressedKlassPointers::base())
-         && ((uint64_t)CompressedKlassPointers::base()
-             > (1UL << log2_intptr(CompressedKlassPointers::range())));
+      = (operand_valid_for_logical_immediate(false /*is32*/,
+                                             (uint64_t)Universe::narrow_klass_base())
+         && ((uint64_t)Universe::narrow_klass_base()
+             > (1UL << log2_intptr(Universe::narrow_klass_range()))));
   }
 
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
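
For context on the hunk above: the rewritten expression keeps the JDK 11 API (Universe::narrow_klass_base()/narrow_klass_range()) while preserving the same test, namely that the klass base is encodable as a 64-bit logical immediate and lies strictly above the whole narrow-klass range. When both hold, the base's set bits cannot overlap any in-range narrow klass offset, so XOR, OR and ADD all coincide on decode and a single EOR suffices in each direction. A minimal standalone sketch of that bit-level argument, using illustrative constants rather than real HotSpot values and assuming, for simplicity, a power-of-two range:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical stand-ins for Universe::narrow_klass_base()/_range():
      // a base with all set bits above bit 30, and a 1 GB range. A single
      // set bit is also encodable as an AArch64 logical immediate.
      const uint64_t base  = UINT64_C(0x800000000);  // bit 35 only
      const uint64_t range = UINT64_C(1) << 30;

      // The condition from the diff: base > 2^log2(range), i.e. the base's
      // bits sit entirely above the bits any in-range offset can occupy.
      assert(base > range);

      const uint64_t offset = 0x12345678;            // some offset in range
      assert(offset < range);

      // With disjoint bit ranges, XOR behaves like ADD when decoding and
      // inverts itself when encoding, so one EOR instruction works both ways.
      const uint64_t full = base ^ offset;           // "decode"
      assert(full == base + offset);
      assert((full ^ base) == offset);               // "encode" round-trips
      return 0;
    }

When the test fails, the macro assembler presumably falls back to a longer move/add-based encode/decode sequence; the sketch only motivates the fast path that use_XOR_for_compressed_class_base guards.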

@@ -606,11 +605,10 @@
 
   static int patch_oop(address insn_addr, address o);
   static int patch_narrow_klass(address insn_addr, narrowKlass n);
 
   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
-  void emit_static_call_stub();
 
   // The following 4 methods return the offset of the appropriate move instruction
 
   // Support for fast byte/short loading with zero extension (depending on particular CPU)
   int load_unsigned_byte(Register dst, Address src);
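
On the trailing context lines: load_unsigned_byte performs a zero-extending load (the upper bits of dst are cleared), as opposed to a sign-extending one. A tiny standalone illustration of the difference, independent of the HotSpot API:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t  b    = 0xF0;       // byte with its top bit set
      const uint64_t zext = b;          // zero extension: 0x00...00F0
      const int64_t  sext = (int8_t)b;  // sign extension: 0xFF...FFF0 == -16
      assert(zext == 0xF0);
      assert(sext == -16);
      return 0;
    }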