
src/share/vm/c1/c1_Runtime1.cpp

@@ -56,11 +56,14 @@
 #include "runtime/threadCritical.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/events.hpp"
-
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
+#endif
 
 // Implementation of StubAssembler
 
 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
   _name = name;
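
The hunk above adds utilities/macros.hpp and gates the Shenandoah barrier-set header behind INCLUDE_ALL_GCS, so configurations built without the extra collectors still compile. A minimal standalone sketch of that guard pattern, using hypothetical names (INCLUDE_OPTIONAL_GC, optional_gc_arraycopy_hook) rather than HotSpot's real headers:

#include <cstdio>

// Assumed build-time switch, standing in for INCLUDE_ALL_GCS from macros.hpp.
#define INCLUDE_OPTIONAL_GC 1

#if INCLUDE_OPTIONAL_GC
// In a real tree this would live in the collector-specific header guarded above.
static void optional_gc_arraycopy_hook() { std::puts("collector-specific hook ran"); }
#endif

int main() {
  // Call sites pair the same compile-time guard with a runtime flag,
  // mirroring the "#if INCLUDE_ALL_GCS / if (UseShenandoahGC)" shape used later in this patch.
  bool use_optional_gc = true;   // assumed stand-in for the UseShenandoahGC flag
#if INCLUDE_OPTIONAL_GC
  if (use_optional_gc) {
    optional_gc_arraycopy_hook();
  }
#endif
  return 0;
}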

@@ -209,17 +212,19 @@
       break;
 
     // All other stubs should have oopmaps
     default:
       assert(oop_maps != NULL, "must have an oopmap");
+      break;
   }
 #endif
 
   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
   sasm->align(BytesPerWord);
   // make sure all code is in code buffer
   sasm->flush();
+
   // create blob - distinguish a few special cases
   CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                  &code,
                                                  CodeOffsets::frame_never_safe,
                                                  sasm->frame_size(),

@@ -799,10 +804,15 @@
 //
 
 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
 
+#ifdef AARCH64
+  // AArch64 does not patch C1-generated code.
+  ShouldNotReachHere();
+#endif
+
   ResourceMark rm(thread);
   RegisterMap reg_map(thread, false);
   frame runtime_frame = thread->last_frame();
   frame caller_frame = runtime_frame.sender(&reg_map);
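
The guard added at the top of Runtime1::patch_code above makes the slow path fail fast on AArch64, which does not patch C1-generated code. A standalone sketch of that fail-fast pattern, assuming __aarch64__ as a stand-in for HotSpot's AARCH64 define and a hypothetical should_not_reach_here() helper:

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for HotSpot's ShouldNotReachHere(): report and abort.
static void should_not_reach_here(const char* where) {
  std::fprintf(stderr, "should not reach here: %s\n", where);
  std::abort();
}

static void patch_code_slow_path() {
#ifdef __aarch64__   // assumed stand-in for the AARCH64 define used in the hunk
  // On this platform the generated code never takes the patching slow path,
  // so landing here indicates a bug rather than a recoverable condition.
  should_not_reach_here("patch_code_slow_path");
#else
  std::puts("patching slow path executed");
#endif
}

int main() {
  patch_code_slow_path();
  return 0;
}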
 

@@ -945,11 +955,10 @@
 
     // Return to the now deoptimized frame.
   }
 
   // Now copy code back
-
   {
     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
     //
     // Deoptimization may have happened while we waited for the lock.
     // In that case we don't bother to do any patching we just return

@@ -1128,10 +1137,11 @@
               stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
                                    relocInfo::oop_type;
+
             // update relocInfo to metadata
             nmethod* nm = CodeCache::find_nmethod(instr_pc);
             assert(nm != NULL, "invalid nmethod_pc");
 
             // The old patch site is now a move instruction so update

@@ -1188,10 +1198,11 @@
 // if the calling nmethod was deoptimized. We do this by calling a
 // helper method which does the normal VM transition and when it
 // completes we can check for deoptimization. This simplifies the
 // assembly code in the cpu directories.
 //
+#ifndef TARGET_ARCH_aarch64
 int Runtime1::move_klass_patching(JavaThread* thread) {
 //
 // NOTE: we are still in Java
 //
   Thread* THREAD = thread;

@@ -1272,11 +1283,11 @@
 
   // Return true if calling code is deoptimized
 
   return caller_is_deopted();
 JRT_END
-
+#endif
 
 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
   // for now we just print out the block id
   tty->print("%d ", block_id);
 JRT_END
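
The #ifndef TARGET_ARCH_aarch64 / #endif pair added in the two hunks above excludes these patching entry points from the aarch64 build. On other platforms, as the comment notes, the helper does the normal VM transition, performs the slow work, and then reports whether the caller was deoptimized, so the CPU-specific stub only has to test an int. A standalone sketch of that calling convention, with hypothetical names (move_klass_patching_sketch, caller_was_deoptimized) in place of the real runtime entry:

#include <cstdio>

// Hypothetical stand-in for caller_is_deopted(): whether the calling frame was deoptimized.
static bool caller_was_deoptimized = false;

// Heavy work that, in the real runtime, happens after a normal VM transition
// and may itself cause the calling nmethod to be deoptimized.
static void do_klass_patching_work() {
  // ... resolve the constant, patch the site ...
  caller_was_deoptimized = true;   // pretend a deopt happened while we were in the VM
}

// Mirrors the int-returning shape of Runtime1::move_klass_patching: the
// platform stub only tests the return value instead of re-checking
// deoptimization state in assembly.
static int move_klass_patching_sketch() {
  do_klass_patching_work();
  return caller_was_deoptimized ? 1 : 0;
}

int main() {
  if (move_klass_patching_sketch()) {
    std::puts("caller deoptimized: continue execution in the interpreter");
  } else {
    std::puts("caller still valid: return to the patched compiled code");
  }
  return 0;
}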

@@ -1298,10 +1309,17 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
   assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+
+#if INCLUDE_ALL_GCS
+  if (UseShenandoahGC) {
+    ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length);
+  }
+#endif
+
   if (src == dst) {
     // same object, no check
     bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
     bs->write_ref_array((HeapWord*)dst_addr, length);
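
The hunk above inserts a collector-specific pass over the copied range, gated on INCLUDE_ALL_GCS and UseShenandoahGC, before the generic pre-barrier / copy / post-barrier sequence of the src == dst branch. A standalone sketch of that ordering with hypothetical stand-ins (oop_t, gc_specific_arraycopy_pass, the *_sketch barrier functions); the real patch calls ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length) at step 1:

#include <cstddef>
#include <cstdio>
#include <cstring>

typedef void* oop_t;   // hypothetical stand-in for HotSpot's oop

// Placeholder for the collector-specific pass the hunk above adds.
static void gc_specific_arraycopy_pass(oop_t* src, oop_t* dst, std::size_t len) {
  (void)src; (void)dst; (void)len;
}

static void write_ref_array_pre_sketch(oop_t* dst, std::size_t len)  { (void)dst; (void)len; /* pre-barrier */ }
static void write_ref_array_post_sketch(oop_t* dst, std::size_t len) { (void)dst; (void)len; /* post-barrier */ }

// Ordering the patched arraycopy slow path follows: collector-specific pass,
// pre-barrier, element copy, post-barrier.
static void oop_arraycopy_sketch(oop_t* src, oop_t* dst, std::size_t len) {
  gc_specific_arraycopy_pass(src, dst, len);      // 1. new collector-specific step
  write_ref_array_pre_sketch(dst, len);           // 2. generic pre-barrier on the destination
  std::memmove(dst, src, len * sizeof(oop_t));    // 3. conjoint copy of the elements
  write_ref_array_post_sketch(dst, len);          // 4. generic post-barrier on the destination
}

int main() {
  oop_t a[4] = {NULL, NULL, NULL, NULL};
  oop_t b[4] = {NULL, NULL, NULL, NULL};
  oop_arraycopy_sketch(a, b, 4);
  std::puts("copy completed with barriers applied in order");
  return 0;
}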