
src/hotspot/share/c1/c1_LIRAssembler.cpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -29,12 +29,14 @@
 #include "c1/c1_InstructionPrinter.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciInstance.hpp"
+#include "ci/ciValueKlass.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
 
 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
   // We must have enough patching space so that the call can be inserted.
   // We cannot use fat nops here, since the concurrent code rewrite may transiently
   // create an illegal instruction sequence.

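To make the fat-nop constraint above concrete, here is a minimal sketch of the padding idea (`patch_site_end` is a hypothetical bound, not a name from this file): the site is filled with 1-byte nops so that every intermediate state of the concurrent rewrite still decodes as valid instructions.

    // Pad with single-byte nops only: a multi-byte "fat" nop that is half
    // overwritten by the concurrent rewrite could be fetched by another
    // thread as a torn, illegal instruction.
    while (_masm->offset() < patch_site_end) {
      _masm->nop();
    }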
@@ -57,10 +59,11 @@
         ShouldNotReachHere();
     }
   } else if (patch->id() == PatchingStub::load_klass_id) {
     switch (code) {
       case Bytecodes::_new:
+      case Bytecodes::_defaultvalue:
       case Bytecodes::_anewarray:
       case Bytecodes::_multianewarray:
       case Bytecodes::_instanceof:
       case Bytecodes::_checkcast:
         break;

@@ -113,10 +116,11 @@
 
 LIR_Assembler::~LIR_Assembler() {
   // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
   // Reset it here to avoid an assertion.
   _unwind_handler_entry.reset();
+  _verified_value_entry.reset();
 }
 
 
 void LIR_Assembler::check_codespace() {
   CodeSection* cs = _masm->code_section();

@@ -334,13 +338,13 @@
     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
   }
 }
 
 
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
   flush_debug_info(pc_offset);
-  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
+  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
   if (cinfo->exception_handlers() != NULL) {
     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   }
 }
 

@@ -479,10 +483,16 @@
   // Record if this method has MethodHandle invokes.
   if (op->is_method_handle_invoke()) {
     compilation()->set_has_method_handle_invokes(true);
   }
 
+  ciValueKlass* vk;
+  if (op->maybe_return_as_fields(&vk)) {
+    int offset = store_value_type_fields_to_buf(vk);
+    add_call_info(offset, op->info(), true);
+  }
+
 #if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
   if (UseSSE < 2) {
     int i;
     for ( i = 1; i <= 7 ; i++ ) {

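A hedged reading of the maybe_return_as_fields block added above (the helper names are from this patch; the comments are interpretation, not patch content): when the callee may return a value type as scalarized fields, store_value_type_fields_to_buf() emits code that re-buffers the fields into a heap object when needed. That emitted code can reach a safepoint, so a second debug-info entry is recorded at its pc, with maybe_return_as_fields = true so the oop map treats the return register as an oop only in the buffered case.

    ciValueKlass* vk;
    if (op->maybe_return_as_fields(&vk)) {
      // Emits the re-buffering path; the returned pc offset marks the call
      // inside it, which needs its own debug info.
      int offset = store_value_type_fields_to_buf(vk);
      add_call_info(offset, op->info(), /* maybe_return_as_fields */ true);
    }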
@@ -592,10 +602,146 @@
       Unimplemented();
       break;
   }
 }
 
+void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
+  flush_debug_info(pc_offset);
+  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
+  // The VEP and VVEP(RO) of a C1-compiled method call buffer_value_args_xxx()
+  // before doing any argument shuffling. This call may cause GC. When GC happens,
+  // all the parameters are still as passed by the caller, so we just use
+  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
+  // There's no need to build a GC map here.
+  OopMap* oop_map = new OopMap(0, 0);
+  debug_info->add_safepoint(pc_offset, oop_map);
+  DebugToken* locvals = debug_info->create_scope_values(NULL); // FIXME: is this needed for Java debugging to work properly?
+  DebugToken* expvals = debug_info->create_scope_values(NULL); // FIXME: is this needed for Java debugging to work properly?
+  DebugToken* monvals = debug_info->create_monitor_values(NULL); // FIXME: need testing with synchronized method
+  bool reexecute = false;
+  bool return_oop = false; // This flag will be ignored since it is used only by C2 with escape analysis.
+  bool rethrow_exception = false;
+  bool is_method_handle_invoke = false;
+  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
+  debug_info->end_safepoint(pc_offset);
+}
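A minimal sketch of the consumer side that the comment above refers to (assuming the described frame::sender_for_compiled_frame() behavior; `thread` is a hypothetical JavaThread*): while buffer_value_args_xxx() runs, the oops still sit in the caller's outgoing argument area, so the stack walker reports them through the register map rather than through this deliberately empty oop map.

    RegisterMap map(thread, false /* update_map */);
    map.set_include_argument_oops(true);  // GC scans the arguments as passed by the caller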
+
+// The entry points of C1-compiled methods can have the following types:
+// (1) Methods with no value args
+// (2) Methods with a value receiver but no other value args
+//     VVEP_RO is the same as VVEP
+// (3) Methods with a non-value receiver and some value args
+//     VVEP_RO is the same as VEP
+// (4) Methods with a value receiver and other value args
+//     Separate VEP, VVEP and VVEP_RO
+//
+// (1)               (2)                 (3)                    (4)
+// UEP/UVEP:         VEP:                UEP:                   UEP:
+//   check_icache      pack receiver       check_icache           check_icache
+// VEP/VVEP/VVEP_RO  UEP/UVEP:           VEP/VVEP_RO:           VVEP_RO:
+//   body              check_icache        pack value args        pack value args (except receiver)
+//                   VVEP/VVEP_RO        UVEP:                  VEP:
+//                     body                check_icache           pack all value args
+//                                       VVEP:                  UVEP:
+//                                         body                   check_icache
+//                                                              VVEP:
+//                                                                body
+//
+// Note: after packing, we jump to the method body.
+void LIR_Assembler::emit_std_entries() {
+  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
+
+  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
+
+  _masm->align(CodeEntryAlignment);
+
+  if (ces->has_scalarized_args()) {
+    assert(ValueTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
+
+    CodeOffsets::Entries ro_entry_type = ces->c1_value_ro_entry_type();
+
+    if (ro_entry_type != CodeOffsets::Verified_Value_Entry) {
+      // This is the UEP. It will fall through to the VEP or VVEP(RO).
+      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
+      if (needs_icache(compilation()->method())) {
+        check_icache();
+      }
+    }
+
+    if (ro_entry_type == CodeOffsets::Verified_Value_Entry_RO) {
+      // VVEP(RO) = pack all value parameters, except the <this> object.
+      add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Value_Entry_RO, ces));
+    }
+
+    // VEP = pack all value parameters
+    _masm->align(CodeEntryAlignment);
+    add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Entry, ces));
+
+    _masm->align(CodeEntryAlignment);
+    // This is the UVEP. It will fall through to the VVEP.
+    offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
+    if (ro_entry_type == CodeOffsets::Verified_Value_Entry) {
+      // Special case if we have VVEP == VVEP(RO):
+      // this means UVEP (called by C1) == UEP (called by C2).
+      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
+    }
+
+    if (needs_icache(compilation()->method())) {
+      check_icache();
+    }
+    // VVEP = all value parameters are passed as refs - no packing.
+    emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
+
+    if (ro_entry_type != CodeOffsets::Verified_Value_Entry_RO) {
+      // The VVEP(RO) is the same as VEP or VVEP
+      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
+             ro_entry_type == CodeOffsets::Verified_Value_Entry, "must be");
+      offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO,
+                           offsets()->value(ro_entry_type));
+    }
+  } else {
+    // All 3 entries are the same (no value-type packing)
+    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
+    offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
+    if (needs_icache(compilation()->method())) {
+      check_icache();
+    }
+    int offset = emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
+    offsets()->set_value(CodeOffsets::Verified_Entry, offset);
+    offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, offset);
+  }
+}
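As a concrete check of the entry-point table above (a sketch, not code from this patch; the CodeOffsets names are real): for a case-(2) method, one with a value receiver but no other value args, ro_entry_type is Verified_Value_Entry, so after emit_std_entries() the VVEP(RO) offset must have been aliased to the VVEP offset:

    assert(offsets()->value(CodeOffsets::Verified_Value_Entry_RO) ==
           offsets()->value(CodeOffsets::Verified_Value_Entry),
           "case (2): VVEP(RO) == VVEP");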
+
+int LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
+  offsets()->set_value(entry, _masm->offset());
+  int offset = _masm->offset();
+  switch (entry) {
+  case CodeOffsets::Verified_Entry:
+    offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
+    if (needs_clinit_barrier_on_entry(compilation()->method())) {
+      clinit_barrier(compilation()->method());
+    }
+    return offset;
+  case CodeOffsets::Verified_Value_Entry_RO:
+    offset = _masm->verified_value_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
+    if (needs_clinit_barrier_on_entry(compilation()->method())) {
+      clinit_barrier(compilation()->method());
+    }
+    return offset;
+  default:
+    {
+      assert(entry == CodeOffsets::Verified_Value_Entry, "must be");
+      _masm->verified_value_entry();
+      if (needs_clinit_barrier_on_entry(compilation()->method())) {
+        clinit_barrier(compilation()->method());
+      }
+      build_frame();
+      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
+      return offset;
+    }
+  }
+}
 
 void LIR_Assembler::emit_op0(LIR_Op0* op) {
   switch (op->code()) {
     case lir_word_align: {
       _masm->align(BytesPerWord);

@@ -614,23 +760,11 @@
     case lir_build_frame:
       build_frame();
       break;
 
     case lir_std_entry:
-      // init offsets
-      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
-      _masm->align(CodeEntryAlignment);
-      if (needs_icache(compilation()->method())) {
-        check_icache();
-      }
-      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
-      _masm->verified_entry();
-      if (needs_clinit_barrier_on_entry(compilation()->method())) {
-        clinit_barrier(compilation()->method());
-      }
-      build_frame();
-      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
+      emit_std_entries();
       break;
 
     case lir_osr_entry:
       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
       osr_entry();

@@ -779,11 +913,13 @@
   }
 }
 
 
 void LIR_Assembler::build_frame() {
-  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
+  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
+                     compilation()->compiled_entry_signature()->c1_needs_stack_repair(),
+                     &_verified_value_entry);
 }
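A hedged note on the new c1_needs_stack_repair argument (the mechanism is inferred from the signature; the slot layout below is an illustrative assumption, not this patch's code): an entry point that packs scalarized arguments can grow the frame beyond its static size, so such a frame saves its actual size where the stack walker can find it when computing the sender.

    // Sketch: record the true frame size in a dedicated slot so that frame
    // walking does not have to trust the nmethod's static frame size.
    if (needs_stack_repair) {
      __ movptr(Address(rsp, sp_offset_for_frame_size), real_frame_size);  // hypothetical slot/names
    }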
 
 
 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
   assert((src->is_single_fpu() && dest->is_single_stack()) ||