
src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

@@ -480,10 +480,11 @@
      case T_BYTE:
      case T_BOOLEAN:
      case T_INT:
      case T_ARRAY:
      case T_OBJECT:
+     case T_INLINE_TYPE:
      case T_ADDRESS:
        if( reg_arg0 == 9999 )  {
          reg_arg0 = i;
          regs[i].set1(rcx->as_VMReg());
        } else if( reg_arg1 == 9999 )  {

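Note: T_INLINE_TYPE joins the pointer-sized kinds here, so an inline-type argument is passed exactly like an oop reference. Below is a minimal standalone model of this slot assignment (not HotSpot code; the 9999 sentinel and the rcx/rdx choice mirror the routine above, everything else is simplified):

    #include <cstdio>

    enum BasicType { T_INT, T_OBJECT, T_INLINE_TYPE, T_LONG };

    int main() {
      BasicType sig[] = { T_OBJECT, T_INLINE_TYPE, T_INT };
      int reg_arg0 = 9999, reg_arg1 = 9999, stack = 0;
      for (int i = 0; i < 3; i++) {
        switch (sig[i]) {
          case T_INT: case T_OBJECT:
          case T_INLINE_TYPE:           // passed exactly like an oop pointer
            if (reg_arg0 == 9999)      { reg_arg0 = i; printf("arg %d -> rcx\n", i); }
            else if (reg_arg1 == 9999) { reg_arg1 = i; printf("arg %d -> rdx\n", i); }
            else                       { printf("arg %d -> stack slot %d\n", i, stack++); }
            break;
          default: break;
        }
      }
      return 0;
    }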
@@ -530,10 +531,19 @@
  
    // return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2
    return align_up(stack, 2);
  }
  
+ const uint SharedRuntime::java_return_convention_max_int = 1;
+ const uint SharedRuntime::java_return_convention_max_float = 1;
+ int SharedRuntime::java_return_convention(const BasicType *sig_bt,
+                                           VMRegPair *regs,
+                                           int total_args_passed) {
+   Unimplemented();
+   return 0;
+ }
+ 
  // Patch the callers callsite with entry to compiled code if it exists.
  static void patch_callers_callsite(MacroAssembler *masm) {
    Label L;
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);

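java_return_convention, which Valhalla uses to return scalarized inline-type fields in registers, is stubbed out with Unimplemented() on this 32-bit port; the max_int/max_float constants of 1 cap the convention at one register per class. A rough, hypothetical model of what such a cap means (the names and limits here are assumptions, not the HotSpot API):

    #include <cstdio>

    enum BasicType { T_INT, T_FLOAT, T_VOID };

    // Hypothetical model: allow at most one int-class and one float-class
    // return value, mirroring the max_int/max_float == 1 limits above.
    // Returns the number of values placed, or -1 if a limit is exceeded.
    int model_return_convention(const BasicType* sig, int n) {
      int ints = 0, floats = 0;
      for (int i = 0; i < n; i++) {
        if (sig[i] == T_INT   && ++ints   > 1) return -1;
        if (sig[i] == T_FLOAT && ++floats > 1) return -1;
      }
      return ints + floats;
    }

    int main() {
      BasicType ok[]  = { T_INT, T_FLOAT };
      BasicType bad[] = { T_INT, T_INT };
      printf("%d %d\n", model_return_convention(ok, 2),   // prints 2
                        model_return_convention(bad, 2)); // prints -1
      return 0;
    }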
@@ -591,15 +601,17 @@
    int next_off = st_off - Interpreter::stackElementSize;
    __ movdbl(Address(rsp, next_off), r);
  }
  
  static void gen_c2i_adapter(MacroAssembler *masm,
-                             int total_args_passed,
-                             int comp_args_on_stack,
-                             const BasicType *sig_bt,
+                             const GrowableArray<SigEntry>& sig_extended,
                              const VMRegPair *regs,
-                             Label& skip_fixup) {
+                             Label& skip_fixup,
+                             address start,
+                             OopMapSet*& oop_maps,
+                             int& frame_complete,
+                             int& frame_size_in_words) {
    // Before we get into the guts of the C2I adapter, see if we should be here
    // at all.  We've come from compiled code and are attempting to jump to the
    // interpreter, which means the caller made a static call to get here
    // (vcalls always get a compiled target if there is one).  Check for a
    // compiled target.  If there is one, we need to patch the caller's call.

@@ -617,29 +629,29 @@
  #endif /* COMPILER2 */
  
  // Since all args are passed on the stack,
  // sig_extended.length() * Interpreter::stackElementSize is the space we need.
-   int extraspace = total_args_passed * Interpreter::stackElementSize;
+   int extraspace = sig_extended.length() * Interpreter::stackElementSize;
  
    // Get return address
    __ pop(rax);
  
    // set senderSP value
    __ movptr(rsi, rsp);
  
    __ subptr(rsp, extraspace);
  
    // Now write the args into the outgoing interpreter space
-   for (int i = 0; i < total_args_passed; i++) {
-     if (sig_bt[i] == T_VOID) {
-       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+   for (int i = 0; i < sig_extended.length(); i++) {
+     if (sig_extended.at(i)._bt == T_VOID) {
+       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
        continue;
      }
  
      // st_off points to lowest address on stack.
-     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
+     int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
      int next_off = st_off - Interpreter::stackElementSize;
  
      // Say 4 args:
      // i   st_off
      // 0   12 T_LONG

@@ -685,11 +697,11 @@
        } else {
          // long/double in gpr
          NOT_LP64(ShouldNotReachHere());
          // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
          // T_DOUBLE and T_LONG use two slots in the interpreter
-         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+         if (sig_extended.at(i)._bt == T_LONG || sig_extended.at(i)._bt == T_DOUBLE) {
            // long/double in gpr
  #ifdef ASSERT
            // Overwrite the unused slot with known junk
            LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
            __ movptr(Address(rsp, st_off), rax);

@@ -702,11 +714,11 @@
      } else {
        assert(r_1->is_XMMRegister(), "");
        if (!r_2->is_valid()) {
          __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
        } else {
-         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
+         assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
          move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
        }
      }
    }
  

@@ -735,14 +747,14 @@
    __ jcc(Assembler::below, L_ok);
    __ bind(L_fail);
  }
  
  void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
-                                     int total_args_passed,
                                      int comp_args_on_stack,
-                                     const BasicType *sig_bt,
+                                     const GrowableArray<SigEntry>& sig_extended,
                                      const VMRegPair *regs) {
+ 
    // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
    // code goes non-entrant while we get args ready.
  
    // Adapters can be frameless because they do not require the caller

@@ -827,24 +839,24 @@
    // Pre-load the register-jump target early, to schedule it better.
    __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
  
    // Now generate the shuffle code.  Pick up all register args and move the
    // rest through the floating point stack top.
-   for (int i = 0; i < total_args_passed; i++) {
-     if (sig_bt[i] == T_VOID) {
+   for (int i = 0; i < sig_extended.length(); i++) {
+     if (sig_extended.at(i)._bt == T_VOID) {
        // Longs and doubles are passed in native word order, but misaligned
        // in the 32-bit build.
-       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
        continue;
      }
  
      // Pick up 0, 1 or 2 words from SP+offset.
  
      assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
              "scrambled load targets?");
      // Load in argument order going down.
-     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
+     int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
      // Point to interpreter value (vs. tag)
      int next_off = ld_off - Interpreter::stackElementSize;
      //
      //
      //

@@ -881,11 +893,11 @@
          //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets, so the LSW is at the LOW address
  
          // ld_off is MSW so get LSW
-         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+         const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                             next_off : ld_off;
          __ movptr(rsi, Address(saved_sp, offset));
          __ movptr(Address(rsp, st_off), rsi);
  #ifndef _LP64
          __ movptr(rsi, Address(saved_sp, ld_off));

@@ -899,11 +911,11 @@
          //
          // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
          // So we must adjust where to pick up the data to match the interpreter.
  
-         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+         const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                             next_off : ld_off;
  
          // this can be a misaligned move
          __ movptr(r, Address(saved_sp, offset));
  #ifndef _LP64

@@ -947,18 +959,18 @@
    __ jmp(rdi);
  }
  
  // ---------------------------------------------------------------
  AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
-                                                             int total_args_passed,
                                                              int comp_args_on_stack,
-                                                             const BasicType *sig_bt,
+                                                             const GrowableArray<SigEntry>& sig_extended,
                                                              const VMRegPair *regs,
-                                                             AdapterFingerPrint* fingerprint) {
+                                                             AdapterFingerPrint* fingerprint,
+                                                             AdapterBlob*& new_adapter) {
    address i2c_entry = __ pc();
  
-   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
+   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
  
    // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
    // to the interpreter.  The args start out packed in the compiled layout.  They
    // need to be unpacked into the interpreter layout.  This will almost always

@@ -994,13 +1006,17 @@
    address c2i_entry = __ pc();
  
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->c2i_entry_barrier(masm);
  
-   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
+   OopMapSet* oop_maps = NULL;
+   int frame_complete = CodeOffsets::frame_never_safe;
+   int frame_size_in_words = 0;
+   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
  
    __ flush();
+   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
    return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  }
  
  int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,

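The adapter blob is now created inside generate_i2c2i_adapters and handed back through the new_adapter out-parameter; on this port the frame metadata stays at its "no real frame" defaults because gen_c2i_adapter does not buffer inline-type arguments. A hypothetical model of that flow (AdapterBlob is mocked, and the -1 sentinel is an assumption standing in for CodeOffsets::frame_never_safe):

    #include <cstdio>

    struct AdapterBlob { int frame_complete; int frame_size_in_words; };

    // Mocked stand-in for AdapterBlob::create.
    AdapterBlob* create_blob(int frame_complete, int frame_size_in_words) {
      return new AdapterBlob{ frame_complete, frame_size_in_words };
    }

    // Model of the generator: the defaults mean "never safepoint here", and
    // the blob travels back through a reference out-parameter, as above.
    void generate(AdapterBlob*& new_adapter) {
      const int frame_never_safe = -1;  // assumed sentinel value
      int frame_complete = frame_never_safe;
      int frame_size_in_words = 0;
      // ... gen_c2i_adapter would update these if it built a real frame ...
      new_adapter = create_blob(frame_complete, frame_size_in_words);
    }

    int main() {
      AdapterBlob* blob = nullptr;
      generate(blob);
      printf("frame_complete=%d size=%d\n", blob->frame_complete, blob->frame_size_in_words);
      delete blob;
      return 0;
    }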
@@ -1020,10 +1036,11 @@
      case T_FLOAT:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_OBJECT:
+     case T_INLINE_TYPE:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        regs[i].set1(VMRegImpl::stack2reg(stack++));
        break;

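In the 32-bit C calling convention every argument goes on the stack, so T_INLINE_TYPE, passed as a reference, takes a single slot like any other pointer. A simplified standalone model (slot sizes and the example signature are illustrative):

    #include <cstdio>

    enum BasicType { T_INT, T_OBJECT, T_INLINE_TYPE, T_LONG, T_VOID };

    int main() {
      // All arguments live on the stack; pointer-sized kinds (including
      // T_INLINE_TYPE) take one slot, longs take two.
      BasicType sig[] = { T_INLINE_TYPE, T_LONG, T_VOID, T_INT };
      int stack = 0;
      for (int i = 0; i < 4; i++) {
        switch (sig[i]) {
          case T_INT: case T_OBJECT: case T_INLINE_TYPE:
            printf("arg %d -> stack slot %d\n", i, stack); stack += 1; break;
          case T_LONG:
            printf("arg %d -> stack slots %d,%d\n", i, stack, stack + 1); stack += 2; break;
          default: break; // T_VOID: second half of the long
        }
      }
      return 0;
    }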
@@ -1718,10 +1735,11 @@
            VMRegPair in_arg = in_regs[i];
            unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
            c_arg++;
            break;
          }
+       case T_INLINE_TYPE:
        case T_OBJECT:
          assert(!is_critical_native, "no oop arguments");
          object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                      ((i == 0) && (!is_static)),
                      &receiver_offset);

@@ -1889,10 +1907,11 @@
    case T_DOUBLE :
    case T_FLOAT  :
      // Result is in st0 we'll save as needed
      break;
    case T_ARRAY:                 // Really a handle
+   case T_INLINE_TYPE:           // Really a handle
    case T_OBJECT:                // Really a handle
        break; // can't de-handlize until after safepoint check
    case T_VOID: break;
    case T_LONG: break;
    default       : ShouldNotReachHere();

@@ -2983,10 +3002,15 @@
    // return the  blob
    // frame_size_words or bytes??
    return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
  }
  
+ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
+   Unimplemented();
+   return NULL;
+ }
+ 
  #ifdef COMPILER2
  RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                  int shadow_space_bytes,
                                                  const GrowableArray<VMReg>& input_registers,
                                                  const GrowableArray<VMReg>& output_registers) {