< prev index next >

src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp

Print this page

 222   // locals[nlocals-1..0]
 223   // monitors[0..number_of_locks]
 224   //
 225   // locals is a direct copy of the interpreter frame so in the osr buffer
 226   // so first slot in the local array is the last local from the interpreter
 227   // and last slot is local[0] (receiver) from the interpreter
 228   //
 229   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 230   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 231   // in the interpreter frame (the method lock if a sync method)
 232 
 233   // Initialize monitors in the compiled activation.
 234   //   x12: pointer to osr buffer
 235   // All other registers are dead at this point and the locals will be
 236   // copied into place by code emitted in the IR.
 237 
 238   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 239   {
 240     assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 241     int monitor_offset = BytesPerWord * method()->max_locals() +
 242       (2 * BytesPerWord) * (number_of_locks - 1);
 243     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 244     // the OSR buffer using 2 word entries: first the lock and then
 245     // the oop.
 246     for (int i = 0; i < number_of_locks; i++) {
 247       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 248 #ifdef ASSERT
 249       // verify the interpreter's monitor has a non-null object
 250       {
 251         Label L;
 252         __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
 253         __ bnez(t0, L);
 254         __ stop("locked object is NULL");
 255         __ bind(L);
 256       }
 257 #endif // ASSERT
 258       __ ld(x9, Address(OSR_buf, slot_offset + 0));
 259       __ sd(x9, frame_map()->address_for_monitor_lock(i));
 260       __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
 261       __ sd(x9, frame_map()->address_for_monitor_object(i));
 262     }
 263   }
 264 }
 265 
 266 // inline cache check; done before the frame is built.
 267 int LIR_Assembler::check_icache() {
 268   Register receiver = FrameMap::receiver_opr->as_register();
 269   Register ic_klass = IC_Klass;
 270   int start_offset = __ offset();
 271   Label dont;
 272   __ inline_cache_check(receiver, ic_klass, dont);
 273 
 274   // if icache check fails, then jump to runtime routine
 275   // Note: RECEIVER must still contain the receiver!
 276   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 277 
 278   // We align the verified entry point unless the method body
 279   // (including its inline cache check) will fit in a single 64-byte
 280   // icache line.

 342   }
 343 #endif // PRODUCT
 344 
 345   int offset = code_offset();
 346 
 347   // Fetch the exception from TLS and clear out exception related thread state
 348   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
 349   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
 350   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
 351 
 352   __ bind(_unwind_handler_entry);
 353   __ verify_not_null_oop(x10);
 354   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 355     __ mv(x9, x10);   // Preserve the exception
 356   }
 357 
 358   // Perform needed unlocking
 359   MonitorExitStub* stub = NULL;
 360   if (method()->is_synchronized()) {
 361     monitor_address(0, FrameMap::r10_opr);
 362     stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);

 363     if (UseHeavyMonitors) {
 364       __ j(*stub->entry());
 365     } else {
 366       __ unlock_object(x15, x14, x10, *stub->entry());
 367     }
 368     __ bind(*stub->continuation());
 369   }
 370 
 371   if (compilation()->env()->dtrace_method_probes()) {
 372     __ mv(c_rarg0, xthread);
 373     __ mov_metadata(c_rarg1, method()->constant_encoding());
 374     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 375   }
 376 
 377   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 378     __ mv(x10, x9);   // Restore the exception
 379   }
 380 
 381   // remove the activation and dispatch to the unwind handler
 382   __ block_comment("remove_frame and dispatch to the unwind handler");

1485       }
1486     } else {
1487       move_regs(left->as_register_lo(), dest->as_register_lo());
1488     }
1489   } else {
1490     ShouldNotReachHere();
1491   }
1492 }
1493 
     // Emit code for a LIR lock/unlock operation. obj/hdr/lock are the
     // registers LIRGen assigned for the object, its header copy, and the
     // lock record; op->stub() is the slow-path (runtime) stub.
1494 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
1495   Register obj = op->obj_opr()->as_register();  // may not be an oop
1496   Register hdr = op->hdr_opr()->as_register();
1497   Register lock = op->lock_opr()->as_register();
1498   if (UseHeavyMonitors) {
       // Heavyweight monitors: no inline fast path, jump straight to the
       // runtime stub. An explicit null check (with debug info for the
       // implicit-NPE machinery) is emitted first when one is possible.
1499     if (op->info() != NULL) {
1500       add_debug_info_for_null_check_here(op->info());
1501       __ null_check(obj);
1502     }
1503     __ j(*op->stub()->entry());
1504   } else if (op->code() == lir_lock) {
1505     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
1506     // add debug info for NullPointerException only if one is possible
       // lock_object returns the offset of the instruction that may fault on
       // a null obj; record it so the signal handler can map the fault to an NPE.
1507     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
1508     if (op->info() != NULL) {
1509       add_debug_info_for_null_check(null_check_offset, op->info());
1510     }
1511   } else if (op->code() == lir_unlock) {
1512     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
1513     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
1514   } else {
1515     Unimplemented();
1516   }
     // Fast path falls through to here; the slow-path stub also returns here.
1517   __ bind(*op->stub()->continuation());
1518 }
1519 
1520 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
1521   Register obj = op->obj()->as_pointer_register();
1522   Register result = op->result_opr()->as_pointer_register();
1523 
1524   CodeEmitInfo* info = op->info();
1525   if (info != NULL) {
1526     add_debug_info_for_null_check_here(info);
1527   }
1528 
1529   if (UseCompressedClassPointers) {
1530     __ lwu(result, Address(obj, oopDesc::klass_offset_in_bytes()));
1531     __ decode_klass_not_null(result);
1532   } else {

1589       }
1590     } else {
1591       __ load_klass(recv, recv);
1592       Label update_done;
1593       type_profile_helper(mdo, md, data, recv, &update_done);
1594       // Receiver did not match any saved receiver and there is no empty row for it.
1595       // Increment total counter to indicate polymorphic case.
1596       __ increment(counter_addr, DataLayout::counter_increment);
1597 
1598       __ bind(update_done);
1599     }
1600   } else {
1601     // Static call
1602     __ increment(counter_addr, DataLayout::counter_increment);
1603   }
1604 }
1605 
     // RISC-V has no branch delay slots, so delay-slot LIR ops are unimplemented.
1606 void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); }
1607 
     // Load into dst the in-frame address of monitor slot `monitor_no`
     // (the BasicObjectLock's lock word, per address_for_monitor_lock).
1608 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
1609   __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
1610 }
1611 
     // The CRC32-update intrinsic is not implemented in C1 on this platform.
1612 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }
1613 
1614 void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
1615                                    Register tmp, Label &next, Label &none,
1616                                    Address mdo_addr) {
1617   if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
1618     if (exact_klass != NULL) {
1619       __ mov_metadata(tmp, exact_klass->constant_encoding());
1620     } else {
1621       __ load_klass(tmp, tmp);
1622     }
1623 
1624     __ ld(t1, mdo_addr);
1625     __ xorr(tmp, tmp, t1);
1626     __ andi(t0, tmp, TypeEntries::type_klass_mask);
1627     // klass seen before, nothing to do. The unknown bit may have been
1628     // set already but no need to check.
1629     __ beqz(t0, next);

 222   // locals[nlocals-1..0]
 223   // monitors[0..number_of_locks]
 224   //
 225   // locals is a direct copy of the interpreter frame so in the osr buffer
 226   // so first slot in the local array is the last local from the interpreter
 227   // and last slot is local[0] (receiver) from the interpreter
 228   //
 229   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 230   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 231   // in the interpreter frame (the method lock if a sync method)
 232 
 233   // Initialize monitors in the compiled activation.
 234   //   x12: pointer to osr buffer
 235   // All other registers are dead at this point and the locals will be
 236   // copied into place by code emitted in the IR.
 237 
 238   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 239   {
 240     assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 241     int monitor_offset = BytesPerWord * method()->max_locals() +
 242       BytesPerWord * (number_of_locks - 1);
 243     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 244     // the OSR buffer using 2 word entries: first the lock and then
 245     // the oop.
 246     for (int i = 0; i < number_of_locks; i++) {
 247       int slot_offset = monitor_offset - (i * BytesPerWord);
 248 #ifdef ASSERT
 249       // verify the interpreter's monitor has a non-null object
 250       {
 251         Label L;
 252         __ ld(t0, Address(OSR_buf, slot_offset));
 253         __ bnez(t0, L);
 254         __ stop("locked object is NULL");
 255         __ bind(L);
 256       }
 257 #endif // ASSERT
 258       __ ld(x9, Address(OSR_buf, slot_offset));


 259       __ sd(x9, frame_map()->address_for_monitor_object(i));
 260     }
 261   }
 262 }
 263 
 264 // inline cache check; done before the frame is built.
 265 int LIR_Assembler::check_icache() {
 266   Register receiver = FrameMap::receiver_opr->as_register();
 267   Register ic_klass = IC_Klass;
 268   int start_offset = __ offset();
 269   Label dont;
 270   __ inline_cache_check(receiver, ic_klass, dont);
 271 
 272   // if icache check fails, then jump to runtime routine
 273   // Note: RECEIVER must still contain the receiver!
 274   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 275 
 276   // We align the verified entry point unless the method body
 277   // (including its inline cache check) will fit in a single 64-byte
 278   // icache line.

 340   }
 341 #endif // PRODUCT
 342 
 343   int offset = code_offset();
 344 
 345   // Fetch the exception from TLS and clear out exception related thread state
 346   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
 347   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
 348   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
 349 
 350   __ bind(_unwind_handler_entry);
 351   __ verify_not_null_oop(x10);
 352   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 353     __ mv(x9, x10);   // Preserve the exception
 354   }
 355 
 356   // Perform needed unlocking
 357   MonitorExitStub* stub = NULL;
 358   if (method()->is_synchronized()) {
 359     monitor_address(0, FrameMap::r10_opr);
 360     __ ld(x14, Address(x10, BasicObjectLock::obj_offset_in_bytes()));
 361     stub = new MonitorExitStub(FrameMap::r14_opr);
 362     if (UseHeavyMonitors) {
 363       __ j(*stub->entry());
 364     } else {
 365       __ unlock_object(x15, x14, x10, *stub->entry());
 366     }
 367     __ bind(*stub->continuation());
 368   }
 369 
 370   if (compilation()->env()->dtrace_method_probes()) {
 371     __ mv(c_rarg0, xthread);
 372     __ mov_metadata(c_rarg1, method()->constant_encoding());
 373     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 374   }
 375 
 376   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 377     __ mv(x10, x9);   // Restore the exception
 378   }
 379 
 380   // remove the activation and dispatch to the unwind handler
 381   __ block_comment("remove_frame and dispatch to the unwind handler");

1484       }
1485     } else {
1486       move_regs(left->as_register_lo(), dest->as_register_lo());
1487     }
1488   } else {
1489     ShouldNotReachHere();
1490   }
1491 }
1492 
     // Emit code for a LIR lock/unlock operation. obj/hdr/lock are the
     // registers LIRGen assigned for the object, its header copy, and the
     // lock record; op->stub() is the slow-path (runtime) stub.
1493 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
1494   Register obj = op->obj_opr()->as_register();  // may not be an oop
1495   Register hdr = op->hdr_opr()->as_register();
1496   Register lock = op->lock_opr()->as_register();
1497   if (UseHeavyMonitors) {
       // Heavyweight monitors: no inline fast path, jump straight to the
       // runtime stub. An explicit null check (with debug info for the
       // implicit-NPE machinery) is emitted first when one is possible.
1498     if (op->info() != NULL) {
1499       add_debug_info_for_null_check_here(op->info());
1500       __ null_check(obj);
1501     }
1502     __ j(*op->stub()->entry());
1503   } else if (op->code() == lir_lock) {

1504     // add debug info for NullPointerException only if one is possible
       // lock_object returns the offset of the instruction that may fault on
       // a null obj; record it so the signal handler can map the fault to an NPE.
1505     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
1506     if (op->info() != NULL) {
1507       add_debug_info_for_null_check(null_check_offset, op->info());
1508     }
1509   } else if (op->code() == lir_unlock) {

1510     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
1511   } else {
1512     Unimplemented();
1513   }
     // Fast path falls through to here; the slow-path stub also returns here.
1514   __ bind(*op->stub()->continuation());
1515 }
1516 
1517 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
1518   Register obj = op->obj()->as_pointer_register();
1519   Register result = op->result_opr()->as_pointer_register();
1520 
1521   CodeEmitInfo* info = op->info();
1522   if (info != NULL) {
1523     add_debug_info_for_null_check_here(info);
1524   }
1525 
1526   if (UseCompressedClassPointers) {
1527     __ lwu(result, Address(obj, oopDesc::klass_offset_in_bytes()));
1528     __ decode_klass_not_null(result);
1529   } else {

1586       }
1587     } else {
1588       __ load_klass(recv, recv);
1589       Label update_done;
1590       type_profile_helper(mdo, md, data, recv, &update_done);
1591       // Receiver did not match any saved receiver and there is no empty row for it.
1592       // Increment total counter to indicate polymorphic case.
1593       __ increment(counter_addr, DataLayout::counter_increment);
1594 
1595       __ bind(update_done);
1596     }
1597   } else {
1598     // Static call
1599     __ increment(counter_addr, DataLayout::counter_increment);
1600   }
1601 }
1602 
     // RISC-V has no branch delay slots, so delay-slot LIR ops are unimplemented.
1603 void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); }
1604 
     // Load into dst the in-frame address of monitor slot `monitor_no`.
     // Note: this version yields the monitor's *object* slot
     // (address_for_monitor_object), not the lock-word slot.
1605 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
1606   __ la(dst->as_register(), frame_map()->address_for_monitor_object(monitor_no));
1607 }
1608 
     // The CRC32-update intrinsic is not implemented in C1 on this platform.
1609 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }
1610 
1611 void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
1612                                    Register tmp, Label &next, Label &none,
1613                                    Address mdo_addr) {
1614   if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
1615     if (exact_klass != NULL) {
1616       __ mov_metadata(tmp, exact_klass->constant_encoding());
1617     } else {
1618       __ load_klass(tmp, tmp);
1619     }
1620 
1621     __ ld(t1, mdo_addr);
1622     __ xorr(tmp, tmp, t1);
1623     __ andi(t0, tmp, TypeEntries::type_klass_mask);
1624     // klass seen before, nothing to do. The unknown bit may have been
1625     // set already but no need to check.
1626     __ beqz(t0, next);
< prev index next >