src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp

 332   __ pop(thread);
 333 #endif
 334 }
 335 
 336 void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src) {
 337   if (!ShenandoahLoadRefBarrier) {
 338     return;
 339   }
 340 
 341   Label done;
 342   Label not_null;
 343   Label slow_path;
 344   __ block_comment("load_reference_barrier_native { ");
 345 
 346   // null check
 347   __ testptr(dst, dst);
 348   __ jcc(Assembler::notZero, not_null);
 349   __ jmp(done);
 350   __ bind(not_null);
 351 
 352 
 353 #ifdef _LP64
 354   Register thread = r15_thread;
 355 #else
 356   Register thread = rcx;
 357   if (thread == dst) {
 358     thread = rbx;
 359   }
 360   __ push(thread);
 361   __ get_thread(thread);
 362 #endif
 363   assert_different_registers(dst, thread);
 364 
 365   Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 366   __ testb(gc_state, ShenandoahHeap::EVACUATION);
 367 #ifndef _LP64
 368   __ pop(thread);
 369 #endif
 370   __ jccb(Assembler::notZero, slow_path);
 371   __ jmp(done);
 372   __ bind(slow_path);


 400   __ pop(r12);
 401   __ pop(r11);
 402   __ pop(r10);
 403   __ pop(r9);
 404   __ pop(r8);
 405 #endif
 406   __ pop(rsi);
 407   __ pop(rdi);
 408   __ pop(rdx);
 409   __ pop(rcx);
 410 
 411   if (dst != rax) {
 412     __ movptr(dst, rax);
 413     __ pop(rax);
 414   }
 415 
 416   __ bind(done);
 417   __ block_comment("} load_reference_barrier_native");
 418 }
 419 
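The fast path above never calls out: a null reference falls straight through to done, and a non-null reference is returned untouched unless the thread-local gc_state byte has the EVACUATION bit set. Only then does control reach slow_path, which spills the caller-saved registers (the pop sequence further down restores them) and calls into the runtime. A minimal standalone C++ model of that decision, with gc_state, evacuation_bit and slow_path as illustrative stand-ins rather than HotSpot API:

    #include <cstdint>

    typedef void* oop;

    // Models load_reference_barrier_native's branch structure:
    //   testptr(dst, dst)           -> null check
    //   testb(gc_state, EVACUATION) -> phase check
    inline oop lrb_native_model(oop obj, uint8_t gc_state, uint8_t evacuation_bit,
                                oop (*slow_path)(oop)) {
      if (obj == nullptr) return obj;                    // jump straight to done
      if ((gc_state & evacuation_bit) == 0) return obj;  // fast path: no evacuation
      return slow_path(obj);                             // runtime resolves the reference
    }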
 420 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
 421   if (ShenandoahStoreValEnqueueBarrier) {
 422     storeval_barrier_impl(masm, dst, tmp);
 423   }
 424 }
 425 
 426 void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
 427   assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");
 428 
 429   if (dst == noreg) return;
 430 
 431   if (ShenandoahStoreValEnqueueBarrier) {
 432     // The set of registers to be saved+restored is the same as in the write-barrier above.
 433     // Those are the commonly used registers in the interpreter.
 434     __ pusha();
 435     // __ push_callee_saved_registers();
 436     __ subptr(rsp, 2 * Interpreter::stackElementSize);
 437     __ movdbl(Address(rsp, 0), xmm0);
 438 
 439 #ifdef _LP64
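The elided body makes a runtime call to enqueue the just-stored value; the pusha/movdbl pair above preserves the interpreter's live register state (including xmm0) across that call. At the queue level, the enqueue amounts to the following sketch, with a simplified buffer layout and illustrative names (real HotSpot SATB buffers count an index down and hand a full buffer to the runtime):

    #include <cstddef>

    // Simplified SATB-style enqueue of a store value: log it so the
    // concurrent collector treats it as live.
    struct SATBQueueModel {
      void** buf;
      size_t index;                       // free slots remaining; 0 => buffer full
      bool try_enqueue(void* val) {
        if (val == nullptr) return true;  // nulls are never logged
        if (index == 0) return false;     // caller must flush via runtime call
        buf[--index] = val;
        return true;
      }
    };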


 499   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 500 
 501   if (on_oop) {
 502     if (not_in_heap && !is_traversal_mode) {
 503       load_reference_barrier_native(masm, dst, src);
 504     } else {
 505       load_reference_barrier(masm, dst);
 506     }
 507 
 508     if (dst != result_dst) {
 509       __ movptr(result_dst, dst);
 510 
 511       if (!use_tmp1_for_dst) {
 512         __ pop(dst);
 513       }
 514 
 515       dst = result_dst;
 516     }
 517 
 518     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
 519       const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
 520       assert_different_registers(dst, tmp1, tmp_thread);
 521       NOT_LP64(__ get_thread(thread));
 522       // Generate the SATB pre-barrier code to log the value of
 523       // the referent field in an SATB buffer.
 524       shenandoah_write_barrier_pre(masm /* masm */,
 525                                    noreg /* obj */,
 526                                    dst /* pre_val */,
 527                                    thread /* thread */,
 528                                    tmp1 /* tmp */,
 529                                    true /* tosca_live */,
 530                                    true /* expand_call */);
 531     }
 532   }
 533 }
 534 
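The keep-alive branch handles Reference.get()-style loads: once the mutator has observed the referent, concurrent marking must observe it too, otherwise an object reachable only through the weak reference could be reclaimed while still in use. The pre-barrier therefore logs the loaded value (obj is noreg because no field is being overwritten). A sketch of the invariant, where enqueue_pre_val stands in for the SATB enqueue performed by shenandoah_write_barrier_pre:

    // Illustrative model, not HotSpot code.
    void* reference_get_model(void** referent_addr, bool marking_active,
                              void (*enqueue_pre_val)(void*)) {
      void* referent = *referent_addr;
      if (marking_active && referent != nullptr) {
        enqueue_pre_val(referent);  // keep the referent alive for the marker
      }
      return referent;
    }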
 535 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 536               Address dst, Register val, Register tmp1, Register tmp2) {
 537 
 538   bool on_oop = is_reference_type(type);
 539   bool in_heap = (decorators & IN_HEAP) != 0;
 540   bool as_normal = (decorators & AS_NORMAL) != 0;
 541   if (on_oop && in_heap) {
 542     bool needs_pre_barrier = as_normal;
 543 
 544     Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
 545     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 546     // flatten object address if needed
 547     // We do it regardless of precise because we need the registers
 548     if (dst.index() == noreg && dst.disp() == 0) {
 549       if (dst.base() != tmp1) {
 550         __ movptr(tmp1, dst.base());
 332   __ pop(thread);
 333 #endif
 334 }
 335 
 336 void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address src) {
 337   if (!ShenandoahLoadRefBarrier) {
 338     return;
 339   }
 340 
 341   Label done;
 342   Label not_null;
 343   Label slow_path;
 344   __ block_comment("load_reference_barrier_native { ");
 345 
 346   // null check
 347   __ testptr(dst, dst);
 348   __ jcc(Assembler::notZero, not_null);
 349   __ jmp(done);
 350   __ bind(not_null);
 351 
 352 #ifdef _LP64
 353   Register thread = r15_thread;
 354 #else
 355   Register thread = rcx;
 356   if (thread == dst) {
 357     thread = rbx;
 358   }
 359   __ push(thread);
 360   __ get_thread(thread);
 361 #endif
 362   assert_different_registers(dst, thread);
 363 
 364   Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 365   __ testb(gc_state, ShenandoahHeap::EVACUATION);
 366 #ifndef _LP64
 367   __ pop(thread);
 368 #endif
 369   __ jccb(Assembler::notZero, slow_path);
 370   __ jmp(done);
 371   __ bind(slow_path);


 399   __ pop(r12);
 400   __ pop(r11);
 401   __ pop(r10);
 402   __ pop(r9);
 403   __ pop(r8);
 404 #endif
 405   __ pop(rsi);
 406   __ pop(rdi);
 407   __ pop(rdx);
 408   __ pop(rcx);
 409 
 410   if (dst != rax) {
 411     __ movptr(dst, rax);
 412     __ pop(rax);
 413   }
 414 
 415   __ bind(done);
 416   __ block_comment("} load_reference_barrier_native");
 417 }
 418 
 419 #ifdef _LP64
 420 void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
 421   // Use default version
 422   BarrierSetAssembler::c2i_entry_barrier(masm);
 423 }
 424 #else
 425 void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
 426   BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 427   if (bs == NULL) {
 428     return;
 429   }
 430 
 431   Label bad_call;
 432   __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
 433   __ jcc(Assembler::equal, bad_call);
 434 
 435   Register tmp1 = rax;
 436   Register tmp2 = rcx;
 437 
 438   __ push(tmp1);
 439   __ push(tmp2);
 440 
 441   // Pointer chase to the method holder to find out if the method is concurrently unloading.
 442   Label method_live;
 443   __ load_method_holder_cld(tmp1, rbx);
 444 
 445   // Is it a strong CLD?
 446   __ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
 447   __ jcc(Assembler::greater, method_live);
 448 
 449   // Is it a weak but alive CLD?
 450   __ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
 451   __ resolve_weak_handle(tmp1, tmp2);
 452   __ cmpptr(tmp1, 0);
 453   __ jcc(Assembler::notEqual, method_live);
 454   __ pop(tmp2);
 455   __ pop(tmp1);
 456 
 457   __ bind(bad_call);
 458   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 459   __ bind(method_live);
 460   __ pop(tmp2);
 461   __ pop(tmp1);
 462 }
 463 #endif
 464 
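The 32-bit c2i entry barrier above guards against calling into a method whose class is being concurrently unloaded: it chases from the incoming Method* in rbx to the holder's ClassLoaderData and lets the call proceed only if that CLD is either strongly kept alive or its weak holder still resolves to a live object; otherwise it reroutes through the wrong-method stub. A condensed C++ model of the liveness decision (the field names mirror the checks above, but this struct is not the HotSpot API):

    struct CLDModel {
      int   keep_alive;   // > 0 => strong CLD, holder cannot be unloading
      void* weak_holder;  // resolved weak handle; nullptr once collected
    };

    inline bool method_holder_live(const CLDModel* cld) {
      if (cld->keep_alive > 0) return true;  // strong CLD check
      return cld->weak_holder != nullptr;    // weak-but-alive CLD check
    }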
 465 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
 466   if (ShenandoahStoreValEnqueueBarrier) {
 467     storeval_barrier_impl(masm, dst, tmp);
 468   }
 469 }
 470 
 471 void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
 472   assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");
 473 
 474   if (dst == noreg) return;
 475 
 476   if (ShenandoahStoreValEnqueueBarrier) {
 477     // The set of registers to be saved+restored is the same as in the write-barrier above.
 478     // Those are the commonly used registers in the interpreter.
 479     __ pusha();
 480     // __ push_callee_saved_registers();
 481     __ subptr(rsp, 2 * Interpreter::stackElementSize);
 482     __ movdbl(Address(rsp, 0), xmm0);
 483 
 484 #ifdef _LP64


 544   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 545 
 546   if (on_oop) {
 547     if (not_in_heap && !is_traversal_mode) {
 548       load_reference_barrier_native(masm, dst, src);
 549     } else {
 550       load_reference_barrier(masm, dst);
 551     }
 552 
 553     if (dst != result_dst) {
 554       __ movptr(result_dst, dst);
 555 
 556       if (!use_tmp1_for_dst) {
 557         __ pop(dst);
 558       }
 559 
 560       dst = result_dst;
 561     }
 562 
 563     if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
 564       __ push_IU_state();
 565 
 566       Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
 567       if (!thread->is_valid()) {
 568         thread = rdx;
 569       }
 570 
 571       assert_different_registers(dst, tmp1, thread);
 572       NOT_LP64(__ get_thread(thread));
 573       // Generate the SATB pre-barrier code to log the value of
 574       // the referent field in an SATB buffer.
 575       shenandoah_write_barrier_pre(masm /* masm */,
 576                                    noreg /* obj */,
 577                                    dst /* pre_val */,
 578                                    thread /* thread */,
 579                                    tmp1 /* tmp */,
 580                                    true /* tosca_live */,
 581                                    true /* expand_call */);
 582       __ pop_IU_state();
 583     }
 584   }
 585 }
 586 
 587 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 588               Address dst, Register val, Register tmp1, Register tmp2) {
 589 
 590   bool on_oop = is_reference_type(type);
 591   bool in_heap = (decorators & IN_HEAP) != 0;
 592   bool as_normal = (decorators & AS_NORMAL) != 0;
 593   if (on_oop && in_heap) {
 594     bool needs_pre_barrier = as_normal;
 595 
 596     Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
 597     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 598     // flatten object address if needed
 599     // We do it regardless of precise because we need the registers
 600     if (dst.index() == noreg && dst.disp() == 0) {
 601       if (dst.base() != tmp1) {
 602         __ movptr(tmp1, dst.base());
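The store_at hunks end midway through the address-flattening step: the barriers need the exact field address in a single register, so a bare base register is copied (as shown), while the general base+index*scale+disp form is presumably reduced with a lea-style computation in the elided branch. The arithmetic such a flattening performs, as a one-line illustrative model:

    #include <cstdint>

    // What flattening computes for a general x86 address operand.
    inline intptr_t flatten(intptr_t base, intptr_t index, int scale, int disp) {
      return base + (index << scale) + disp;
    }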