src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp

   1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 329   __ jmp(done);
 330 
 331   __ bind(runtime);
 332   // save the live input values
 333   __ push(store_addr);
 334   __ push(new_val);
 335 #ifdef _LP64
 336   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
 337 #else
 338   __ push(thread);
 339   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
 340   __ pop(thread);
 341 #endif
 342   __ pop(new_val);
 343   __ pop(store_addr);
 344 
 345   __ bind(done);
 346 }
 347 
 348 void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 349                                          Address dst, Register val, Register tmp1, Register tmp2) {
 350   bool in_heap = (decorators & IN_HEAP) != 0;
 351   bool as_normal = (decorators & AS_NORMAL) != 0;
 352   assert((decorators & IS_DEST_UNINITIALIZED) == 0, "unsupported");
 353 
 354   bool needs_pre_barrier = as_normal;
 355   bool needs_post_barrier = val != noreg && in_heap;
 356 
 357   Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);


 358   Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 359   // flatten object address if needed
 360   // We do it regardless of precise because we need the registers
 361   if (dst.index() == noreg && dst.disp() == 0) {
 362     if (dst.base() != tmp1) {
 363       __ movptr(tmp1, dst.base());
 364     }
 365   } else {
 366     __ lea(tmp1, dst);
 367   }
 368 
 369 #ifndef _LP64
 370   InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
 371 #endif
 372 
 373   NOT_LP64(__ get_thread(rcx));
 374   NOT_LP64(imasm->save_bcp());
 375 
 376   if (needs_pre_barrier) {
 377     g1_write_barrier_pre(masm /*masm*/,
 378                          tmp1 /* obj */,
 379                          tmp2 /* pre_val */,
 380                          rthread /* thread */,
 381                          tmp3  /* tmp */,
 382                          val != noreg /* tosca_live */,
 383                          false /* expand_call */);
 384   }
 385   if (val == noreg) {
 386     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
 387   } else {
 388     Register new_val = val;
 389     if (needs_post_barrier) {
 390       // G1 barrier needs uncompressed oop for region cross check.
 391       if (UseCompressedOops) {
 392         new_val = tmp2;
 393         __ movptr(new_val, val);
 394       }
 395     }
 396     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
 397     if (needs_post_barrier) {
 398       g1_write_barrier_post(masm /*masm*/,
 399                             tmp1 /* store_adr */,
 400                             new_val /* new_val */,
 401                             rthread /* thread */,
 402                             tmp3 /* tmp */,
 403                             tmp2 /* tmp2 */);
 404     }
 405   }
 406   NOT_LP64(imasm->restore_bcp());
 407 }
 408 
 409 #ifdef COMPILER1
 410 
 411 #undef __
 412 #define __ ce->masm()->
 413 
 414 void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
 415   G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
 416   // At this point we know that marking is in progress.
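
Both versions shown on this page open with the tail of the emitted G1 post barrier (lines 329-346): store_addr and new_val are still live across the leaf call, so they are saved and restored around it, and G1BarrierSetRuntime::write_ref_field_post_entry is the runtime entry the generated fast path falls back to when it cannot finish the card enqueue itself (typically a full per-thread buffer). As a rough guide to what that fast path checks before reaching the call, here is a minimal C++ sketch; every identifier and constant in it (card_table_base, region_size_log2, runtime_enqueue_card, the card encoding) is a placeholder for this sketch rather than a HotSpot name, and the young-card check and StoreLoad fence of the real barrier are omitted.

    // Illustrative-only sketch of the G1 post-barrier fast path.
    #include <cstdint>

    typedef uint8_t CardValue;
    static const CardValue dirty_card       = 0;   // assumed encoding: 0 == dirty
    static const int       region_size_log2 = 21;  // assumed 2 MB heap regions
    static const int       card_shift       = 9;   // assumed 512-byte cards
    static CardValue*      card_table_base;        // byte map covering the heap

    // Stand-in for the slow path (the write_ref_field_post_entry call above):
    // hand the freshly dirtied card to the per-thread dirty card queue machinery.
    static void runtime_enqueue_card(CardValue* card) { (void)card; }

    void g1_post_barrier(void** store_addr, void* new_val) {
      // Region cross check: if the field and the new value sit in the same heap
      // region, no remembered set entry is ever needed.
      if ((((uintptr_t)store_addr ^ (uintptr_t)new_val) >> region_size_log2) == 0) return;
      if (new_val == nullptr) return;               // storing null needs no card
      CardValue* card = card_table_base + ((uintptr_t)store_addr >> card_shift);
      if (*card == dirty_card) return;              // already recorded, nothing to do
      *card = dirty_card;                           // dirty the card ...
      runtime_enqueue_card(card);                   // ... and queue it for refinement
    }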


   1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 329   __ jmp(done);
 330 
 331   __ bind(runtime);
 332   // save the live input values
 333   __ push(store_addr);
 334   __ push(new_val);
 335 #ifdef _LP64
 336   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
 337 #else
 338   __ push(thread);
 339   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
 340   __ pop(thread);
 341 #endif
 342   __ pop(new_val);
 343   __ pop(store_addr);
 344 
 345   __ bind(done);
 346 }
 347 
 348 void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 349                                          Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
 350   bool in_heap = (decorators & IN_HEAP) != 0;
 351   bool as_normal = (decorators & AS_NORMAL) != 0;
 352   assert((decorators & IS_DEST_UNINITIALIZED) == 0, "unsupported");
 353 
 354   bool needs_pre_barrier = as_normal;
 355   bool needs_post_barrier = val != noreg && in_heap;
 356 
 357   if (tmp3 == noreg) {
 358     tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
 359   }
 360   Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 361   // flatten object address if needed
 362   // We do it regardless of precise because we need the registers
 363   if (dst.index() == noreg && dst.disp() == 0) {
 364     if (dst.base() != tmp1) {
 365       __ movptr(tmp1, dst.base());
 366     }
 367   } else {
 368     __ lea(tmp1, dst);
 369   }
 370 
 371 #ifndef _LP64
 372   InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
 373 #endif
 374 
 375   NOT_LP64(__ get_thread(rcx));
 376   NOT_LP64(imasm->save_bcp());
 377 
 378   if (needs_pre_barrier) {
 379     g1_write_barrier_pre(masm /*masm*/,
 380                          tmp1 /* obj */,
 381                          tmp2 /* pre_val */,
 382                          rthread /* thread */,
 383                          tmp3  /* tmp */,
 384                          val != noreg /* tosca_live */,
 385                          false /* expand_call */);
 386   }
 387   if (val == noreg) {
 388     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
 389   } else {
 390     Register new_val = val;
 391     if (needs_post_barrier) {
 392       // G1 barrier needs uncompressed oop for region cross check.
 393       if (UseCompressedOops) {
 394         new_val = tmp2;
 395         __ movptr(new_val, val);
 396       }
 397     }
 398     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
 399     if (needs_post_barrier) {
 400       g1_write_barrier_post(masm /*masm*/,
 401                             tmp1 /* store_adr */,
 402                             new_val /* new_val */,
 403                             rthread /* thread */,
 404                             tmp3 /* tmp */,
 405                             tmp2 /* tmp2 */);
 406     }
 407   }
 408   NOT_LP64(imasm->restore_bcp());
 409 }
 410 
 411 #ifdef COMPILER1
 412 
 413 #undef __
 414 #define __ ce->masm()->
 415 
 416 void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
 417   G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
 418   // At this point we know that marking is in progress.
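
The substantive change on this page is to oop_store_at: the new version accepts a caller-supplied tmp3 scratch register and only falls back to the previously hard-coded choice (r8 on 64-bit, rsi on 32-bit) when the caller passes noreg, and the two BarrierSetAssembler::store_at calls gain a matching extra noreg argument, presumably because that signature was widened the same way. The routine's shape is unchanged: flatten the destination address into tmp1, emit the SATB pre barrier, perform the store (keeping an uncompressed copy of the new value in tmp2 when compressed oops are in use, since the post barrier's region cross check needs the full address), then emit the post barrier. A minimal C++ stand-in for that ordering, using placeholder names rather than HotSpot ones:

    // Minimal sketch of the barrier ordering oop_store_at emits; both helpers
    // are stand-ins (the post barrier is sketched more fully earlier on this page).
    static void satb_pre_barrier(void** field) {
      // While concurrent marking is active, record the value about to be
      // overwritten so the snapshot-at-the-beginning invariant holds. Elided.
      (void)field;
    }

    static void g1_post_barrier_stub(void** field, void* new_val) {
      // Card marking / remembered set maintenance. Elided.
      (void)field; (void)new_val;
    }

    void store_heap_oop_with_barriers(void** field, void* new_val) {
      satb_pre_barrier(field);                  // before the store: old value
      *field = new_val;                         // the store itself
      if (new_val != nullptr) {                 // a constant null store (val == noreg
        g1_post_barrier_stub(field, new_val);   // above) skips the post barrier entirely
      }
    }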

