
src/hotspot/share/opto/library_call.cpp


2422          "fieldOffset must be byte-scaled");
2423   // 32-bit machines ignore the high half!
2424   offset = ConvL2X(offset);
2425   adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2426 
2427   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2428     heap_base_oop = base;
2429   } else if (type == T_OBJECT) {
2430     return false; // off-heap oop accesses are not supported
2431   }
2432 
2433   // Can base be NULL? Otherwise, always on-heap access.
2434   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2435 
2436   if (!can_access_non_heap) {
2437     decorators |= IN_HEAP;
2438   }
2439 
2440   val = is_store ? argument(4) : NULL;
2441 
2442   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2443   if (adr_type == TypePtr::NULL_PTR) {
2444     return false; // off-heap access with zero address
2445   }
2446 
2447   // Try to categorize the address.
2448   Compile::AliasType* alias_type = C->alias_type(adr_type);
2449   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2450 
2451   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2452       alias_type->adr_type() == TypeAryPtr::RANGE) {
2453     return false; // not supported
2454   }
2455 
2456   bool mismatched = false;
2457   BasicType bt = alias_type->basic_type();
2458   if (bt != T_ILLEGAL) {
2459     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2460     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2461       // Alias type doesn't differentiate between byte[] and boolean[].
2462       // Use address type to get the element type.
2463       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2464     }
2465     if (bt == T_ARRAY || bt == T_NARROWOOP) {
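
Aside, not part of the patch: the categorization above follows from what the Java caller hands to Unsafe. A non-null object base makes this an on-heap access (and the IN_HEAP decorator is added), a null base with an absolute native address is an off-heap access, and an off-heap oop access makes the intrinsic bail out. A minimal, hypothetical Java sketch of the two supported shapes; it assumes the example is compiled and run with --add-exports java.base/jdk.internal.misc=ALL-UNNAMED:

import jdk.internal.misc.Unsafe;

// Hypothetical example, not from the patch: the access shapes the intrinsic
// distinguishes when categorizing the address.
public class UnsafeAccessKinds {
    static final Unsafe U = Unsafe.getUnsafe();

    static class Holder { int value = 42; }

    public static void main(String[] args) throws Exception {
        // On-heap access: base is a non-null oop, offset is a field offset.
        Holder h = new Holder();
        long fieldOff = U.objectFieldOffset(Holder.class.getDeclaredField("value"));
        int onHeap = U.getInt(h, fieldOff);

        // Off-heap access: base is null, offset is an absolute native address.
        long addr = U.allocateMemory(4);
        U.putInt(null, addr, 7);
        int offHeap = U.getInt(null, addr);
        U.freeMemory(addr);

        System.out.println(onHeap + " " + offHeap);
    }
}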


4484     set_all_memory( _gvn.transform(result_mem));
4485   } // original reexecute is set back here
4486 
4487   set_result(_gvn.transform(result_val));
4488   return true;
4489 }
4490 
4491 // If we have a tightly coupled allocation, the arraycopy may take care
4492 // of the array initialization. If one of the guards we insert between
4493 // the allocation and the arraycopy causes a deoptimization, an
4494 // uninitialized array will escape the compiled method. To prevent that
4495 // we set the JVM state for uncommon traps between the allocation and
4496 // the arraycopy to the state before the allocation so, in case of
4497 // deoptimization, we'll reexecute the allocation and the
4498 // initialization.
4499 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4500   if (alloc != NULL) {
4501     ciMethod* trap_method = alloc->jvms()->method();
4502     int trap_bci = alloc->jvms()->bci();
4503 
4504     if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4505         !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4506       // Make sure there's no store between the allocation and the
4507       // arraycopy, otherwise visible side effects could be re-executed
4508       // in case of deoptimization and cause incorrect execution.
4509       bool no_interfering_store = true;
4510       Node* mem = alloc->in(TypeFunc::Memory);
4511       if (mem->is_MergeMem()) {
4512         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4513           Node* n = mms.memory();
4514           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4515             assert(n->is_Store(), "what else?");
4516             no_interfering_store = false;
4517             break;
4518           }
4519         }
4520       } else {
4521         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4522           Node* n = mms.memory();
4523           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4524             assert(n->is_Store(), "what else?");
4525             no_interfering_store = false;
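
Aside, not part of the patch: the "tightly coupled allocation" the comment above refers to is the common Java pattern where the destination array is allocated immediately before the arraycopy, so the copy doubles as the array's initialization. A minimal, hypothetical sketch of that shape:

// Hypothetical example: the destination is allocated right before the copy,
// so C2 can let the arraycopy stand in for the array initialization. If a
// guard between the allocation and the copy deoptimizes, the JVM state saved
// by arraycopy_restore_alloc_state() makes the interpreter redo both the
// allocation and the initialization, so no uninitialized array escapes.
public class TightlyCoupledCopy {
    static int[] duplicate(int[] src) {
        int[] dst = new int[src.length];               // allocation
        System.arraycopy(src, 0, dst, 0, src.length);  // initializes dst
        return dst;
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(duplicate(new int[] {1, 2, 3})));
    }
}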




2422          "fieldOffset must be byte-scaled");
2423   // 32-bit machines ignore the high half!
2424   offset = ConvL2X(offset);
2425   adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2426 
2427   if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2428     heap_base_oop = base;
2429   } else if (type == T_OBJECT) {
2430     return false; // off-heap oop accesses are not supported
2431   }
2432 
2433   // Can base be NULL? Otherwise, always on-heap access.
2434   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2435 
2436   if (!can_access_non_heap) {
2437     decorators |= IN_HEAP;
2438   }
2439 
2440   val = is_store ? argument(4) : NULL;
2441 
2442   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();



2443 
2444   // Try to categorize the address.
2445   Compile::AliasType* alias_type = C->alias_type(adr_type);
2446   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2447 
2448   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2449       alias_type->adr_type() == TypeAryPtr::RANGE) {
2450     return false; // not supported
2451   }
2452 
2453   bool mismatched = false;
2454   BasicType bt = alias_type->basic_type();
2455   if (bt != T_ILLEGAL) {
2456     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2457     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2458       // Alias type doesn't differentiate between byte[] and boolean[].
2459       // Use address type to get the element type.
2460       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2461     }
2462     if (bt == T_ARRAY || bt == T_NARROWOOP) {


4481     set_all_memory( _gvn.transform(result_mem));
4482   } // original reexecute is set back here
4483 
4484   set_result(_gvn.transform(result_val));
4485   return true;
4486 }
4487 
4488 // If we have a tightly coupled allocation, the arraycopy may take care
4489 // of the array initialization. If one of the guards we insert between
4490 // the allocation and the arraycopy causes a deoptimization, an
4491 // uninitialized array will escape the compiled method. To prevent that
4492 // we set the JVM state for uncommon traps between the allocation and
4493 // the arraycopy to the state before the allocation so, in case of
4494 // deoptimization, we'll reexecute the allocation and the
4495 // initialization.
4496 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4497   if (alloc != NULL) {
4498     ciMethod* trap_method = alloc->jvms()->method();
4499     int trap_bci = alloc->jvms()->bci();
4500 
4501     if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4502         !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4503       // Make sure there's no store between the allocation and the
4504       // arraycopy, otherwise visible side effects could be re-executed
4505       // in case of deoptimization and cause incorrect execution.
4506       bool no_interfering_store = true;
4507       Node* mem = alloc->in(TypeFunc::Memory);
4508       if (mem->is_MergeMem()) {
4509         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4510           Node* n = mms.memory();
4511           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4512             assert(n->is_Store(), "what else?");
4513             no_interfering_store = false;
4514             break;
4515           }
4516         }
4517       } else {
4518         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4519           Node* n = mms.memory();
4520           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4521             assert(n->is_Store(), "what else?");
4522             no_interfering_store = false;

