#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/barrierSet.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#endif
61
62
63 // Implementation of StubAssembler
64
65 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
66 _name = name;
67 _must_gc_arguments = false;
68 _frame_size = no_frame_size;
69 _num_rt_args = 0;
70 _stub_id = stub_id;
71 }
72
73
74 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
75 _name = name;
76 _must_gc_arguments = must_gc_arguments;
77 }
78
79
80 void StubAssembler::set_frame_size(int size) {
81 if (_frame_size == no_frame_size) {
182 // create code buffer for code storage
183 CodeBuffer code(buffer_blob);
184
185 Compilation::setup_code_buffer(&code, 0);
186
187 // create assembler for code generation
188 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
189 // generate code for runtime stub
190 OopMapSet* oop_maps;
191 oop_maps = generate_code_for(id, sasm);
192 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
193 "if stub has an oop map it must have a valid frame size");
194
195 #ifdef ASSERT
196 // Make sure that stubs that need oopmaps have them
197 switch (id) {
198 // These stubs don't need to have an oopmap
199 case dtrace_object_alloc_id:
200 case g1_pre_barrier_slow_id:
201 case g1_post_barrier_slow_id:
202 case slow_subtype_check_id:
203 case fpu2long_stub_id:
204 case unwind_exception_id:
205 case counter_overflow_id:
206 #if defined(SPARC) || defined(PPC)
207 case handle_exception_nofpu_id: // Unused on sparc
208 #endif
209 break;
210
211 // All other stubs should have oopmaps
212 default:
213 assert(oop_maps != NULL, "must have an oopmap");
214 }
215 #endif
216
217 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
218 sasm->align(BytesPerWord);
219 // make sure all code is in code buffer
220 sasm->flush();
221 // create blob - distinguish a few special cases
1289
1290
// Array copy return codes.
// Returned by the arraycopy helpers below to the generated stubs:
// ac_ok means the copy completed, ac_failed means the caller must fall
// back to the checked/slow path.
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0 // arraycopy succeeded
};
1296
1297
1298 // Below length is the # elements copied.
1299 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1300 oopDesc* dst, T* dst_addr,
1301 int length) {
1302
1303 // For performance reasons, we assume we are using a card marking write
1304 // barrier. The assert will fail if this is not the case.
1305 // Note that we use the non-virtual inlineable variant of write_ref_array.
1306 BarrierSet* bs = Universe::heap()->barrier_set();
1307 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
1308 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
1309 if (src == dst) {
1310 // same object, no check
1311 bs->write_ref_array_pre(dst_addr, length);
1312 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1313 bs->write_ref_array((HeapWord*)dst_addr, length);
1314 return ac_ok;
1315 } else {
1316 Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
1317 Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
1318 if (stype == bound || stype->is_subtype_of(bound)) {
1319 // Elements are guaranteed to be subtypes, so no check necessary
1320 bs->write_ref_array_pre(dst_addr, length);
1321 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1322 bs->write_ref_array((HeapWord*)dst_addr, length);
1323 return ac_ok;
1324 }
1325 }
1326 return ac_failed;
1327 }
1328
|
41 #include "gc_interface/collectedHeap.hpp"
42 #include "interpreter/bytecode.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "jfr/support/jfrIntrinsics.hpp"
45 #include "memory/allocation.inline.hpp"
46 #include "memory/barrierSet.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "runtime/biasedLocking.hpp"
52 #include "runtime/compilationPolicy.hpp"
53 #include "runtime/interfaceSupport.hpp"
54 #include "runtime/javaCalls.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/threadCritical.hpp"
57 #include "runtime/vframe.hpp"
58 #include "runtime/vframeArray.hpp"
59 #include "utilities/copy.hpp"
60 #include "utilities/events.hpp"
61 #include "utilities/macros.hpp"
62 #if INCLUDE_ALL_GCS
63 #include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
64 #endif
65
66 // Implementation of StubAssembler
67
68 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
69 _name = name;
70 _must_gc_arguments = false;
71 _frame_size = no_frame_size;
72 _num_rt_args = 0;
73 _stub_id = stub_id;
74 }
75
76
77 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
78 _name = name;
79 _must_gc_arguments = must_gc_arguments;
80 }
81
82
83 void StubAssembler::set_frame_size(int size) {
84 if (_frame_size == no_frame_size) {
185 // create code buffer for code storage
186 CodeBuffer code(buffer_blob);
187
188 Compilation::setup_code_buffer(&code, 0);
189
190 // create assembler for code generation
191 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
192 // generate code for runtime stub
193 OopMapSet* oop_maps;
194 oop_maps = generate_code_for(id, sasm);
195 assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
196 "if stub has an oop map it must have a valid frame size");
197
198 #ifdef ASSERT
199 // Make sure that stubs that need oopmaps have them
200 switch (id) {
201 // These stubs don't need to have an oopmap
202 case dtrace_object_alloc_id:
203 case g1_pre_barrier_slow_id:
204 case g1_post_barrier_slow_id:
205 case shenandoah_lrb_slow_id:
206 case slow_subtype_check_id:
207 case fpu2long_stub_id:
208 case unwind_exception_id:
209 case counter_overflow_id:
210 #if defined(SPARC) || defined(PPC)
211 case handle_exception_nofpu_id: // Unused on sparc
212 #endif
213 break;
214
215 // All other stubs should have oopmaps
216 default:
217 assert(oop_maps != NULL, "must have an oopmap");
218 }
219 #endif
220
221 // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
222 sasm->align(BytesPerWord);
223 // make sure all code is in code buffer
224 sasm->flush();
225 // create blob - distinguish a few special cases
1293
1294
// Array copy return codes.
// Returned by the arraycopy helpers below to the generated stubs:
// ac_ok means the copy completed, ac_failed means the caller must fall
// back to the checked/slow path.
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0 // arraycopy succeeded
};
1300
1301
1302 // Below length is the # elements copied.
1303 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1304 oopDesc* dst, T* dst_addr,
1305 int length) {
1306
1307 // For performance reasons, we assume we are using a card marking write
1308 // barrier. The assert will fail if this is not the case.
1309 // Note that we use the non-virtual inlineable variant of write_ref_array.
1310 BarrierSet* bs = Universe::heap()->barrier_set();
1311 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
1312 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
1313
1314 #if INCLUDE_ALL_GCS
1315 if (UseShenandoahGC) {
1316 ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length);
1317 }
1318 #endif
1319
1320 if (src == dst) {
1321 // same object, no check
1322 bs->write_ref_array_pre(dst_addr, length);
1323 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1324 bs->write_ref_array((HeapWord*)dst_addr, length);
1325 return ac_ok;
1326 } else {
1327 Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
1328 Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
1329 if (stype == bound || stype->is_subtype_of(bound)) {
1330 // Elements are guaranteed to be subtypes, so no check necessary
1331 bs->write_ref_array_pre(dst_addr, length);
1332 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1333 bs->write_ref_array((HeapWord*)dst_addr, length);
1334 return ac_ok;
1335 }
1336 }
1337 return ac_failed;
1338 }
1339
|