1 /*
2 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/oopsHierarchy.hpp"
34 #include "runtime/globals.hpp"
35 #include "utilities/debug.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
38 // This metafunction returns either oop or narrowOop depending on whether
39 // an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression applies only when the access is tagged as a compressed-oop
  // conversion point AND the runtime has been determined to use compressed
  // oops (both INTERNAL decorators present).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  // narrowOop when the heap slot is compressed, otherwise plain oop.
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
46
47 namespace AccessInternal {
  // Identifies which barrier entry point an access resolves to. The *_AT
  // variants address a field as (base oop, byte offset); the plain variants
  // take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE
  };
60
  // True iff the access carries oop values, the in-heap slot type is
  // narrowOop (compressed), and the value type seen by the caller is oop -
  // i.e. an encode/decode step between oop and narrowOop is required.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};
66
67 // This metafunction returns an appropriate oop type if the value is oop-like
68 // and otherwise returns the same type T.
69 template <DecoratorSet decorators, typename T>
70 struct EncodedType: AllStatic {
71 using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
72 typename HeapOopType<decorators>::type,
73 T>;
74 };
75
76 template <DecoratorSet decorators>
77 inline typename HeapOopType<decorators>::type*
78 oop_field_addr(oop base, ptrdiff_t byte_offset) {
79 return reinterpret_cast<typename HeapOopType<decorators>::type*>(
80 reinterpret_cast<intptr_t>((void*)base) + byte_offset);
81 }
82
  // Function pointer types for each barrier entry point, for a concrete
  // value type T. These are the signatures installed into RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    // Accesses addressed as (base oop, byte offset).
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    // Accesses addressed by raw pointer.
    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    // Bulk operations. arraycopy takes either holder objects or raw element
    // pointers for source and destination.
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  };
100
  // Specialization for untyped (void) element accesses: only arraycopy is
  // expressible without a concrete element type.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };
107
  // Selects the function pointer type matching a (decorators, T, barrier)
  // triple. The primary template is empty; the macro below generates one
  // specialization per BarrierType, each exposing the corresponding typedef
  // from AccessFunctionTypes as ::type.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION
126
  // Resolve the accessor function to install for a given barrier type,
  // for primitive and oop accesses respectively. Declared here; defined
  // elsewhere.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Resolve (base, byte offset) to a raw field address.
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently access is included.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
150 }
151
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed so that the
// raw barrier instantiations stay canonical (fewer template instantiations).
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
156
157 // The RawAccessBarrier performs raw accesses with additional knowledge of
158 // memory ordering, so that OrderAccess/Atomic is called when necessary.
159 // It additionally handles compressed oops, and hence is not completely "raw"
160 // strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  // Resolve (base, byte offset) to a raw field address.
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // oop -> heap representation (narrowOop) conversion; declared here,
  // defined out of line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // No conversion needed: the value passes through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Encode a value into its in-heap representation for these decorators.
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // Heap representation (narrowOop) -> oop conversion; declared here,
  // defined out of line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // No conversion needed: the value passes through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  // Decode an in-heap representation back to the caller's value type.
  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // Primitive load/store/atomic implementations, overloaded (via EnableIf)
  // on the memory ordering decorator. Only the MO_UNORDERED variants are
  // defined inline here; the stronger orderings are declared and defined
  // out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // MO_UNORDERED: plain load.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // MO_UNORDERED: plain store.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  // Note: only an MO_SEQ_CST overload is declared for atomic_xchg here.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

public:
  // Public primitive entry points; the memory ordering is selected by the
  // class' decorators.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants additionally encode/decode between the caller's oop value
  // and the in-heap representation (narrowOop when compressed).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  // *_at variants resolve (base, offset) to an address and delegate to the
  // raw-address entry point.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
};
340
namespace AccessInternal {
  // Debug-only check that the current thread is in a state where performing
  // an access is legal; compiles to nothing in product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
345
346 // Below is the implementation of the first 4 steps of the template pipeline:
347 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
348 // and sets default decorators to sensible values.
349 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
350 // multiple types. The P type of the address and T type of the value must
351 // match.
352 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
353 // avoided, and in that case avoids it (calling raw accesses or
354 // primitive accesses in a build that does not require primitive GC barriers)
355 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
356 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
357 // to the access.
358
359 namespace AccessInternal {
  // Helper for OopOrNarrowOop below: everything maps to oop, except
  // narrowOop which maps to itself.
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };
369
  // This metafunction returns a canonicalized oop/narrowOop type for oop-like
  // types passed in from oop_* overloads where the user has sworn that the
  // passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    // decay_t strips cv-qualifiers and references before canonicalization.
    typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
  };
383
384 inline void* field_addr(oop base, ptrdiff_t byte_offset) {
385 return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
386 }
387 // Step 4: Runtime dispatch
388 // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
389 // accessor. This is required when the access either depends on whether compressed oops
390 // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
391 // barriers). The way it works is that a function pointer initially pointing to an
392 // accessor resolution function gets called for each access. Upon first invocation,
393 // it resolves which accessor to be used in future invocations and patches the
394 // function pointer to this new accessor.
395
  // Primary template: never used directly; only the per-barrier
  // specializations below carry members.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};
398
  // BARRIER_STORE: store to a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    // Initially points to store_init; patched to the resolved accessor on
    // first invocation (see "Step 4" comment above).
    static func_t _store_func;

    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };

  // BARRIER_STORE_AT: store to (base oop, byte offset).
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };

  // BARRIER_LOAD: load from a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };

  // BARRIER_LOAD_AT: load from (base oop, byte offset).
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };
450
  // BARRIER_ATOMIC_CMPXCHG: compare-and-exchange at a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };

  // BARRIER_ATOMIC_CMPXCHG_AT: compare-and-exchange at (base oop, offset).
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };

  // BARRIER_ATOMIC_XCHG: exchange at a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };

  // BARRIER_ATOMIC_XCHG_AT: exchange at (base oop, byte offset).
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
502
  // BARRIER_ARRAYCOPY: bulk element copy between arrays.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      assert_access_thread_state();
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  // BARRIER_CLONE: clone one object into another.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
534
  // Initialize the function pointers to point to the resolving function.
  // The first call through each pointer runs the corresponding *_init, which
  // resolves the real accessor and patches the pointer for subsequent calls.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
575
576 // Step 3: Pre-runtime dispatching.
577 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
578 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
579 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
580 // not possible.
581 struct PreRuntimeDispatch: AllStatic {
    // A raw access can be hardwired (no runtime dispatch) unless it is an
    // oop access whose compressed-oops encoding is not yet statically known.
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    // The decorator pair that selects the compressed-oop conversion path.
    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // Non-oop (primitive) accesses can be hardwired straight to the raw
    // layer in this dispatch (when no primitive GC barriers are required).
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
596
    // store: hardwired raw access; goes straight to RawAccessBarrier with
    // decorators reduced to the raw-relevant subset.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    // store: raw access whose compressed-oop mode is only known at runtime;
    // consult UseCompressedOops, expand the decorators, and re-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    // store: non-raw access; primitives are hardwired to raw, oop values go
    // through the runtime dispatch (GC barriers may be required).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }
633
    // store_at: raw access; resolve the field address and delegate to store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // store_at: non-raw access; primitives are hardwired to raw, oop values
    // go through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }
652
    // load: hardwired raw access.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // load: raw access whose compressed-oop mode is only known at runtime;
    // consult UseCompressedOops, expand the decorators, and re-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // load: non-raw access; primitives are hardwired to raw, oop values go
    // through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }
689
    // load_at: raw access; resolve the field address and delegate to load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // load_at: non-raw access; primitives are hardwired to raw, oop values
    // go through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }
708
    // atomic_cmpxchg: hardwired raw access.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
      } else {
        return Raw::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }

    // atomic_cmpxchg: raw access whose compressed-oop mode is only known at
    // runtime; consult UseCompressedOops, expand decorators, re-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      }
    }

    // atomic_cmpxchg: non-raw access; primitives are hardwired to raw, oop
    // values go through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }
745
    // atomic_cmpxchg_at: raw access; resolve the field address and delegate.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
    }

    // atomic_cmpxchg_at: non-raw access; primitives are hardwired to raw,
    // oop values go through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
      }
    }
764
    // atomic_xchg: hardwired raw access.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(addr, new_value);
      } else {
        return Raw::atomic_xchg(addr, new_value);
      }
    }

    // atomic_xchg: raw access whose compressed-oop mode is only known at
    // runtime; consult UseCompressedOops, expand decorators, re-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      }
    }

    // atomic_xchg: non-raw access; primitives are hardwired to raw, oop
    // values go through the runtime dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
      }
    }
801
802 template <DecoratorSet decorators, typename T>
803 inline static typename EnableIf<
804 HasDecorator<decorators, AS_RAW>::value, T>::type
805 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
806 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
807 }
808
809 template <DecoratorSet decorators, typename T>
810 inline static typename EnableIf<
811 !HasDecorator<decorators, AS_RAW>::value, T>::type
812 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
813 if (is_hardwired_primitive<decorators>()) {
814 const DecoratorSet expanded_decorators = decorators | AS_RAW;
815 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
816 } else {
817 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
818 }
819 }
820
821 template <DecoratorSet decorators, typename T>
822 inline static typename EnableIf<
823 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
824 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
825 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
826 size_t length) {
827 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
828 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
829 return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
830 dst_obj, dst_offset_in_bytes, dst_raw,
831 length);
832 } else {
833 return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
834 dst_obj, dst_offset_in_bytes, dst_raw,
835 length);
836 }
837 }
838
    // Raw arraycopy where compressed-oop use cannot be hardwired: probe
    // UseCompressedOops at runtime, record the answer in the decorator set,
    // and re-dispatch so the choice becomes a compile-time property.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      }
    }
857
858 template <DecoratorSet decorators, typename T>
859 inline static typename EnableIf<
860 !HasDecorator<decorators, AS_RAW>::value, bool>::type
861 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
862 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
863 size_t length) {
864 if (is_hardwired_primitive<decorators>()) {
865 const DecoratorSet expanded_decorators = decorators | AS_RAW;
866 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
867 dst_obj, dst_offset_in_bytes, dst_raw,
868 length);
869 } else {
870 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
871 dst_obj, dst_offset_in_bytes, dst_raw,
872 length);
873 }
874 }
875
876 template <DecoratorSet decorators>
877 inline static typename EnableIf<
878 HasDecorator<decorators, AS_RAW>::value>::type
879 clone(oop src, oop dst, size_t size) {
880 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
881 Raw::clone(src, dst, size);
882 }
883
    // Non-raw clone: always resolved through the runtime dispatch table so a
    // GC-specific barrier implementation can be selected.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }
890 };
891
892 // Step 2: Reduce types.
893 // Enforce that for non-oop types, T and P have to be strictly the same.
894 // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
897 // the subsequent table. (columns are P, rows are T)
898 // | | HeapWord | oop | narrowOop |
899 // | oop | rt-comp | hw-none | hw-comp |
900 // | narrowOop | x | x | hw-none |
901 //
902 // x means not allowed
903 // rt-comp means it must be checked at runtime whether the oop is compressed.
904 // hw-none means it is statically known the oop will not be compressed.
905 // hw-comp means it is statically known the oop will be compressed.
906
  // Generic fallback: the address type and value type already agree, so no
  // compressed-oop conversion is needed; dispatch directly.
  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }
911
912 template <DecoratorSet decorators>
913 inline void store_reduce_types(narrowOop* addr, oop value) {
914 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
915 INTERNAL_RT_USE_COMPRESSED_OOPS;
916 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
917 }
918
919 template <DecoratorSet decorators>
920 inline void store_reduce_types(narrowOop* addr, narrowOop value) {
921 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
922 INTERNAL_RT_USE_COMPRESSED_OOPS;
923 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
924 }
925
926 template <DecoratorSet decorators>
927 inline void store_reduce_types(HeapWord* addr, oop value) {
928 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
929 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
930 }
931
  // Generic fallback: the address type and value type already agree, so no
  // compressed-oop conversion is needed; dispatch directly.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
  }
936
937 template <DecoratorSet decorators>
938 inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
939 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
940 INTERNAL_RT_USE_COMPRESSED_OOPS;
941 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
942 }
943
944 template <DecoratorSet decorators>
945 inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
946 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
947 INTERNAL_RT_USE_COMPRESSED_OOPS;
948 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
949 }
950
951 template <DecoratorSet decorators>
952 inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
953 oop compare_value,
954 oop new_value) {
955 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
956 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
957 }
958
959 template <DecoratorSet decorators, typename T>
960 inline T atomic_xchg_reduce_types(T* addr, T new_value) {
961 const DecoratorSet expanded_decorators = decorators;
962 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
963 }
964
965 template <DecoratorSet decorators>
966 inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
967 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
968 INTERNAL_RT_USE_COMPRESSED_OOPS;
969 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
970 }
971
972 template <DecoratorSet decorators>
973 inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
974 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
975 INTERNAL_RT_USE_COMPRESSED_OOPS;
976 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
977 }
978
979 template <DecoratorSet decorators>
980 inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
981 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
982 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
983 }
984
  // Generic fallback: the address type and requested value type already
  // agree, so no compressed-oop conversion is needed; dispatch directly.
  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }
989
990 template <DecoratorSet decorators, typename T>
991 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
992 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
993 INTERNAL_RT_USE_COMPRESSED_OOPS;
994 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
995 }
996
997 template <DecoratorSet decorators, typename T>
998 inline oop load_reduce_types(HeapWord* addr) {
999 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1000 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1001 }
1002
  // Generic fallback: source and destination element types already agree, so
  // no compressed-oop conversion is needed; dispatch directly.
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length) {
    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                     length);
  }
1011
1012 template <DecoratorSet decorators>
1013 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1014 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1015 size_t length) {
1016 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1017 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1018 dst_obj, dst_offset_in_bytes, dst_raw,
1019 length);
1020 }
1021
1022 template <DecoratorSet decorators>
1023 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1024 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1025 size_t length) {
1026 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1027 INTERNAL_RT_USE_COMPRESSED_OOPS;
1028 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1029 dst_obj, dst_offset_in_bytes, dst_raw,
1030 length);
1031 }
1032
1033 // Step 1: Set default decorators. This step remembers if a type was volatile
1034 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1035 // memory ordering is set for the access, and the implied decorator rules
1036 // are applied to select sensible defaults for decorators that have not been
1037 // explicitly set. For example, default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
  // that the passed in types make sense.
1041
  // Statically rejects value types that the primitive Access functions cannot
  // handle. Oop values carry INTERNAL_VALUE_IS_OOP and have been validated by
  // the oop entry points already, so they pass unconditionally.
  template <DecoratorSet decorators, typename T>
  static void verify_types(){
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                   std::is_floating_point<T>::value)); // not allowed primitive type
  }
1050
  // Step 1 entry point for address-based stores: verifies T is a legal value
  // type, decays P/T, applies decorator defaults, then reduces P/T pairs.
  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    // Decay the value first so conversions (e.g. reference removal) happen
    // before type reduction.
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    // const_cast strips the CV qualifiers that DecayedP dropped from P.
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }
1064
  // Step 1 entry point for offset-based stores into a heap object: verifies
  // and decays T, expands decorator defaults, and tags oop values so later
  // stages know compressed-oop conversion may be needed.
  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT decayed_value = value;
    // Oop values may need compressing depending on the runtime flag, so
    // remember that with INTERNAL_CONVERT_COMPRESSED_OOP.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }
1075
  // Step 1 entry point for address-based loads: verifies T, decays P/T (oop
  // loads map T to oop or narrowOop via OopOrNarrowOop), applies decorator
  // defaults, then reduces the P/T combination.
  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    // const_cast strips the CV qualifiers that DecayedP dropped from P.
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }
1090
  // Step 1 entry point for offset-based loads from a heap object: verifies T,
  // maps oop loads to oop/narrowOop, expands decorator defaults, and tags oop
  // values for possible compressed-oop conversion.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }
1104
  // Step 1 entry point for address-based compare-and-exchange: verifies and
  // decays the types, defaults the memory ordering to MO_SEQ_CST when no
  // ordering decorator was given, and reduces the P/T combination.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Atomic operations default to sequentially consistent ordering.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                            compare_decayed_value,
                                                            new_decayed_value);
  }
1119
  // Step 1 entry point for offset-based compare-and-exchange in a heap
  // object: verifies/decays T, defaults ordering to MO_SEQ_CST, and tags oop
  // values for possible compressed-oop conversion before dispatching.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                   new_decayed_value);
  }
1137
  // Step 1 entry point for address-based atomic exchange: verifies and decays
  // the types, forces MO_SEQ_CST ordering, and reduces the P/T combination.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(P* addr, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                         new_decayed_value);
  }
1149
  // Step 1 entry point for offset-based atomic exchange in a heap object:
  // verifies/decays T, forces MO_SEQ_CST ordering, and tags oop values for
  // possible compressed-oop conversion before dispatching.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
  }
1161
  // Step 1 entry point for array copies: statically checks the element type
  // (void is allowed for type-erased copies), marks the access as an in-heap
  // array access, and reduces the element-type combination.
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                   std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
    using DecayedT = std::decay_t<T>;
    // Array copies are always in-heap array accesses.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    // const_cast: the reduce/dispatch layers take non-const pointers even for
    // the source side.
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }
1175
1176 template <DecoratorSet decorators>
1177 inline void clone(oop src, oop dst, size_t size) {
1178 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1179 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1180 }
1181
1182 // Infer the type that should be returned from an Access::oop_load.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    // Address of the slot to load from; the load is deferred until the proxy
    // is converted or compared, so the requested result type can steer it.
    P *const _addr;
  public:
    explicit OopLoadProxy(P* addr) : _addr(addr) {}

    // Conversion to oop: performs the load now, as an oop access.
    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    // Conversion to narrowOop: performs the load now, keeping the compressed
    // representation.
    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    // Comparisons load with the type of the right-hand side.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }

    // nullptr comparisons load as an uncompressed oop.
    inline bool operator ==(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
    }

    inline bool operator !=(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
    }
  };
1216
1217 // Infer the type that should be returned from an Access::load_at.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    // Object and byte offset of the field; the load is deferred until the
    // proxy is converted, so the requested result type can steer it.
    const oop _base;
    const ptrdiff_t _offset;
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Conversion to any T performs the load with that type.
    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    // Comparisons load with the type of the right-hand side.
    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };
1237
1238 // Infer the type that should be returned from an Access::oop_load_at.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    // Object and byte offset of the oop field; the load is deferred until the
    // proxy is converted or compared, so the result type can steer it.
    const oop _base;
    const ptrdiff_t _offset;
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Conversion to oop: performs the load now, as an oop access.
    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    // Conversion to narrowOop: performs the load now, keeping the compressed
    // representation.
    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    // Comparisons load with the type of the right-hand side.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
1265 }
1266
1267 #endif // SHARE_OOPS_ACCESSBACKEND_HPP