1 /*
2 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/oopsHierarchy.hpp"
34 #include "runtime/globals.hpp"
35 #include "utilities/debug.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
// Result from oop_arraycopy
enum class OopCopyResult {
  ok,                       // oop array copy successful
  failed_check_class_cast,  // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  failed_check_null         // oop array copy failed null check (ARRAYCOPY_NOTNULL)
};
44
45 // This metafunction returns either oop or narrowOop depending on whether
46 // an access needs to use compressed oops or not.
47 template <DecoratorSet decorators>
48 struct HeapOopType: AllStatic {
49 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
50 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
51 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
52 };
53
namespace AccessInternal {
  // Identifies which access primitive a runtime-dispatched function pointer
  // implements. The *_AT variants address a field relative to an oop base;
  // the plain variants take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE
  };

  // True iff the access passes oop values (INTERNAL_VALUE_IS_OOP) of type oop
  // while the in-heap representation is narrowOop, i.e. the access must
  // encode/decode between the compressed and uncompressed representations.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Address of the (possibly compressed) oop field at byte_offset inside base.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Function pointer signatures used by the runtime dispatch machinery,
  // one per BarrierType.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                              size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  };

  // Specialization for a type-erased (void) element type: only arraycopy
  // is provided.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                              size_t length);
  };

  // Maps a BarrierType to its function pointer type; specialized by the
  // macro below for each barrier.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolves the accessor function for a barrier; defined out of line.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  // As above, but for oop values (GC barriers may be required).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}
158
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed (the raw layer
// only cares about internal bits, memory ordering, arraycopy hints and
// IS_NOT_NULL).
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
163
164 // The RawAccessBarrier performs raw accesses with additional knowledge of
165 // memory ordering, so that OrderAccess/Atomic is called when necessary.
166 // It additionally handles compressed oops, and hence is not completely "raw"
167 // strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // encode() converts an oop-typed value to its in-heap representation.
  // Only encode if INTERNAL_VALUE_IS_OOP: the converting overload is selected
  // when the value must be compressed (MustConvertCompressedOop); otherwise
  // the value passes through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // Pass-through overload: no compression needed.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // decode() converts the in-heap representation back to the value type.
  // Only decode if INTERNAL_VALUE_IS_OOP; the pass-through overload handles
  // everything else.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load/store/cmpxchg/xchg primitives, selected by the memory ordering
  // decorator (MO_*). Only the MO_UNORDERED variants are defined inline;
  // the ordered/atomic variants are defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // Unordered load: a plain memory read.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // Unordered store: a plain memory write.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

public:
  // Public access API. The *_at variants resolve the field address from an
  // oop base plus byte offset; the oop_* variants additionally encode/decode
  // between the value type and the in-heap oop representation.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
};
347
namespace AccessInternal {
  // Debug-only sanity check that the current thread may perform heap accesses;
  // compiled out in product builds via DEBUG_ONLY.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
352
353 // Below is the implementation of the first 4 steps of the template pipeline:
354 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
355 // and sets default decorators to sensible values.
356 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
357 // multiple types. The P type of the address and T type of the value must
358 // match.
359 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
360 // avoided, and in that case avoids it (calling raw accesses or
361 // primitive accesses in a build that does not require primitive GC barriers)
362 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
363 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
364 // to the access.
365
366 namespace AccessInternal {
367 template <typename T>
368 struct OopOrNarrowOopInternal: AllStatic {
369 typedef oop type;
370 };
371
372 template <>
373 struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
374 typedef narrowOop type;
375 };
376
377 // This metafunction returns a canonicalized oop/narrowOop type for a passed
378 // in oop-like types passed in from oop_* overloads where the user has sworn
379 // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
380 // narrowOoop, instanceOopDesc*, and random other things).
381 // In the oop_* overloads, it must hold that if the passed in type T is not
382 // narrowOop, then it by contract has to be one of many oop-like types implicitly
383 // convertible to oop, and hence returns oop as the canonical oop type.
384 // If it turns out it was not, then the implicit conversion to oop will fail
385 // to compile, as desired.
386 template <typename T>
387 struct OopOrNarrowOop: AllStatic {
388 typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
389 };
390
391 inline void* field_addr(oop base, ptrdiff_t byte_offset) {
392 return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
393 }
  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor to be used in future invocations and patches the
  // function pointer to this new accessor.

  // Primary template; only the per-BarrierType specializations below are used.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};
405
  // Runtime dispatch point for plain stores to a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    // Initially points at store_init (the resolver); patched on first use to
    // the resolved accessor (see initialization below).
    static func_t _store_func;

    // Resolver: picks the right accessor, patches _store_func, and performs
    // the first store. Defined out of line.
    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };
418
  // Runtime dispatch point for stores to a field of an oop base at an offset.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    // Initially the resolver store_at_init; patched to the resolved accessor.
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };
431
  // Runtime dispatch point for loads from a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    // Initially the resolver load_init; patched to the resolved accessor.
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };
444
  // Runtime dispatch point for loads from a field of an oop base at an offset.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    // Initially the resolver load_at_init; patched to the resolved accessor.
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };
457
  // Runtime dispatch point for compare-and-exchange on a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    // Initially the resolver atomic_cmpxchg_init; patched to the resolved accessor.
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };
470
  // Runtime dispatch point for compare-and-exchange on a field of an oop base.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    // Initially the resolver atomic_cmpxchg_at_init; patched to the resolved accessor.
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };
483
  // Runtime dispatch point for atomic exchange on a raw address.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    // Initially the resolver atomic_xchg_init; patched to the resolved accessor.
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };
496
  // Runtime dispatch point for atomic exchange on a field of an oop base.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    // Initially the resolver atomic_xchg_at_init; patched to the resolved accessor.
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
509
  // Runtime dispatch point for array copies; returns an OopCopyResult
  // describing success or which check (class cast / null) failed.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    // Initially the resolver arraycopy_init; patched to the resolved accessor.
    static func_t _arraycopy_func;

    static OopCopyResult arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                        size_t length);

    static inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                          size_t length) {
      assert_access_thread_state();
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };
528
  // Runtime dispatch point for object cloning.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Initially the resolver clone_init; patched to the resolved accessor.
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
541
  // Initialize the function pointers to point to the resolving function.
  // Each *_init resolver patches its pointer to the resolved accessor upon
  // first invocation (see the RuntimeDispatch comment above).
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
582
583 // Step 3: Pre-runtime dispatching.
584 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
585 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
586 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
587 // not possible.
588 struct PreRuntimeDispatch: AllStatic {
    // True when a raw access can be wired directly to RawAccessBarrier without
    // a runtime UseCompressedOops check: the access is primitive, or the
    // address type already fixes the representation (oop*), or the compressed
    // representation has already been inferred (narrowOop*).
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    // Decorator bits toggled when expanding an access after checking
    // UseCompressedOops at runtime (see the !CanHardwireRaw overloads below).
    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // Primitive (non-oop) accesses never need GC barriers and can go raw.
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
603
    // store, raw and hardwirable: go straight to RawAccessBarrier, using the
    // oop_* entry point when the value is an oop.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    // store, raw but not hardwirable: check UseCompressedOops once, then
    // re-dispatch with the representation decorators resolved.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    // store, not raw: primitives are hardwired to the raw path; oop stores go
    // through the runtime dispatch (GC barriers may be required).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }
640
    // store_at, raw: resolve the field address and delegate to store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // store_at, not raw: hardwire primitives; dispatch oop stores at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }
659
    // load, raw and hardwirable: go straight to RawAccessBarrier, using the
    // oop_* entry point when the value is an oop.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // load, raw but not hardwirable: check UseCompressedOops once, then
    // re-dispatch with the representation decorators resolved.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // load, not raw: hardwire primitives; dispatch oop loads at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }
696
    // load_at, raw: resolve the field address and delegate to load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // load_at, not raw: hardwire primitives; dispatch oop loads at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }
715
    // atomic_cmpxchg, raw and hardwirable: go straight to RawAccessBarrier,
    // using the oop_* entry point when the value is an oop.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
      } else {
        return Raw::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }

    // atomic_cmpxchg, raw but not hardwirable: check UseCompressedOops once,
    // then re-dispatch with the representation decorators resolved.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      }
    }

    // atomic_cmpxchg, not raw: hardwire primitives; dispatch oop cmpxchg at
    // runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }
752
    // atomic_cmpxchg_at, raw: resolve the field address and delegate.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
    }

    // atomic_cmpxchg_at, not raw: hardwire primitives; dispatch oop cmpxchg
    // at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
      }
    }
771
  // atomic_xchg for AS_RAW accesses that can be hardwired: dispatch directly
  // to the raw access barrier, using the oop flavour when the value being
  // exchanged is an oop.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    } else {
      return Raw::atomic_xchg(addr, new_value);
    }
  }
783
  // atomic_xchg for AS_RAW accesses that cannot be hardwired statically:
  // consult UseCompressedOops at runtime and re-dispatch with the
  // convert_compressed_oops decorator bit set or cleared accordingly.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    }
  }
796
  // atomic_xchg for accesses without AS_RAW. Hardwired primitive accesses are
  // rewritten as raw accesses and re-dispatched statically; all other accesses
  // go through the runtime barrier dispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
    }
  }
808
  // atomic_xchg_at for AS_RAW accesses: compute the field address from
  // (base, offset) and forward to the address-based atomic_xchg.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg<decorators>(field_addr(base, offset), new_value);
  }
815
816 template <DecoratorSet decorators, typename T>
817 inline static typename EnableIf<
818 !HasDecorator<decorators, AS_RAW>::value, T>::type
819 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
820 if (is_hardwired_primitive<decorators>()) {
821 const DecoratorSet expanded_decorators = decorators | AS_RAW;
822 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
823 } else {
824 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
825 }
826 }
827
  // arraycopy for AS_RAW accesses that can be hardwired: dispatch directly to
  // the raw access barrier (oop flavour for oop elements). Raw copies perform
  // no checkcast/null checking, so the result is unconditionally ok.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                         dst_obj, dst_offset_in_bytes, dst_raw,
                         length);
    } else {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    return OopCopyResult::ok;
  }
847
  // arraycopy for AS_RAW accesses that cannot be hardwired statically:
  // consult UseCompressedOops at runtime and re-dispatch with the
  // convert_compressed_oops decorator bit set or cleared accordingly.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    }
  }
866
  // arraycopy for accesses without AS_RAW. Hardwired primitive accesses are
  // rewritten as raw accesses and re-dispatched statically; all other accesses
  // go through the runtime barrier dispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                          dst_obj, dst_offset_in_bytes, dst_raw,
                                                                          length);
    }
  }
884
  // clone for AS_RAW accesses: dispatch directly to the raw access barrier.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    Raw::clone(src, dst, size);
  }
892
  // clone for accesses without AS_RAW: go through the runtime barrier
  // dispatch so the active GC's barrier set can participate.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
  }
899 };
900
901 // Step 2: Reduce types.
902 // Enforce that for non-oop types, T and P have to be strictly the same.
903 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
905 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
906 // the subsequent table. (columns are P, rows are T)
907 // | | HeapWord | oop | narrowOop |
908 // | oop | rt-comp | hw-none | hw-comp |
909 // | narrowOop | x | x | hw-none |
910 //
911 // x means not allowed
912 // rt-comp means it must be checked at runtime whether the oop is compressed.
913 // hw-none means it is statically known the oop will not be compressed.
914 // hw-comp means it is statically known the oop will be compressed.
915
// Step 2 reduction for stores, matching case: address and value have the same
// type T, so no compressed-oop conversion is involved; forward unchanged.
template <DecoratorSet decorators, typename T>
inline void store_reduce_types(T* addr, T value) {
  PreRuntimeDispatch::store<decorators>(addr, value);
}
920
921 template <DecoratorSet decorators>
922 inline void store_reduce_types(narrowOop* addr, oop value) {
923 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
924 INTERNAL_RT_USE_COMPRESSED_OOPS;
925 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
926 }
927
928 template <DecoratorSet decorators>
929 inline void store_reduce_types(narrowOop* addr, narrowOop value) {
930 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
931 INTERNAL_RT_USE_COMPRESSED_OOPS;
932 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
933 }
934
935 template <DecoratorSet decorators>
936 inline void store_reduce_types(HeapWord* addr, oop value) {
937 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
938 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
939 }
940
// Step 2 reduction for atomic_cmpxchg, matching case: address and value have
// the same type T; forward unchanged.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
  return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
}
945
946 template <DecoratorSet decorators>
947 inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
948 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
949 INTERNAL_RT_USE_COMPRESSED_OOPS;
950 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
951 }
952
953 template <DecoratorSet decorators>
954 inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
955 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
956 INTERNAL_RT_USE_COMPRESSED_OOPS;
957 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
958 }
959
960 template <DecoratorSet decorators>
961 inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
962 oop compare_value,
963 oop new_value) {
964 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
965 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
966 }
967
968 template <DecoratorSet decorators, typename T>
969 inline T atomic_xchg_reduce_types(T* addr, T new_value) {
970 const DecoratorSet expanded_decorators = decorators;
971 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
972 }
973
974 template <DecoratorSet decorators>
975 inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
976 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
977 INTERNAL_RT_USE_COMPRESSED_OOPS;
978 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
979 }
980
981 template <DecoratorSet decorators>
982 inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
983 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
984 INTERNAL_RT_USE_COMPRESSED_OOPS;
985 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
986 }
987
988 template <DecoratorSet decorators>
989 inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
990 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
991 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
992 }
993
// Step 2 reduction for loads, matching case: the requested value type T is
// also the type at the address; forward unchanged.
template <DecoratorSet decorators, typename T>
inline T load_reduce_types(T* addr) {
  return PreRuntimeDispatch::load<decorators, T>(addr);
}
998
999 template <DecoratorSet decorators, typename T>
1000 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1001 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1002 INTERNAL_RT_USE_COMPRESSED_OOPS;
1003 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1004 }
1005
1006 template <DecoratorSet decorators, typename T>
1007 inline oop load_reduce_types(HeapWord* addr) {
1008 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1009 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1010 }
1011
// Step 2 reduction for arraycopy, matching case: source and destination
// element pointers have the same type T; forward unchanged.
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                            size_t length) {
  return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}
1020
1021 template <DecoratorSet decorators>
1022 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1023 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1024 size_t length) {
1025 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1026 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1027 dst_obj, dst_offset_in_bytes, dst_raw,
1028 length);
1029 }
1030
1031 template <DecoratorSet decorators>
1032 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1033 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1034 size_t length) {
1035 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1036 INTERNAL_RT_USE_COMPRESSED_OOPS;
1037 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1038 dst_obj, dst_offset_in_bytes, dst_raw,
1039 length);
1040 }
1041
1042 // Step 1: Set default decorators. This step remembers if a type was volatile
1043 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1044 // memory ordering is set for the access, and the implied decorator rules
1045 // are applied to select sensible defaults for decorators that have not been
1046 // explicitly set. For example, default object referent strength is set to strong.
1047 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1049 // that the passed in types make sense.
1050
// Compile-time sanity check for Step 1: primitive accesses must use a
// pointer, integral or floating-point value type. Oop accesses carry
// INTERNAL_VALUE_IS_OOP and have already been validated elsewhere.
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // not allowed primitive type
}
1059
// Step 1 entry point for address-based stores: verify T, decay the pointer
// and value types, default the memory ordering to MO_RELAXED when the
// address is volatile-qualified and no MO_* decorator was given, then hand
// off to type reduction (Step 2).
template <DecoratorSet decorators, typename P, typename T>
inline void store(P* addr, T value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  // const_cast strips any volatile/const qualification from P before dispatch.
  store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
}
1073
// Step 1 entry point for (base, offset) stores: verify T, decay the value
// type, and, for oop values, remember that compressed-oop conversion may be
// needed before handing off to pre-runtime dispatch.
template <DecoratorSet decorators, typename T>
inline void store_at(oop base, ptrdiff_t offset, T value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
}
1084
// Step 1 entry point for address-based loads: verify T, decay the pointer
// type, pick the loaded value type (OopOrNarrowOop<T> for oop accesses,
// decayed T otherwise), default the memory ordering to MO_RELAXED for
// volatile addresses without an MO_* decorator, then hand off to Step 2.
template <DecoratorSet decorators, typename P, typename T>
inline T load(P* addr) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
}
1099
// Step 1 entry point for (base, offset) loads: verify T, pick the loaded
// value type, and, for oop accesses, remember that compressed-oop conversion
// may be needed before handing off to pre-runtime dispatch.
template <DecoratorSet decorators, typename T>
inline T load_at(oop base, ptrdiff_t offset) {
  verify_types<decorators, T>();
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // Expand the decorators (figure out sensible defaults)
  // Potentially remember if we need compressed oop awareness
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
}
1113
// Step 1 entry point for address-based compare-and-exchange: verify T, decay
// the types, default the memory ordering to MO_SEQ_CST when no MO_* decorator
// was given, then hand off to type reduction (Step 2).
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                          compare_decayed_value,
                                                          new_decayed_value);
}
1128
// Step 1 entry point for (base, offset) compare-and-exchange: verify T,
// decay the value types, default the memory ordering to MO_SEQ_CST when no
// MO_* decorator was given, and, for oop values, flag that compressed-oop
// conversion may be needed before handing off to pre-runtime dispatch.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  // Determine default memory ordering
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  // Potentially remember that we need compressed oop awareness
  const DecoratorSet final_decorators = expanded_decorators |
                                        (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                         INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
  return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                 new_decayed_value);
}
1146
// Step 1 entry point for address-based atomic exchange: verify T, decay the
// types, force MO_SEQ_CST ordering, then hand off to type reduction (Step 2).
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_xchg(P* addr, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
  return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                       new_decayed_value);
}
1158
// Step 1 entry point for (base, offset) atomic exchange: verify T, decay the
// value type, force MO_SEQ_CST ordering, and, for oop values, flag that
// compressed-oop conversion may be needed before handing off to dispatch.
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
1170
// Step 1 entry point for arraycopy: statically check the element type (void
// is additionally allowed for type-erased copies), mark the access as an
// in-heap array access, strip const from the raw pointers, and hand off to
// type reduction (Step 2).
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length) {
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
  using DecayedT = std::decay_t<T>;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
  return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                     dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                     length);
}
1184
1185 template <DecoratorSet decorators>
1186 inline void clone(oop src, oop dst, size_t size) {
1187 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1188 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1189 }
1190
// Infer the type that should be returned from an Access::oop_load.
// Captures the address and defers the actual load until the proxy is
// converted to (or compared against) a concrete oop/narrowOop type, so the
// load is performed with the exact type the caller requested.
template <typename P, DecoratorSet decorators>
class OopLoadProxy: public StackObj {
private:
  P *const _addr;  // address the deferred load will read from
public:
  explicit OopLoadProxy(P* addr) : _addr(addr) {}

  // Conversion to oop: perform the load with oop as the value type.
  inline operator oop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
  }

  // Conversion to narrowOop: perform the load with narrowOop as the value type.
  inline operator narrowOop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
  }

  // Comparisons load with the type of the other operand, then compare.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
  }

  // nullptr comparisons load as an (uncompressed) oop before comparing.
  inline bool operator ==(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
  }

  inline bool operator !=(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
  }
};
1225
// Infer the type that should be returned from an Access::load_at.
// Captures (base, offset) and defers the load until the proxy is converted
// to (or compared against) a concrete type T.
template <DecoratorSet decorators>
class LoadAtProxy: public StackObj {
private:
  const oop _base;          // object holding the field
  const ptrdiff_t _offset;  // byte offset of the field within _base
public:
  LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // Conversion operator: performs the load with the requested type T.
  template <typename T>
  inline operator T() const {
    return load_at<decorators, T>(_base, _offset);
  }

  // Comparisons load with the type of the other operand, then compare.
  template <typename T>
  inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

  template <typename T>
  inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
};
1246
// Infer the type that should be returned from an Access::oop_load_at.
// Like LoadAtProxy, but for oop loads: the INTERNAL_VALUE_IS_OOP decorator
// is added and only oop/narrowOop conversions are offered.
template <DecoratorSet decorators>
class OopLoadAtProxy: public StackObj {
private:
  const oop _base;          // object holding the field
  const ptrdiff_t _offset;  // byte offset of the field within _base
public:
  OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // Conversion to oop: perform the load with oop as the value type.
  inline operator oop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
  }

  // Conversion to narrowOop: perform the load with narrowOop as the value type.
  inline operator narrowOop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
  }

  // Comparisons load with the type of the other operand, then compare.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
  }
};
1274 }
1275
1276 #endif // SHARE_OOPS_ACCESSBACKEND_HPP