1 /*
2 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/inlineKlass.hpp"
34 #include "oops/oopsHierarchy.hpp"
35 #include "runtime/globals.hpp"
36 #include "utilities/debug.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
// Result from oop_arraycopy
enum class OopCopyResult {
  ok,                      // oop array copy successful
  failed_check_class_cast, // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  failed_check_null        // oop array copy failed null check (ARRAYCOPY_NOTNULL)
};
45
// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
// Compression applies only when the access both asks for oop conversion
// (INTERNAL_CONVERT_COMPRESSED_OOP) and the runtime actually uses compressed
// oops (INTERNAL_RT_USE_COMPRESSED_OOPS).
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
54
// This meta-function returns either oop or narrowOop depending on whether
// a back-end needs to consider compressed oops types or not.
// Unlike HeapOopType, this consults only the runtime compressed-oops
// decorator, not whether the access requests conversion.
template <DecoratorSet decorators>
struct ValueOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
62
namespace AccessInternal {
  // Enumerates the runtime-dispatched access operations; used to select the
  // matching function pointer type via AccessFunction below.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY
  };

  // True iff an oop value of type T must be converted to/from its compressed
  // heap representation for this access: the value is an oop, the heap
  // representation is narrowOop, and the passed-in type is (uncompressed) oop.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Address of the (possibly compressed) oop field located byte_offset bytes
  // into base.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Function pointer signatures for each runtime-dispatched access, for a
  // given decorator set and value type T.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                              size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md, LayoutKind lk);
  };

  // For T == void only the untyped arraycopy signature is meaningful.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                              size_t length);
  };

  // Maps a BarrierType to the corresponding function pointer type; a
  // specialization per barrier is generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Accessor resolution functions (defined out-of-line); these pick the
  // concrete accessor a RuntimeDispatch function pointer is patched to.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);

  void value_copy_internal(void* src, void* dst, size_t length);
}
172
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
// Retained: internal bookkeeping, memory ordering, arraycopy hints and the
// not-null hint.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
177
// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // Converts an oop to its compressed in-heap representation when the
  // decorators require it; defined out-of-line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // No conversion required: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // Inverse of encode_internal: converts the compressed representation back
  // to oop; defined out-of-line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // No conversion required: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // Primitive access implementations, overloaded on the MO_* memory-ordering
  // decorator. Only the MO_UNORDERED variants are defined inline here; the
  // ordered variants are defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // cmpxchg is only provided with SEQ_CST and RELAXED orderings, and xchg
  // only with SEQ_CST; there are no unordered variants.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

public:
  // Primitive accesses: dispatch to the *_internal variant selected by the
  // memory-ordering decorators of this barrier instantiation.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop accesses: like the primitive variants above, but additionally
  // encode/decode compressed oops as required; defined out-of-line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  // *_at variants resolve the field address from (base, offset) and defer to
  // the corresponding address-based access.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
  // Copies a value-object payload from src to dst; presumably md/lk describe
  // its layout (see InlineKlass) -- confirm against the out-of-line definition.
  static void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk);

};
363
namespace AccessInternal {
  // Debug-only verification that the calling thread is in a state that may
  // perform heap accesses; compiled out in product builds via DEBUG_ONLY.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
368
369 // Below is the implementation of the first 4 steps of the template pipeline:
370 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
371 // and sets default decorators to sensible values.
372 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
373 // multiple types. The P type of the address and T type of the value must
374 // match.
375 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
376 // avoided, and in that case avoids it (calling raw accesses or
377 // primitive accesses in a build that does not require primitive GC barriers)
378 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
379 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
380 // to the access.
381
382 namespace AccessInternal {
  // Helper for OopOrNarrowOop below: narrowOop maps to itself, every other
  // (oop-like) type maps to oop.
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };
392
  // This metafunction returns a canonicalized oop/narrowOop type for a passed
  // in oop-like types passed in from oop_* overloads where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
  };
406
407 inline void* field_addr(oop base, ptrdiff_t byte_offset) {
408 return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
409 }
  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor to be used in future invocations and patches the
  // function pointer to this new accessor.

  // Primary template; only the per-BarrierType specializations below are used.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};
421
  // In each specialization below, the function pointer starts out at the
  // *_init resolver (see the static definitions further down), which patches
  // it to the resolved accessor on first invocation.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    // Resolves the accessor, patches _store_func and performs the store.
    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
525
  // Runtime-dispatched arraycopy; returns an OopCopyResult describing whether
  // the copy succeeded or which check (class cast / null) failed.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static OopCopyResult arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                        size_t length);

    static inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                          size_t length) {
      assert_access_thread_state();
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };
544
  // Runtime-dispatched object clone (src -> dst, size bytes).
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
557
558 template <DecoratorSet decorators, typename T>
559 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
560 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
561 static func_t _value_copy_func;
562
563 static void value_copy_init(void* src, void* dst, InlineKlass* md, LayoutKind lk);
564
565 static inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
566 _value_copy_func(src, dst, md, lk);
567 }
568 };
569
  // Initialize the function pointers to point to the resolving function.
  // On the first access through each pointer, the *_init resolver selects the
  // concrete accessor and patches the pointer to it for subsequent calls.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
614
  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
  // not possible.
  struct PreRuntimeDispatch: AllStatic {
    // A raw access can be hardwired (no runtime dispatch) unless it is an oop
    // access whose compressed-oops representation cannot be determined at
    // compile time.
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    // Decorators toggled on/off after consulting UseCompressedOops at runtime.
    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // Primitive (non-oop) accesses can always be hardwired to raw accesses.
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
635
    // Raw store, hardwired: dispatch directly to RawAccessBarrier with only
    // the raw-relevant decorators, choosing the oop or primitive flavor.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    // Raw store that cannot be hardwired: consult UseCompressedOops at runtime
    // and re-dispatch with the compressed-oops decorators resolved either way.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    // Non-raw store: primitives are promoted to raw accesses; oop stores go
    // through the runtime dispatch to attach GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }
672
    // Raw store_at: resolve the field address and defer to the raw store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // Non-raw store_at: primitives are promoted to raw; oop stores dispatch
    // at runtime to attach GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }
691
    // Raw load, hardwired: dispatch directly to RawAccessBarrier with only
    // the raw-relevant decorators, choosing the oop or primitive flavor.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // Raw load that cannot be hardwired: consult UseCompressedOops at runtime
    // and re-dispatch with the compressed-oops decorators resolved either way.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // Non-raw load: primitives are promoted to raw accesses; oop loads go
    // through the runtime dispatch to attach GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }
728
    // Raw load_at: resolve the field address and defer to the raw load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // Non-raw load_at: primitives are promoted to raw; oop loads dispatch
    // at runtime to attach GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }
747
748 template <DecoratorSet decorators, typename T>
749 inline static typename EnableIf<
750 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
751 atomic_cmpxchg(void* addr, T compare_value, T new_value) {
752 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
753 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
754 return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
755 } else {
756 return Raw::atomic_cmpxchg(addr, compare_value, new_value);
757 }
758 }
759
  // Raw atomic_cmpxchg that cannot be hardwired: whether oops are compressed
  // is only known at runtime, so test UseCompressedOops once and re-dispatch
  // with decorators that make the choice statically visible downstream.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
  atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
    }
  }
772
  // Non-raw atomic_cmpxchg: hardwired primitives are re-dispatched with
  // AS_RAW added; everything else resolves its barrier via RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
    }
  }
784
  // Raw atomic_cmpxchg_at: reduce the (base, offset) access to an
  // address-based atomic_cmpxchg on the computed field address.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
  }
791
  // Non-raw atomic_cmpxchg_at: hardwired primitives are re-dispatched with
  // AS_RAW added; otherwise the barrier is resolved via RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }
  }
803
  // Raw atomic_xchg that can be fully hardwired: dispatch straight to the
  // RawAccessBarrier, selecting the oop flavour when the value is an oop.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    } else {
      return Raw::atomic_xchg(addr, new_value);
    }
  }
815
  // Raw atomic_xchg that cannot be hardwired: resolve UseCompressedOops at
  // runtime, then re-dispatch with the choice encoded in the decorators.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    }
  }
828
  // Non-raw atomic_xchg: hardwired primitives are re-dispatched with AS_RAW
  // added; everything else resolves its barrier via RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_xchg(void* addr, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
    }
  }
840
  // Raw atomic_xchg_at: reduce the (base, offset) access to an address-based
  // atomic_xchg on the computed field address.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg<decorators>(field_addr(base, offset), new_value);
  }
847
848 template <DecoratorSet decorators, typename T>
849 inline static typename EnableIf<
850 !HasDecorator<decorators, AS_RAW>::value, T>::type
851 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
852 if (is_hardwired_primitive<decorators>()) {
853 const DecoratorSet expanded_decorators = decorators | AS_RAW;
854 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
855 } else {
856 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
857 }
858 }
859
  // Raw arraycopy that can be fully hardwired: dispatch to the
  // RawAccessBarrier. Raw copies perform no checkcast/null filtering, so the
  // result is unconditionally OopCopyResult::ok.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                         dst_obj, dst_offset_in_bytes, dst_raw,
                         length);
    } else {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    return OopCopyResult::ok;
  }
879
  // Raw arraycopy that cannot be hardwired: resolve UseCompressedOops at
  // runtime, then re-dispatch with the choice encoded in the decorators.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    }
  }
898
  // Non-raw arraycopy: hardwired primitives are re-dispatched with AS_RAW
  // added; everything else resolves its barrier via RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, OopCopyResult>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                          dst_obj, dst_offset_in_bytes, dst_raw,
                                                                          length);
    }
  }
916
  // Raw clone: with AS_RAW set the barrier is statically known, so dispatch
  // straight to the RawAccessBarrier.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    Raw::clone(src, dst, size);
  }
924
  // Non-raw clone: always resolved through RuntimeDispatch, which picks the
  // GC-appropriate clone barrier at runtime.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
  }
931
  // Raw value_copy (flat value-class payload copy described by InlineKlass
  // metadata and LayoutKind): dispatch straight to the RawAccessBarrier.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value>::type
  value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    Raw::value_copy(src, dst, md, lk);
  }
939
940 template <DecoratorSet decorators>
941 inline static typename EnableIf<
942 !HasDecorator<decorators, AS_RAW>::value>::type
943 value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
944 const DecoratorSet expanded_decorators = decorators;
945 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md, lk);
946 }
947 };
948
949 // Step 2: Reduce types.
950 // Enforce that for non-oop types, T and P have to be strictly the same.
951 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
953 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
954 // the subsequent table. (columns are P, rows are T)
955 // | | HeapWord | oop | narrowOop |
956 // | oop | rt-comp | hw-none | hw-comp |
957 // | narrowOop | x | x | hw-none |
958 //
959 // x means not allowed
960 // rt-comp means it must be checked at runtime whether the oop is compressed.
961 // hw-none means it is statically known the oop will not be compressed.
962 // hw-comp means it is statically known the oop will be compressed.
963
// Generic store: T and P agree (per the table above), so no compressed-oop
// conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline void store_reduce_types(T* addr, T value) {
  PreRuntimeDispatch::store<decorators>(addr, value);
}
968
// hw-comp case: storing an oop through a narrowOop* — statically known to be
// compressed, so both conversion decorators are hardwired in.
template <DecoratorSet decorators>
inline void store_reduce_types(narrowOop* addr, oop value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  PreRuntimeDispatch::store<expanded_decorators>(addr, value);
}
975
// Storing a narrowOop through a narrowOop*: value is already in compressed
// form; mark the access as statically compressed.
template <DecoratorSet decorators>
inline void store_reduce_types(narrowOop* addr, narrowOop value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  PreRuntimeDispatch::store<expanded_decorators>(addr, value);
}
982
// rt-comp case: storing an oop through a HeapWord* — whether it is compressed
// must be checked at runtime, so only the conversion decorator is added.
template <DecoratorSet decorators>
inline void store_reduce_types(HeapWord* addr, oop value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  PreRuntimeDispatch::store<expanded_decorators>(addr, value);
}
988
// Generic atomic_cmpxchg: T and P agree, so no conversion decorators needed.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
  return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
}
993
// hw-comp case: oop cmpxchg against a narrowOop* slot — statically compressed.
template <DecoratorSet decorators>
inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
1000
// narrowOop cmpxchg against a narrowOop* slot — already compressed form.
template <DecoratorSet decorators>
inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
1007
// rt-comp case: oop cmpxchg against a HeapWord* slot — compression is
// resolved at runtime by the dispatch layer below.
template <DecoratorSet decorators>
inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
                                       oop compare_value,
                                       oop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
1015
1016 template <DecoratorSet decorators, typename T>
1017 inline T atomic_xchg_reduce_types(T* addr, T new_value) {
1018 const DecoratorSet expanded_decorators = decorators;
1019 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1020 }
1021
// hw-comp case: oop xchg against a narrowOop* slot — statically compressed.
template <DecoratorSet decorators>
inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
1028
// narrowOop xchg against a narrowOop* slot — already compressed form.
template <DecoratorSet decorators>
inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
1035
// rt-comp case: oop xchg against a HeapWord* slot — compression resolved at
// runtime by the dispatch layer below.
template <DecoratorSet decorators>
inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
1041
// Generic load: T and P agree, so no conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline T load_reduce_types(T* addr) {
  return PreRuntimeDispatch::load<decorators, T>(addr);
}
1046
// Loading from a narrowOop* slot — statically compressed; the result type is
// oop or narrowOop depending on what the caller asked for (OopOrNarrowOop).
template <DecoratorSet decorators, typename T>
inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
}
1053
// rt-comp case: loading an oop from a HeapWord* slot — compression resolved
// at runtime by the dispatch layer below.
template <DecoratorSet decorators, typename T>
inline oop load_reduce_types(HeapWord* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
}
1059
// Generic arraycopy: element pointer types agree, no conversion decorators.
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                            size_t length) {
  return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}
1068
// rt-comp case: HeapWord* element buffers — whether oop elements are
// compressed is resolved at runtime by the dispatch layer below.
template <DecoratorSet decorators>
inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                            arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                            size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                            length);
}
1078
// hw-comp case: narrowOop* element buffers — statically known compressed.
template <DecoratorSet decorators>
inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                            arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                            size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                            length);
}
1089
1090 // Step 1: Set default decorators. This step remembers if a type was volatile
1091 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1092 // memory ordering is set for the access, and the implied decorator rules
1093 // are applied to select sensible defaults for decorators that have not been
1094 // explicitly set. For example, default object referent strength is set to strong.
1095 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1097 // that the passed in types make sense.
1098
// Compile-time sanity check for primitive accesses: oop values were validated
// earlier; everything else must be a pointer, integral or floating-point type.
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // not allowed primitive type
}
1107
// Step 1 entry point for store: verifies/decays the types, applies default
// decorators via DecoratorFixup, then hands off to type reduction (step 2).
template <DecoratorSet decorators, typename P, typename T>
inline void store(P* addr, T value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
}
1121
// Step 1 entry point for store_at: decays the value type and, for oop values,
// records that compressed-oop conversion may be needed before dispatching.
template <DecoratorSet decorators, typename T>
inline void store_at(oop base, ptrdiff_t offset, T value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
}
1132
// Step 1 entry point for load: decays the types (mapping oop values through
// OopOrNarrowOop), applies default decorators, then reduces types (step 2).
template <DecoratorSet decorators, typename P, typename T>
inline T load(P* addr) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
}
1147
// Step 1 entry point for load_at: decays the value type and, for oop values,
// records that compressed-oop conversion may be needed before dispatching.
template <DecoratorSet decorators, typename T>
inline T load_at(oop base, ptrdiff_t offset) {
  verify_types<decorators, T>();
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // Expand the decorators (figure out sensible defaults)
  // Potentially remember if we need compressed oop awareness
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
}
1161
// Step 1 entry point for atomic_cmpxchg: decays types and defaults the
// memory ordering to MO_SEQ_CST when no MO decorator was supplied.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                          compare_decayed_value,
                                                          new_decayed_value);
}
1176
// Step 1 entry point for atomic_cmpxchg_at: defaults memory ordering to
// MO_SEQ_CST and, for oop values, adds compressed-oop conversion awareness.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  // Determine default memory ordering
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  // Potentially remember that we need compressed oop awareness
  const DecoratorSet final_decorators = expanded_decorators |
                                        (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                         INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
  return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                 new_decayed_value);
}
1194
// Step 1 entry point for atomic_xchg: decays types; ordering is always
// MO_SEQ_CST since that is the only flavour provided.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_xchg(P* addr, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
  return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                       new_decayed_value);
}
1206
// Step 1 entry point for atomic_xchg_at: always MO_SEQ_CST; for oop values,
// adds compressed-oop conversion awareness before dispatching.
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
1218
// Step 1 entry point for arraycopy: validates the element type (void is
// allowed for type-erased copies), marks the access as an in-heap array
// access, then hands off to type reduction (step 2).
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length) {
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
  using DecayedT = std::decay_t<T>;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
  return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                     dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                     length);
}
1232
// Step 1 entry point for clone: applies default decorators and dispatches.
template <DecoratorSet decorators>
inline void clone(oop src, oop dst, size_t size) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
}
1238
// Step 1 entry point for value_copy (flat value-class payload copy):
// applies default decorators and dispatches.
template <DecoratorSet decorators>
inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md, lk);
}
1244
1245 // Infer the type that should be returned from an Access::oop_load.
template <typename P, DecoratorSet decorators>
class OopLoadProxy: public StackObj {
private:
  P *const _addr;  // address the deferred load reads from
public:
  explicit OopLoadProxy(P* addr) : _addr(addr) {}

  // The load is performed lazily, when the proxy is converted or compared;
  // the conversion target (oop vs narrowOop) selects the loaded type.
  inline operator oop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
  }

  inline operator narrowOop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
  }

  // Comparisons load the value with the type of the right-hand side.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
  }

  // nullptr comparisons load as oop.
  inline bool operator ==(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
  }

  inline bool operator !=(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
  }
};
1279
1280 // Infer the type that should be returned from an Access::load_at.
template <DecoratorSet decorators>
class LoadAtProxy: public StackObj {
private:
  const oop _base;         // object holding the field
  const ptrdiff_t _offset; // byte offset of the field within the object
public:
  LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // The load is performed lazily; the conversion target T selects the
  // loaded type.
  template <typename T>
  inline operator T() const {
    return load_at<decorators, T>(_base, _offset);
  }

  // Comparisons load the value with the type of the right-hand side.
  template <typename T>
  inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

  template <typename T>
  inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
};
1300
1301 // Infer the type that should be returned from an Access::oop_load_at.
template <DecoratorSet decorators>
class OopLoadAtProxy: public StackObj {
private:
  const oop _base;         // object holding the field
  const ptrdiff_t _offset; // byte offset of the field within the object
public:
  OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // The load is performed lazily, when the proxy is converted or compared;
  // the conversion target (oop vs narrowOop) selects the loaded type.
  inline operator oop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
  }

  inline operator narrowOop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
  }

  // Comparisons load the value with the type of the right-hand side.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
  }
};
1328 }
1329
1330 #endif // SHARE_OOPS_ACCESSBACKEND_HPP