1010
1011 // for better readability
1012 const bool must_gc_arguments = true;
1013 const bool dont_gc_arguments = false;
1014
1015 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
1016 bool save_fpu_registers = true;
1017
1018 // stub code & info for the different stubs
1019 OopMapSet* oop_maps = NULL;
1020 switch (id) {
1021 case forward_exception_id:
1022 {
1023 oop_maps = generate_handle_exception(id, sasm);
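// Note (assumed from the handler-generation code, not shown here): generate_handle_exception
// is expected to patch the return address in the caller frame with the exception handler
// entry, so the leave()/ret(0) below returns straight into the handler.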
1024 __ leave();
1025 __ ret(0);
1026 }
1027 break;
1028
1029 case new_instance_id:
1030 case fast_new_instance_id:
1031 case fast_new_instance_init_check_id:
1032 {
1033 Register klass = rdx; // Incoming
1034 Register obj = rax; // Result
1035
1036 if (id == new_instance_id) {
1037 __ set_info("new_instance", dont_gc_arguments);
1038 } else if (id == fast_new_instance_id) {
1039 __ set_info("fast new_instance", dont_gc_arguments);
1040 } else {
1041 assert(id == fast_new_instance_init_check_id, "bad StubID");
1042 __ set_info("fast new_instance init check", dont_gc_arguments);
1043 }
1044
1045 // If TLAB is disabled, see if there is support for inlining contiguous
1046 // allocations.
1047 // Otherwise, just go to the slow path.
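// Fast-path sketch: read the instance size from the klass' layout_helper, bump-allocate
// in eden, then initialize the header and zero the fields. rdi and rbx are saved around
// the fast path since it may use them as temporaries (no frame is built here); any
// failure branches to slow_path, which restores them and falls through to the runtime call.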
1048 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB
1049 && Universe::heap()->supports_inline_contig_alloc()) {
1050 Label slow_path;
1051 Register obj_size = rcx;
1052 Register t1 = rbx;
1053 Register t2 = rsi;
1054 assert_different_registers(klass, obj, obj_size, t1, t2);
1055
1056 __ push(rdi);
1057 __ push(rbx);
1082 NOT_LP64(__ get_thread(thread));
1083
1084     // get the instance size (size is positive so movl is fine for 64bit)
1085 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1086
1087 __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);
1088
1089 __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
1090 __ verify_oop(obj);
1091 __ pop(rbx);
1092 __ pop(rdi);
1093 __ ret(0);
1094
1095 __ bind(slow_path);
1096 __ pop(rbx);
1097 __ pop(rdi);
1098 }
1099
1100 __ enter();
1101 OopMap* map = save_live_registers(sasm, 2);
1102 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1103 oop_maps = new OopMapSet();
1104 oop_maps->add_gc_map(call_offset, map);
1105 restore_live_registers_except_rax(sasm);
1106 __ verify_oop(obj);
1107 __ leave();
1108 __ ret(0);
1109
1110 // rax,: new instance
1111 }
1112
1113 break;
1114
1115 case counter_overflow_id:
1116 {
1117 Register bci = rax, method = rbx;
1118 __ enter();
1119 OopMap* map = save_live_registers(sasm, 3);
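// The bci and Method* are expected to have been stored by the compiled caller
// (via store_parameter) just above its call, hence the fixed word offsets from
// rbp after enter().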
1120 // Retrieve bci
1121 __ movl(bci, Address(rbp, 2*BytesPerWord));
1122 // And a pointer to the Method*
1123 __ movptr(method, Address(rbp, 3*BytesPerWord));
1124 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1125 oop_maps = new OopMapSet();
1126 oop_maps->add_gc_map(call_offset, map);
1127 restore_live_registers(sasm);
1128 __ leave();
1129 __ ret(0);
1130 }
1131 break;
1132
1133 case new_type_array_id:
1134 case new_object_array_id:
1135 {
1136 Register length = rbx; // Incoming
1137 Register klass = rdx; // Incoming
1138 Register obj = rax; // Result
1139
1140 if (id == new_type_array_id) {
1141 __ set_info("new_type_array", dont_gc_arguments);
1142 } else {
1143 __ set_info("new_object_array", dont_gc_arguments);
1144 }
1145
1146 #ifdef ASSERT
1147 // assert object type is really an array of the proper kind
1148 {
1149 Label ok;
1150 Register t0 = obj;
1151 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1152 __ sarl(t0, Klass::_lh_array_tag_shift);
1153 int tag = ((id == new_type_array_id)
1154 ? Klass::_lh_array_tag_type_value
1155 : Klass::_lh_array_tag_obj_value);
1156 __ cmpl(t0, tag);
1157 __ jcc(Assembler::equal, ok);
1158 __ stop("assert(is an array klass)");
1159 __ should_not_reach_here();
1160 __ bind(ok);
1161 }
1162 #endif // ASSERT
1163
1164 // If TLAB is disabled, see if there is support for inlining contiguous
1165 // allocations.
1166 // Otherwise, just go to the slow path.
1167 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1168 Register arr_size = rsi;
1169 Register t1 = rcx; // must be rcx for use as shift count
1170 Register t2 = rdi;
1171 Label slow_path;
1172
1173 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
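// For illustration (values assumed, not taken from this code): a typical int[] has
// log2_element_size == 2 in the low layout_helper bits and a 16-byte header, so a
// length-10 array needs round_up(16 + (10 << 2)) = 56 bytes.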
1174 // since size is positive movl does right thing on 64bit
1175 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1176         // since size is positive movl does right thing on 64bit
1177 __ movl(arr_size, length);
1178 assert(t1 == rcx, "fixed register usage");
1190
1191 __ initialize_header(obj, klass, length, t1, t2);
1192 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1193 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1194 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1195 __ andptr(t1, Klass::_lh_header_size_mask);
1196 __ subptr(arr_size, t1); // body length
1197 __ addptr(t1, obj); // body start
1198 __ initialize_body(t1, arr_size, 0, t2);
1199 __ verify_oop(obj);
1200 __ ret(0);
1201
1202 __ bind(slow_path);
1203 }
1204
1205 __ enter();
1206 OopMap* map = save_live_registers(sasm, 3);
1207 int call_offset;
1208 if (id == new_type_array_id) {
1209 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1210 } else {
1211 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1212 }
1213
1214 oop_maps = new OopMapSet();
1215 oop_maps->add_gc_map(call_offset, map);
1216 restore_live_registers_except_rax(sasm);
1217
1218 __ verify_oop(obj);
1219 __ leave();
1220 __ ret(0);
1221
1222 // rax,: new array
1223 }
1224 break;
1225
1226 case new_multi_array_id:
1227 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1228 // rax,: klass
1229 // rbx,: rank
1230 // rcx: address of 1st dimension
1231 OopMap* map = save_live_registers(sasm, 4);
1232 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1233
1234 oop_maps = new OopMapSet();
1235 oop_maps->add_gc_map(call_offset, map);
1236 restore_live_registers_except_rax(sasm);
1237
1238 // rax,: new multi array
1239 __ verify_oop(rax);
1240 }
1241 break;
1242
1243 case register_finalizer_id:
1244 {
1245 __ set_info("register_finalizer", dont_gc_arguments);
1246
1247 // This is called via call_runtime so the arguments
1248       // will be placed in C ABI locations
1249
1250 #ifdef _LP64
1251 __ verify_oop(c_rarg0);
1252 __ mov(rax, c_rarg0);
1253 #else
1254 // The object is passed on the stack and we haven't pushed a
1255       // frame yet so it's one word away from the top of the stack.
1256 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1257 __ verify_oop(rax);
1258 #endif // _LP64
1259
1260       // load the klass and check the has_finalizer flag
1261 Label register_finalizer;
1262 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1325 // activation and we are calling a leaf VM function only.
1326 generate_unwind_exception(sasm);
1327 }
1328 break;
1329
1330 case throw_array_store_exception_id:
1331 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1332 // tos + 0: link
1333 // + 1: return address
1334 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1335 }
1336 break;
1337
1338 case throw_class_cast_exception_id:
1339 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1340 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1341 }
1342 break;
1343
1344 case throw_incompatible_class_change_error_id:
1345 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
1346 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1347 }
1348 break;
1349
1350 case slow_subtype_check_id:
1351 {
1352 // Typical calling sequence:
1353 // __ push(klass_RInfo); // object klass or other subclass
1354 // __ push(sup_k_RInfo); // array element klass or other superclass
1355 // __ call(slow_subtype_check);
1356 // Note that the subclass is pushed first, and is therefore deepest.
1357 // Previous versions of this code reversed the names 'sub' and 'super'.
1358 // This was operationally harmless but made the code unreadable.
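// SLOT2(x) is expected to expand to an extra high-word slot name on 64-bit (where
// each saved register spans two 32-bit VMReg slots) and to nothing on 32-bit, so
// the enum below names the stack slots of the hand-built frame.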
1359 enum layout {
1360 rax_off, SLOT2(raxH_off)
1361 rcx_off, SLOT2(rcxH_off)
1362 rsi_off, SLOT2(rsiH_off)
1363 rdi_off, SLOT2(rdiH_off)
1364 // saved_rbp_off, SLOT2(saved_rbpH_off)
1365 return_off, SLOT2(returnH_off)
1366 sup_k_off, SLOT2(sup_kH_off)
1367 klass_off, SLOT2(superH_off)
1368 framesize,
1369 result_off = klass_off // deepest argument is also the return value
|
1010
1011 // for better readability
1012 const bool must_gc_arguments = true;
1013 const bool dont_gc_arguments = false;
1014
1015 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
1016 bool save_fpu_registers = true;
1017
1018 // stub code & info for the different stubs
1019 OopMapSet* oop_maps = NULL;
1020 switch (id) {
1021 case forward_exception_id:
1022 {
1023 oop_maps = generate_handle_exception(id, sasm);
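// Note (assumed from the handler-generation code, not shown here): generate_handle_exception
// is expected to patch the return address in the caller frame with the exception handler
// entry, so the leave()/ret(0) below returns straight into the handler.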
1024 __ leave();
1025 __ ret(0);
1026 }
1027 break;
1028
1029 case new_instance_id:
1030 case new_instance_no_inline_id:
1031 case fast_new_instance_id:
1032 case fast_new_instance_init_check_id:
1033 {
1034 Register klass = rdx; // Incoming
1035 Register obj = rax; // Result
1036
1037 if (id == new_instance_id) {
1038 __ set_info("new_instance", dont_gc_arguments);
1039 } else if (id == new_instance_no_inline_id) {
1040 __ set_info("new_instance_no_inline", dont_gc_arguments);
1041 } else if (id == fast_new_instance_id) {
1042 __ set_info("fast new_instance", dont_gc_arguments);
1043 } else {
1044 assert(id == fast_new_instance_init_check_id, "bad StubID");
1045 __ set_info("fast new_instance init check", dont_gc_arguments);
1046 }
1047
1048 // If TLAB is disabled, see if there is support for inlining contiguous
1049 // allocations.
1050 // Otherwise, just go to the slow path.
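// Fast-path sketch: read the instance size from the klass' layout_helper, bump-allocate
// in eden, then initialize the header and zero the fields. rdi and rbx are saved around
// the fast path since it may use them as temporaries (no frame is built here); any
// failure branches to slow_path, which restores them and falls through to the runtime call.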
1051 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB
1052 && Universe::heap()->supports_inline_contig_alloc()) {
1053 Label slow_path;
1054 Register obj_size = rcx;
1055 Register t1 = rbx;
1056 Register t2 = rsi;
1057 assert_different_registers(klass, obj, obj_size, t1, t2);
1058
1059 __ push(rdi);
1060 __ push(rbx);
1085 NOT_LP64(__ get_thread(thread));
1086
1087     // get the instance size (size is positive so movl is fine for 64bit)
1088 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1089
1090 __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);
1091
1092 __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
1093 __ verify_oop(obj);
1094 __ pop(rbx);
1095 __ pop(rdi);
1096 __ ret(0);
1097
1098 __ bind(slow_path);
1099 __ pop(rbx);
1100 __ pop(rdi);
1101 }
1102
1103 __ enter();
1104 OopMap* map = save_live_registers(sasm, 2);
1105 int call_offset;
1106 if (id == new_instance_no_inline_id) {
1107 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance_no_inline), klass);
1108 } else {
1109 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1110 }
1111 oop_maps = new OopMapSet();
1112 oop_maps->add_gc_map(call_offset, map);
1113 restore_live_registers_except_rax(sasm);
1114 __ verify_oop(obj);
1115 __ leave();
1116 __ ret(0);
1117
1118 // rax,: new instance
1119 }
1120
1121 break;
1122
1123 case counter_overflow_id:
1124 {
1125 Register bci = rax, method = rbx;
1126 __ enter();
1127 OopMap* map = save_live_registers(sasm, 3);
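// The bci and Method* are expected to have been stored by the compiled caller
// (via store_parameter) just above its call, hence the fixed word offsets from
// rbp after enter().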
1128 // Retrieve bci
1129 __ movl(bci, Address(rbp, 2*BytesPerWord));
1130 // And a pointer to the Method*
1131 __ movptr(method, Address(rbp, 3*BytesPerWord));
1132 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1133 oop_maps = new OopMapSet();
1134 oop_maps->add_gc_map(call_offset, map);
1135 restore_live_registers(sasm);
1136 __ leave();
1137 __ ret(0);
1138 }
1139 break;
1140
1141 case new_type_array_id:
1142 case new_object_array_id:
1143 case new_flat_array_id:
1144 {
1145 Register length = rbx; // Incoming
1146 Register klass = rdx; // Incoming
1147 Register obj = rax; // Result
1148
1149 if (id == new_type_array_id) {
1150 __ set_info("new_type_array", dont_gc_arguments);
1151 } else if (id == new_object_array_id) {
1152 __ set_info("new_object_array", dont_gc_arguments);
1153 } else {
1154 __ set_info("new_flat_array", dont_gc_arguments);
1155 }
1156
1157 #ifdef ASSERT
1158 // assert object type is really an array of the proper kind
1159 {
1160 Label ok;
1161 Register t0 = obj;
1162 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1163 __ sarl(t0, Klass::_lh_array_tag_shift);
1164 switch (id) {
1165 case new_type_array_id:
1166 __ cmpl(t0, Klass::_lh_array_tag_type_value);
1167 __ jcc(Assembler::equal, ok);
1168 __ stop("assert(is a type array klass)");
1169 break;
1170 case new_object_array_id:
1171 __ cmpl(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
1172 __ jcc(Assembler::equal, ok);
1173 __ cmpl(t0, Klass::_lh_array_tag_vt_value); // new "[LVT;"
1174 __ jcc(Assembler::equal, ok);
1175 __ stop("assert(is an object or inline type array klass)");
1176 break;
1177 case new_flat_array_id:
1178 // new "[QVT;"
1179 __ cmpl(t0, Klass::_lh_array_tag_vt_value); // the array can be flattened.
1180 __ jcc(Assembler::equal, ok);
1181 __ cmpl(t0, Klass::_lh_array_tag_obj_value); // the array cannot be flattened (due to InlineArrayElementMaxFlatSize, etc)
1182 __ jcc(Assembler::equal, ok);
1183 __ stop("assert(is an object or inline type array klass)");
1184 break;
1185 default: ShouldNotReachHere();
1186 }
1187 __ should_not_reach_here();
1188 __ bind(ok);
1189 }
1190 #endif // ASSERT
1191
1192 // If TLAB is disabled, see if there is support for inlining contiguous
1193 // allocations.
1194 // Otherwise, just go to the slow path.
1195 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1196 Register arr_size = rsi;
1197 Register t1 = rcx; // must be rcx for use as shift count
1198 Register t2 = rdi;
1199 Label slow_path;
1200
1201 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
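// For illustration (values assumed, not taken from this code): a typical int[] has
// log2_element_size == 2 in the low layout_helper bits and a 16-byte header, so a
// length-10 array needs round_up(16 + (10 << 2)) = 56 bytes.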
1202 // since size is positive movl does right thing on 64bit
1203 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1204         // since size is positive movl does right thing on 64bit
1205 __ movl(arr_size, length);
1206 assert(t1 == rcx, "fixed register usage");
1218
1219 __ initialize_header(obj, klass, length, t1, t2);
1220 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1221 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1222 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1223 __ andptr(t1, Klass::_lh_header_size_mask);
1224 __ subptr(arr_size, t1); // body length
1225 __ addptr(t1, obj); // body start
1226 __ initialize_body(t1, arr_size, 0, t2);
1227 __ verify_oop(obj);
1228 __ ret(0);
1229
1230 __ bind(slow_path);
1231 }
1232
1233 __ enter();
1234 OopMap* map = save_live_registers(sasm, 3);
1235 int call_offset;
1236 if (id == new_type_array_id) {
1237 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1238 } else if (id == new_object_array_id) {
1239 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1240 } else {
1241 assert(id == new_flat_array_id, "must be");
1242 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_flat_array), klass, length);
1243 }
1244
1245 oop_maps = new OopMapSet();
1246 oop_maps->add_gc_map(call_offset, map);
1247 restore_live_registers_except_rax(sasm);
1248
1249 __ verify_oop(obj);
1250 __ leave();
1251 __ ret(0);
1252
1253 // rax,: new array
1254 }
1255 break;
1256
1257 case new_multi_array_id:
1258 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1259 // rax,: klass
1260 // rbx,: rank
1261 // rcx: address of 1st dimension
1262 OopMap* map = save_live_registers(sasm, 4);
1263 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1264
1265 oop_maps = new OopMapSet();
1266 oop_maps->add_gc_map(call_offset, map);
1267 restore_live_registers_except_rax(sasm);
1268
1269 // rax,: new multi array
1270 __ verify_oop(rax);
1271 }
1272 break;
1273
1274 case load_flattened_array_id:
1275 {
1276 StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
1277 OopMap* map = save_live_registers(sasm, 3);
1278
1279 // Called with store_parameter and not C abi
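// f.load_argument(n, reg) is expected to read the n-th value the compiled caller
// stored with store_parameter, addressed relative to the incoming rbp.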
1280
1281 f.load_argument(1, rax); // rax,: array
1282 f.load_argument(0, rbx); // rbx,: index
1283 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), rax, rbx);
1284
1285 oop_maps = new OopMapSet();
1286 oop_maps->add_gc_map(call_offset, map);
1287 restore_live_registers_except_rax(sasm);
1288
1289 // rax,: loaded element at array[index]
1290 __ verify_oop(rax);
1291 }
1292 break;
1293
1294 case store_flattened_array_id:
1295 {
1296 StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
1297 OopMap* map = save_live_registers(sasm, 4);
1298
1299 // Called with store_parameter and not C abi
1300
1301 f.load_argument(2, rax); // rax,: array
1302 f.load_argument(1, rbx); // rbx,: index
1303 f.load_argument(0, rcx); // rcx,: value
1304 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), rax, rbx, rcx);
1305
1306 oop_maps = new OopMapSet();
1307 oop_maps->add_gc_map(call_offset, map);
1308 restore_live_registers_except_rax(sasm);
1309 }
1310 break;
1311
1312 case substitutability_check_id:
1313 {
1314 StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
1315 OopMap* map = save_live_registers(sasm, 3);
1316
1317 // Called with store_parameter and not C abi
1318
1319 f.load_argument(1, rax); // rax,: left
1320 f.load_argument(0, rbx); // rbx,: right
1321 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), rax, rbx);
1322
1323 oop_maps = new OopMapSet();
1324 oop_maps->add_gc_map(call_offset, map);
1325 restore_live_registers_except_rax(sasm);
1326
1327 // rax,: are the two operands substitutable
1328 }
1329 break;
1330
1331
1332 case buffer_inline_args_id:
1333 case buffer_inline_args_no_receiver_id:
1334 {
1335 const char* name = (id == buffer_inline_args_id) ?
1336 "buffer_inline_args" : "buffer_inline_args_no_receiver";
1337 StubFrame f(sasm, name, dont_gc_arguments);
1338 OopMap* map = save_live_registers(sasm, 2);
1339 Register method = rbx;
1340 address entry = (id == buffer_inline_args_id) ?
1341 CAST_FROM_FN_PTR(address, buffer_inline_args) :
1342 CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
1343 int call_offset = __ call_RT(rax, noreg, entry, method);
1344 oop_maps = new OopMapSet();
1345 oop_maps->add_gc_map(call_offset, map);
1346 restore_live_registers_except_rax(sasm);
1347 __ verify_oop(rax); // rax: an array of buffered value objects
1348 }
1349 break;
1350
1351 case register_finalizer_id:
1352 {
1353 __ set_info("register_finalizer", dont_gc_arguments);
1354
1355 // This is called via call_runtime so the arguments
1356       // will be placed in C ABI locations
1357
1358 #ifdef _LP64
1359 __ verify_oop(c_rarg0);
1360 __ mov(rax, c_rarg0);
1361 #else
1362 // The object is passed on the stack and we haven't pushed a
1363       // frame yet so it's one word away from the top of the stack.
1364 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1365 __ verify_oop(rax);
1366 #endif // _LP64
1367
1368       // load the klass and check the has_finalizer flag
1369 Label register_finalizer;
1370 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1433 // activation and we are calling a leaf VM function only.
1434 generate_unwind_exception(sasm);
1435 }
1436 break;
1437
1438 case throw_array_store_exception_id:
1439 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1440 // tos + 0: link
1441 // + 1: return address
1442 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1443 }
1444 break;
1445
1446 case throw_class_cast_exception_id:
1447 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1448 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1449 }
1450 break;
1451
1452 case throw_incompatible_class_change_error_id:
1453 { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
1454 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1455 }
1456 break;
1457
1458 case throw_illegal_monitor_state_exception_id:
1459 { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
1460 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
1461 }
1462 break;
1463
1464 case slow_subtype_check_id:
1465 {
1466 // Typical calling sequence:
1467 // __ push(klass_RInfo); // object klass or other subclass
1468 // __ push(sup_k_RInfo); // array element klass or other superclass
1469 // __ call(slow_subtype_check);
1470 // Note that the subclass is pushed first, and is therefore deepest.
1471 // Previous versions of this code reversed the names 'sub' and 'super'.
1472 // This was operationally harmless but made the code unreadable.
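// SLOT2(x) is expected to expand to an extra high-word slot name on 64-bit (where
// each saved register spans two 32-bit VMReg slots) and to nothing on 32-bit, so
// the enum below names the stack slots of the hand-built frame.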
1473 enum layout {
1474 rax_off, SLOT2(raxH_off)
1475 rcx_off, SLOT2(rcxH_off)
1476 rsi_off, SLOT2(rsiH_off)
1477 rdi_off, SLOT2(rdiH_off)
1478 // saved_rbp_off, SLOT2(saved_rbpH_off)
1479 return_off, SLOT2(returnH_off)
1480 sup_k_off, SLOT2(sup_kH_off)
1481 klass_off, SLOT2(superH_off)
1482 framesize,
1483 result_off = klass_off // deepest argument is also the return value
|