1043 break;
1044
1045 case C1StubId::new_instance_id:
1046 case C1StubId::fast_new_instance_id:
1047 case C1StubId::fast_new_instance_init_check_id:
1048 {
1049 Register klass = rdx; // Incoming
1050 Register obj = rax; // Result
1051
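      // All three ids share this code path; only the debug-info string set below
      // differs, and each variant ends up in the same new_instance runtime call.
      // The "fast"/"init check" flavors presumably exist so the compiler can pick
      // a slow-path entry matching the inline allocation sequence it emitted.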
1052 if (id == C1StubId::new_instance_id) {
1053 __ set_info("new_instance", dont_gc_arguments);
1054 } else if (id == C1StubId::fast_new_instance_id) {
1055 __ set_info("fast new_instance", dont_gc_arguments);
1056 } else {
1057 assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
1058 __ set_info("fast new_instance init check", dont_gc_arguments);
1059 }
1060
1061 __ enter();
1062 OopMap* map = save_live_registers(sasm, 2);
1063 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1064 oop_maps = new OopMapSet();
1065 oop_maps->add_gc_map(call_offset, map);
1066 restore_live_registers_except_rax(sasm);
1067 __ verify_oop(obj);
1068 __ leave();
1069 __ ret(0);
1070
1071 // rax,: new instance
1072 }
1073
1074 break;
1075
1076 case C1StubId::counter_overflow_id:
1077 {
1078 Register bci = rax, method = rbx;
1079 __ enter();
1080 OopMap* map = save_live_registers(sasm, 3);
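      // The caller placed bci and the Method* in its outgoing argument slots
      // (presumably via store_parameter, as the other stubs here do). After
      // enter(), rbp + 1*BytesPerWord holds the return address, so those two
      // arguments are found 2 and 3 words above rbp.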
1081 // Retrieve bci
1082 __ movl(bci, Address(rbp, 2*BytesPerWord));
1083 // And a pointer to the Method*
1084 __ movptr(method, Address(rbp, 3*BytesPerWord));
1085 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1086 oop_maps = new OopMapSet();
1087 oop_maps->add_gc_map(call_offset, map);
1088 restore_live_registers(sasm);
1089 __ leave();
1090 __ ret(0);
1091 }
1092 break;
1093
1094 case C1StubId::new_type_array_id:
1095 case C1StubId::new_object_array_id:
1096 {
1097 Register length = rbx; // Incoming
1098 Register klass = rdx; // Incoming
1099 Register obj = rax; // Result
1100
1101 if (id == C1StubId::new_type_array_id) {
1102 __ set_info("new_type_array", dont_gc_arguments);
1103 } else {
1104 __ set_info("new_object_array", dont_gc_arguments);
1105 }
1106
1107 #ifdef ASSERT
1108 // assert that the incoming klass is really an array klass of the proper kind
1109 {
1110 Label ok;
1111 Register t0 = obj;
1112 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1113 __ sarl(t0, Klass::_lh_array_tag_shift);
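      // For arrays the layout helper is negative, with the array tag in its top
      // bits; the arithmetic shift sign-extends just that tag for the compare below.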
1114 int tag = ((id == C1StubId::new_type_array_id)
1115 ? Klass::_lh_array_tag_type_value
1116 : Klass::_lh_array_tag_obj_value);
1117 __ cmpl(t0, tag);
1118 __ jcc(Assembler::equal, ok);
1119 __ stop("assert(is an array klass)");
1120 __ should_not_reach_here();
1121 __ bind(ok);
1122 }
1123 #endif // ASSERT
1124
1125 __ enter();
1126 OopMap* map = save_live_registers(sasm, 3);
1127 int call_offset;
1128 if (id == C1StubId::new_type_array_id) {
1129 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1130 } else {
1131 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1132 }
1133
1134 oop_maps = new OopMapSet();
1135 oop_maps->add_gc_map(call_offset, map);
1136 restore_live_registers_except_rax(sasm);
1137
1138 __ verify_oop(obj);
1139 __ leave();
1140 __ ret(0);
1141
1142 // rax,: new array
1143 }
1144 break;
1145
1146 case C1StubId::new_multi_array_id:
1147 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1148 // rax,: klass
1149 // rbx,: rank
1150 // rcx: address of 1st dimension
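      // Note that rax doubles as the incoming klass argument and as the oop
      // result register of the runtime call below.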
1151 OopMap* map = save_live_registers(sasm, 4);
1152 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1153
1154 oop_maps = new OopMapSet();
1155 oop_maps->add_gc_map(call_offset, map);
1156 restore_live_registers_except_rax(sasm);
1157
1158 // rax,: new multi array
1159 __ verify_oop(rax);
1160 }
1161 break;
1162
1163 case C1StubId::register_finalizer_id:
1164 {
1165 __ set_info("register_finalizer", dont_gc_arguments);
1166
1167 // This is called via call_runtime so the arguments
1168 // will be placed in C ABI locations
1169
1170 #ifdef _LP64
1171 __ verify_oop(c_rarg0);
1172 __ mov(rax, c_rarg0);
1173 #else
1174 // The object is passed on the stack and we haven't pushed a
1175 // frame yet so it's one word away from the top of the stack.
1176 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1177 __ verify_oop(rax);
1178 #endif // _LP64
1179
1180 // load the klass and check the has-finalizer flag
1181 Label register_finalizer;
1182 Register t = rsi;
1243 // activation and we are calling a leaf VM function only.
1244 generate_unwind_exception(sasm);
1245 }
1246 break;
1247
1248 case C1StubId::throw_array_store_exception_id:
1249 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1250 // tos + 0: link
1251 // + 1: return address
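      // The trailing 'true' is generate_exception_throw's has_argument flag: this
      // throw entry takes an argument (presumably the offending object), unlike
      // the 'false' cases further down.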
1252 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1253 }
1254 break;
1255
1256 case C1StubId::throw_class_cast_exception_id:
1257 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1258 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1259 }
1260 break;
1261
1262 case C1StubId::throw_incompatible_class_change_error_id:
1263 { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
1264 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1265 }
1266 break;
1267
1268 case C1StubId::slow_subtype_check_id:
1269 {
1270 // Typical calling sequence:
1271 // __ push(klass_RInfo); // object klass or other subclass
1272 // __ push(sup_k_RInfo); // array element klass or other superclass
1273 // __ call(slow_subtype_check);
1274 // Note that the subclass is pushed first, and is therefore deepest.
1275 // Previous versions of this code reversed the names 'sub' and 'super'.
1276 // This was operationally harmless but made the code unreadable.
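      // The enum below lays out this stub's frame in stack slots: the registers
      // it saves, the return address, and the two caller-pushed arguments. SLOT2
      // appears to add the second (high-word) slot each entry occupies on 64-bit.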
1277 enum layout {
1278 rax_off, SLOT2(raxH_off)
1279 rcx_off, SLOT2(rcxH_off)
1280 rsi_off, SLOT2(rsiH_off)
1281 rdi_off, SLOT2(rdiH_off)
1282 // saved_rbp_off, SLOT2(saved_rbpH_off)
1283 return_off, SLOT2(returnH_off)
1284 sup_k_off, SLOT2(sup_kH_off)
1285 klass_off, SLOT2(superH_off)
1286 framesize,
1287 result_off = klass_off // deepest argument is also the return value
|
1043 break;
1044
1045 case C1StubId::new_instance_id:
1046 case C1StubId::fast_new_instance_id:
1047 case C1StubId::fast_new_instance_init_check_id:
1048 {
1049 Register klass = rdx; // Incoming
1050 Register obj = rax; // Result
1051
1052 if (id == C1StubId::new_instance_id) {
1053 __ set_info("new_instance", dont_gc_arguments);
1054 } else if (id == C1StubId::fast_new_instance_id) {
1055 __ set_info("fast new_instance", dont_gc_arguments);
1056 } else {
1057 assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
1058 __ set_info("fast new_instance init check", dont_gc_arguments);
1059 }
1060
1061 __ enter();
1062 OopMap* map = save_live_registers(sasm, 2);
1063 int call_offset;
1064 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1065 oop_maps = new OopMapSet();
1066 oop_maps->add_gc_map(call_offset, map);
1067 restore_live_registers_except_rax(sasm);
1068 __ verify_oop(obj);
1069 __ leave();
1070 __ ret(0);
1071
1072 // rax,: new instance
1073 }
1074
1075 break;
1076
1077 case C1StubId::counter_overflow_id:
1078 {
1079 Register bci = rax, method = rbx;
1080 __ enter();
1081 OopMap* map = save_live_registers(sasm, 3);
1082 // Retrieve bci
1083 __ movl(bci, Address(rbp, 2*BytesPerWord));
1084 // And a pointer to the Method*
1085 __ movptr(method, Address(rbp, 3*BytesPerWord));
1086 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1087 oop_maps = new OopMapSet();
1088 oop_maps->add_gc_map(call_offset, map);
1089 restore_live_registers(sasm);
1090 __ leave();
1091 __ ret(0);
1092 }
1093 break;
1094
1095 case C1StubId::new_type_array_id:
1096 case C1StubId::new_object_array_id:
1097 case C1StubId::new_null_free_array_id:
1098 {
1099 Register length = rbx; // Incoming
1100 Register klass = rdx; // Incoming
1101 Register obj = rax; // Result
1102
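      // new_null_free_array_id is the Valhalla variant: it allocates an array
      // whose elements are null-restricted. Whether such an array is laid out
      // flat or as an array of references is decided at runtime (note the assert
      // below accepts either array tag for it); apart from that, the three ids
      // differ only in which runtime entry they call.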
1103 if (id == C1StubId::new_type_array_id) {
1104 __ set_info("new_type_array", dont_gc_arguments);
1105 } else if (id == C1StubId::new_object_array_id) {
1106 __ set_info("new_object_array", dont_gc_arguments);
1107 } else {
1108 __ set_info("new_null_free_array", dont_gc_arguments);
1109 }
1110
1111 #ifdef ASSERT
1112 // assert that the incoming klass is really an array klass of the proper kind
1113 {
1114 Label ok;
1115 Register t0 = obj;
1116 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1117 __ sarl(t0, Klass::_lh_array_tag_shift);
1118 switch (id) {
1119 case C1StubId::new_type_array_id:
1120 __ cmpl(t0, Klass::_lh_array_tag_type_value);
1121 __ jcc(Assembler::equal, ok);
1122 __ stop("assert(is a type array klass)");
1123 break;
1124 case C1StubId::new_object_array_id:
1125 __ cmpl(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
1126 __ jcc(Assembler::equal, ok);
1127 __ cmpl(t0, Klass::_lh_array_tag_vt_value); // new "[LVT;"
1128 __ jcc(Assembler::equal, ok);
1129 __ stop("assert(is an object or inline type array klass)");
1130 break;
1131 case C1StubId::new_null_free_array_id:
1132 __ cmpl(t0, Klass::_lh_array_tag_vt_value); // the array can be a flat array.
1133 __ jcc(Assembler::equal, ok);
1134 __ cmpl(t0, Klass::_lh_array_tag_obj_value); // the array cannot be a flat array (due to InlineArrayElementMaxFlatSize, etc)
1135 __ jcc(Assembler::equal, ok);
1136 __ stop("assert(is an object or inline type array klass)");
1137 break;
1138 default: ShouldNotReachHere();
1139 }
1140 __ should_not_reach_here();
1141 __ bind(ok);
1142 }
1143 #endif // ASSERT
1144
1145 __ enter();
1146 OopMap* map = save_live_registers(sasm, 3);
1147 int call_offset;
1148 if (id == C1StubId::new_type_array_id) {
1149 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1150 } else if (id == C1StubId::new_object_array_id) {
1151 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1152 } else {
1153 assert(id == C1StubId::new_null_free_array_id, "must be");
1154 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_null_free_array), klass, length);
1155 }
1156
1157 oop_maps = new OopMapSet();
1158 oop_maps->add_gc_map(call_offset, map);
1159 restore_live_registers_except_rax(sasm);
1160
1161 __ verify_oop(obj);
1162 __ leave();
1163 __ ret(0);
1164
1165 // rax,: new array
1166 }
1167 break;
1168
1169 case C1StubId::new_multi_array_id:
1170 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1171 // rax,: klass
1172 // rbx,: rank
1173 // rcx: address of 1st dimension
1174 OopMap* map = save_live_registers(sasm, 4);
1175 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1176
1177 oop_maps = new OopMapSet();
1178 oop_maps->add_gc_map(call_offset, map);
1179 restore_live_registers_except_rax(sasm);
1180
1181 // rax,: new multi array
1182 __ verify_oop(rax);
1183 }
1184 break;
1185
1186 case C1StubId::load_flat_array_id:
1187 {
1188 StubFrame f(sasm, "load_flat_array", dont_gc_arguments);
1189 OopMap* map = save_live_registers(sasm, 3);
1190
1191 // Called with store_parameter, not the C ABI
1192
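      // The runtime loads the element at array[index] out of the flat array and
      // returns it as a heap oop in rax, presumably a freshly buffered value
      // object; hence live registers are saved and an oop map is recorded.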
1193 f.load_argument(1, rax); // rax,: array
1194 f.load_argument(0, rbx); // rbx,: index
1195 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flat_array), rax, rbx);
1196
1197 oop_maps = new OopMapSet();
1198 oop_maps->add_gc_map(call_offset, map);
1199 restore_live_registers_except_rax(sasm);
1200
1201 // rax,: loaded element at array[index]
1202 __ verify_oop(rax);
1203 }
1204 break;
1205
1206 case C1StubId::store_flat_array_id:
1207 {
1208 StubFrame f(sasm, "store_flat_array", dont_gc_arguments);
1209 OopMap* map = save_live_registers(sasm, 4);
1210
1211 // Called with store_parameter, not the C ABI
1212
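      // The runtime stores the value (rcx) into the flat element at array[index],
      // presumably copying it field by field; there is no oop result.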
1213 f.load_argument(2, rax); // rax,: array
1214 f.load_argument(1, rbx); // rbx,: index
1215 f.load_argument(0, rcx); // rcx,: value
1216 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flat_array), rax, rbx, rcx);
1217
1218 oop_maps = new OopMapSet();
1219 oop_maps->add_gc_map(call_offset, map);
1220 restore_live_registers_except_rax(sasm);
1221 }
1222 break;
1223
1224 case C1StubId::substitutability_check_id:
1225 {
1226 StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
1227 OopMap* map = save_live_registers(sasm, 3);
1228
1229 // Called with store_parameter, not the C ABI
1230
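      // Slow path of acmp on value objects: the runtime decides whether the two
      // operands are substitutable (presumably a field-by-field comparison rather
      // than an identity check) and leaves the boolean answer in rax.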
1231 f.load_argument(1, rax); // rax,: left
1232 f.load_argument(0, rbx); // rbx,: right
1233 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), rax, rbx);
1234
1235 oop_maps = new OopMapSet();
1236 oop_maps->add_gc_map(call_offset, map);
1237 restore_live_registers_except_rax(sasm);
1238
1239 // rax,: whether the two operands are substitutable
1240 }
1241 break;
1242
1243
1244 case C1StubId::buffer_inline_args_id:
1245 case C1StubId::buffer_inline_args_no_receiver_id:
1246 {
1247 const char* name = (id == C1StubId::buffer_inline_args_id) ?
1248 "buffer_inline_args" : "buffer_inline_args_no_receiver";
1249 StubFrame f(sasm, name, dont_gc_arguments);
1250 OopMap* map = save_live_registers(sasm, 2);
1251 Register method = rbx;
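      // rbx holds the Method* whose inline-type arguments are to be buffered on
      // the heap, presumably because they arrived scalarized; the runtime returns
      // an array of the buffered value objects, verified below.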
1252 address entry = (id == C1StubId::buffer_inline_args_id) ?
1253 CAST_FROM_FN_PTR(address, buffer_inline_args) :
1254 CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
1255 int call_offset = __ call_RT(rax, noreg, entry, method);
1256 oop_maps = new OopMapSet();
1257 oop_maps->add_gc_map(call_offset, map);
1258 restore_live_registers_except_rax(sasm);
1259 __ verify_oop(rax); // rax: an array of buffered value objects
1260 }
1261 break;
1262
1263 case C1StubId::register_finalizer_id:
1264 {
1265 __ set_info("register_finalizer", dont_gc_arguments);
1266
1267 // This is called via call_runtime so the arguments
1268 // will be placed in C ABI locations
1269
1270 #ifdef _LP64
1271 __ verify_oop(c_rarg0);
1272 __ mov(rax, c_rarg0);
1273 #else
1274 // The object is passed on the stack and we haven't pushed a
1275 // frame yet so it's one word away from the top of the stack.
1276 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1277 __ verify_oop(rax);
1278 #endif // _LP64
1279
1280 // load the klass and check the has-finalizer flag
1281 Label register_finalizer;
1282 Register t = rsi;
1343 // activation and we are calling a leaf VM function only.
1344 generate_unwind_exception(sasm);
1345 }
1346 break;
1347
1348 case C1StubId::throw_array_store_exception_id:
1349 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1350 // tos + 0: link
1351 // + 1: return address
1352 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1353 }
1354 break;
1355
1356 case C1StubId::throw_class_cast_exception_id:
1357 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1358 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1359 }
1360 break;
1361
1362 case C1StubId::throw_incompatible_class_change_error_id:
1363 { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
1364 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1365 }
1366 break;
1367
1368 case C1StubId::throw_illegal_monitor_state_exception_id:
1369 { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
1370 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
1371 }
1372 break;
1373
1374 case C1StubId::throw_identity_exception_id:
1375 { StubFrame f(sasm, "throw_identity_exception", dont_gc_arguments);
1376 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_identity_exception), true);
1377 }
1378 break;
1379
1380 case C1StubId::slow_subtype_check_id:
1381 {
1382 // Typical calling sequence:
1383 // __ push(klass_RInfo); // object klass or other subclass
1384 // __ push(sup_k_RInfo); // array element klass or other superclass
1385 // __ call(slow_subtype_check);
1386 // Note that the subclass is pushed first, and is therefore deepest.
1387 // Previous versions of this code reversed the names 'sub' and 'super'.
1388 // This was operationally harmless but made the code unreadable.
1389 enum layout {
1390 rax_off, SLOT2(raxH_off)
1391 rcx_off, SLOT2(rcxH_off)
1392 rsi_off, SLOT2(rsiH_off)
1393 rdi_off, SLOT2(rdiH_off)
1394 // saved_rbp_off, SLOT2(saved_rbpH_off)
1395 return_off, SLOT2(returnH_off)
1396 sup_k_off, SLOT2(sup_kH_off)
1397 klass_off, SLOT2(superH_off)
1398 framesize,
1399 result_off = klass_off // deepest argument is also the return value
|