      __ addptr(rsp, 2*wordSize);
    }
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int handle_index = 0;
  // Save down double-word values first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
    }
  }
  // Save or restore single-word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      assert(handle_index <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack (or reload it).
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
        int offset = slot * VMRegImpl::stack_slot_size;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
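
// Usage sketch (as in check_needs_gc_for_critical_native below): call once
// with a fresh OopMap to spill the argument registers before a runtime call,
// then again with map == NULL to reload them afterwards:
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, map,  in_regs, in_sig_bt); // save
//   ... call into the runtime ...
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, NULL, in_regs, in_sig_bt); // restore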

// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
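  // GC_locker::needs_gc is set when a GC has been requested while other
  // threads sit in JNI critical regions; until it clears, this wrapper
  // must not start a new critical region.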
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // Load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);
}
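
// Note (illustrative, hypothetical names): with this expansion a critical
// native for, say, int sum(byte[] a) is implemented against the flattened
// C signature
//   JNIEXPORT jint JNICALL JavaCritical_pkg_Cls_sum(jint len, jbyte* body);
// i.e. each T_ARRAY java argument becomes the (length, body) pair built here,
// and no JNIEnv*/jclass arguments are passed.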

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
[...]
                                                int total_in_args,
                                                int comp_args_on_stack,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");
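  // critical_native_function() is non-NULL only when native lookup bound a
  // JavaCritical_* entry point for this method, so is_critical_native stays
  // true only in that case.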

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
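  // For example, a static native (I[B)V has total_in_args == 2; the normal
  // case adds the JNIEnv* and the class mirror (total_c_args == 4), while
  // the critical case adds one extra slot for the array length
  // (total_c_args == 3).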

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE;    break;
            case 'C': in_elem_bt[i] = T_CHAR;    break;
            case 'D': in_elem_bt[i] = T_DOUBLE;  break;
            case 'F': in_elem_bt[i] = T_FLOAT;   break;
            case 'I': in_elem_bt[i] = T_INT;     break;
            case 'J': in_elem_bt[i] = T_LONG;    break;
            case 'S': in_elem_bt[i] = T_SHORT;   break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default:  ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }
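  // Continuing the (I[B)V example: the critical case yields
  // out_sig_bt == { T_INT, T_INT, T_ADDRESS } and
  // in_elem_bt == { T_VOID, T_BYTE }, while the normal static case yields
  // out_sig_bt == { T_ADDRESS, T_OBJECT, T_INT, T_ARRAY }.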

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper. We need to handlize all oops in
  // incoming registers
[...]

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_XMMRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = round_to(stack_slots, 2);
    }
  }
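  // Sizing example: two T_FLOAT args and one T_LONG arg in registers give
  // single_slots == 2 and double_slots == 1, so total_save_slots == 4
  // (16 bytes), and stack_slots gets rounded up to an even slot boundary.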

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

[...]
  // stack properly aligned.
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.

  const Register ic_reg = rax;
  const Register receiver = j_rarg0;

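  // The compiled caller's inline cache loads the expected klass into ic_reg;
  // the check below compares it against the receiver's actual klass and
  // jumps to the inline-cache miss stub on a mismatch.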
  Label hit;
  Label exception_pending;

  assert_different_registers(ic_reg, receiver, rscratch1);
  __ verify_oop(receiver);
  __ load_klass(rscratch1, receiver);
  __ cmpq(ic_reg, rscratch1);
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // Verified entry point must be aligned
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
[...]
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    if (!is_critical_native) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
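    // Note: the _and_transition variant also returns the thread to
    // _thread_in_Java, which is why the critical-native path can skip the
    // transition logic below.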
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ jmpb(after_transition);
    }

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
  __ jcc(Assembler::equal, reguard);
[...]
    __ movptr(rax, Address(rax, 0));
    __ bind(L);
    __ verify_oop(rax);
  }

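  // Critical natives created no JNI handles and cannot have a pending
  // exception (they run without a JNIEnv), so the handle-block reset and
  // the pending-exception check below are skipped for them.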
  if (!is_critical_native) {
    // reset handle block
    __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
  }

  // pop our frame

  __ leave();

  if (!is_critical_native) {
    // Any exception pending?
    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, exception_pending);
  }

  // Return

  __ ret(0);

  // Unexpected paths are out of line and go here

  if (!is_critical_native) {
    // forward the exception
    __ bind(exception_pending);

    // and forward the exception
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock