hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
changeset 11637 030466036615
parent 10981 31a7be6a8ef8
child 11962 42ae1f21ed2b

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -936,10 +936,29 @@
       __ movq(dst.first()->as_Register(), src.first()->as_Register());
     }
   }
 }
 
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
+      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
+    } else {
+      // stack to reg
+      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
+  } else {
+    if (dst.first() != src.first()) {
+      __ movq(dst.first()->as_Register(), src.first()->as_Register());
+    }
+  }
+}
 
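For reference, move_ptr is the pointer-width sibling of the existing
move32_64/long_move helpers, and it uses rax as scratch for stack-to-stack
moves (x86 cannot encode a memory-to-memory mov); this is why
unpack_array_argument below asserts that its outgoing arguments do not
collide with rax. A minimal stand-alone model of the dispatch, with a
hypothetical Loc type standing in for the VMRegPair machinery (a sketch,
not HotSpot code):

    #include <cstdint>
    // Hypothetical model: a value lives either in a register or a stack
    // slot; memory-to-memory moves must bounce through a scratch register,
    // which is the role rax plays in move_ptr above.
    struct Loc { bool on_stack; uint64_t* p; };  // p = slot or register cell
    static uint64_t scratch;                     // stands in for rax
    static void move_ptr_model(Loc src, Loc dst) {
      if (src.on_stack && dst.on_stack) {
        scratch = *src.p;                        // stack -> scratch (rax)
        *dst.p  = scratch;                       // scratch -> stack
      } else if (dst.p != src.p) {
        *dst.p = *src.p;                         // reg<->stack or reg->reg
      }
    }
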
 // An oop arg. Must pass a handle not the oop itself
 static void object_move(MacroAssembler* masm,
                         OopMap* map,
                         int oop_handle_offset,
@@ -1150,10 +1169,207 @@
         __ addptr(rsp, 2*wordSize);
       }
     }
 }
 
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // if map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  int handle_index = 0;
+  // Save down double word first
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
+      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+      } else {
+        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+      }
+    }
+    if (in_regs[i].first()->is_Register() &&
+        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
+      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
+        if (in_sig_bt[i] == T_ARRAY) {
+          map->set_oop(VMRegImpl::stack2reg(slot));
+        }
+      } else {
+        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
+      }
+    }
+  }
+  // Save or restore single word registers
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_Register()) {
+      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      assert(handle_index <= stack_slots, "overflow");
+
+      // Value is in an input register pass; we must flush it to the stack
+      const Register reg = in_regs[i].first()->as_Register();
+      switch (in_sig_bt[i]) {
+        case T_BOOLEAN:
+        case T_CHAR:
+        case T_BYTE:
+        case T_SHORT:
+        case T_INT:
+          if (map != NULL) {
+            __ movl(Address(rsp, offset), reg);
+          } else {
+            __ movl(reg, Address(rsp, offset));
+          }
+          break;
+        case T_ARRAY:
+        case T_LONG:
+          // handled above
+          break;
+        case T_OBJECT:
+        default: ShouldNotReachHere();
+      }
+    } else if (in_regs[i].first()->is_XMMRegister()) {
+      if (in_sig_bt[i] == T_FLOAT) {
+        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+        int offset = slot * VMRegImpl::stack_slot_size;
+        assert(handle_index <= stack_slots, "overflow");
+        if (map != NULL) {
+          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+        } else {
+          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+        }
+      }
+    } else if (in_regs[i].first()->is_stack()) {
+      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+      }
+    }
+  }
+}
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// keeps a new JNI critical region from starting until a GC has been
+// forced.  Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               int stack_slots,
+                                               int total_c_args,
+                                               int total_in_args,
+                                               int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+  __ jcc(Assembler::equal, cont);
+
+  // Save down any incoming oops and call into the runtime to halt for a GC
+
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  address the_pc = __ pc();
+  oop_maps->add_gc_map( __ offset(), map);
+  __ set_last_Java_frame(rsp, noreg, the_pc);
+
+  __ block_comment("block_for_jni_critical");
+  __ movptr(c_rarg0, r15_thread);
+  __ mov(r12, rsp); // remember sp
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // align stack as required by ABI
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
+  __ mov(rsp, r12); // restore sp
+  __ reinit_heapbase();
+
+  __ reset_last_Java_frame(false, true);
+
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ bind(cont);
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers
+    for (int i = 0; i < total_in_args - 1; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        __ xorptr(reg, reg);
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      } else if (in_regs[i].first()->is_stack()) {
+        // Nothing to do
+      } else {
+        ShouldNotReachHere();
+      }
+      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
+        i++;
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+  Register tmp_reg = rax;
+  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+
+  // Pass the length, ptr pair
+  Label is_null, done;
+  VMRegPair tmp;
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  move_ptr(masm, tmp, body_arg);
+  // load the length relative to the body.
+  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
+                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  move32_64(masm, tmp, length_arg);
+  __ jmpb(done);
+  __ bind(is_null);
+  // Pass zeros
+  __ xorptr(tmp_reg, tmp_reg);
+  move_ptr(masm, tmp, body_arg);
+  move32_64(masm, tmp, length_arg);
+  __ bind(done);
+}
+
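Taken together, the three helpers above implement the critical-native
calling convention the rest of this change wires up: no JNIEnv* and no
jclass/receiver, and each array parameter is expanded into an
(int length, elem* body) pair, with a NULL array passed as (0, NULL).
Under those assumptions, the C-side critical entry for a hypothetical
static native int sum(byte[] data) in a class com.example.Sums (names
invented for illustration; the JavaCritical_ prefix is the one matched by
the VM's critical-native lookup) would look roughly like:

    #include <jni.h>
    // Hypothetical sketch: the critical counterpart of the regular
    // Java_com_example_Sums_sum JNI entry. No JNIEnv*, no jclass; the
    // byte[] argument arrives pre-unpacked as a (length, body) pair.
    extern "C" JNIEXPORT jint JNICALL
    JavaCritical_com_example_Sums_sum(jint data_len, jbyte* data) {
      jint s = 0;
      for (jint i = 0; i < data_len; i++) {
        s += data[i];  // direct access to the array body, no JNI calls
      }
      return s;
    }
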
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method.  The method takes arguments
 // in the Java compiled code convention, marshals them to the native
 // convention (handlizes oops, etc), transitions to native, makes the call,
 // returns to java state (possibly blocking), unhandlizes any result and
@@ -1164,45 +1380,94 @@
                                                 int total_in_args,
                                                 int comp_args_on_stack,
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
-  // Native nmethod wrappers never take possesion of the oop arguments.
-  // So the caller will gc the arguments. The only thing we need an
-  // oopMap for is if the call is static
-  //
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");
+
   // An OopMap for lock (and class if static)
   OopMapSet *oop_maps = new OopMapSet();
   intptr_t start = (intptr_t)__ pc();
 
   // We have received a description of where all the java args are located
   // on entry to the wrapper. We need to convert these args to where
   // the jni function will expect them. To figure out where they go
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
-  int total_c_args = total_in_args + 1;
-  if (method->is_static()) {
-    total_c_args++;
+  int total_c_args = total_in_args;
+  if (!is_critical_native) {
+    total_c_args += 1;
+    if (method->is_static()) {
+      total_c_args++;
+    }
+  } else {
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        total_c_args++;
+      }
+    }
   }
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
+  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;
 
   int argc = 0;
-  out_sig_bt[argc++] = T_ADDRESS;
-  if (method->is_static()) {
-    out_sig_bt[argc++] = T_OBJECT;
-  }
-
-  for (int i = 0; i < total_in_args ; i++ ) {
-    out_sig_bt[argc++] = in_sig_bt[i];
+  if (!is_critical_native) {
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+    SignatureStream ss(method->signature());
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[i]  = T_BYTE; break;
+            case 'C': in_elem_bt[i]  = T_CHAR; break;
+            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
+            case 'F': in_elem_bt[i]  = T_FLOAT; break;
+            case 'I': in_elem_bt[i]  = T_INT; break;
+            case 'J': in_elem_bt[i]  = T_LONG; break;
+            case 'S': in_elem_bt[i]  = T_SHORT; break;
+            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+        in_elem_bt[i] = T_VOID;
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
   }
 
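The argument-count arithmetic above is easy to sanity-check in isolation.
A compilable sketch, where count_c_args and SigType are hypothetical
stand-ins for the inline logic and HotSpot's BasicType:

    #include <cassert>
    enum SigType { ST_INT, ST_ARRAY };  // stand-in for BasicType
    // Mirrors the total_c_args computation above.
    static int count_c_args(const SigType* sig, int n,
                            bool is_static, bool is_critical) {
      int c = n;
      if (!is_critical) {
        c += 1;                         // hidden JNIEnv*
        if (is_static) c++;             // hidden jclass
      } else {
        for (int i = 0; i < n; i++) {
          if (sig[i] == ST_ARRAY) c++;  // arrays expand to (int, elem*)
        }
      }
      return c;
    }
    int main() {
      SigType sig[] = { ST_ARRAY, ST_INT };            // e.g. static f(byte[], int)
      assert(count_c_args(sig, 2, true, false) == 4);  // JNIEnv* + jclass + 2 args
      assert(count_c_args(sig, 2, true, true)  == 3);  // length + body + int
      return 0;
    }
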
   // Now figure out where the args must be stored and how much stack space
   // they require.
-  //
   int out_arg_slots;
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
 
   // Compute framesize for the wrapper.  We need to handlize all oops in
   // incoming registers
@@ -1211,17 +1476,51 @@
 
   // First count the abi requirement plus all of the outgoing args
   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
 
   // Now the space for the inbound oop handle area
+  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for ( int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_ARRAY:
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT:  single_slots++; break;
+          case T_LONG: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT:  single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      }
+    }
+    total_save_slots = double_slots * 2 + single_slots;
+    // align the save area
+    if (double_slots != 0) {
+      stack_slots = round_to(stack_slots, 2);
+    }
+  }
 
   int oop_handle_offset = stack_slots;
-  stack_slots += 6*VMRegImpl::slots_per_word;
+  stack_slots += total_save_slots;
 
   // Now any space we need for handlizing a klass if static method
 
-  int oop_temp_slot_offset = 0;
   int klass_slot_offset = 0;
   int klass_offset = -1;
   int lock_slot_offset = 0;
   bool is_static = false;
 
  1271   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
  1570   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
  1272 
  1571 
  1273   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
  1572   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
  1274 
  1573 
  1275 
       
  1276   // First thing make an ic check to see if we should even be here
  1574   // First thing make an ic check to see if we should even be here
  1277 
  1575 
  1278   // We are free to use all registers as temps without saving them and
  1576   // We are free to use all registers as temps without saving them and
  1279   // restoring them except rbp. rbp is the only callee save register
  1577   // restoring them except rbp. rbp is the only callee save register
  1280   // as far as the interpreter and the compiler(s) are concerned.
  1578   // as far as the interpreter and the compiler(s) are concerned.
  1281 
  1579 
  1282 
  1580 
  1283   const Register ic_reg = rax;
  1581   const Register ic_reg = rax;
  1284   const Register receiver = j_rarg0;
  1582   const Register receiver = j_rarg0;
  1285 
  1583 
  1286   Label ok;
  1584   Label hit;
  1287   Label exception_pending;
  1585   Label exception_pending;
  1288 
  1586 
  1289   assert_different_registers(ic_reg, receiver, rscratch1);
  1587   assert_different_registers(ic_reg, receiver, rscratch1);
  1290   __ verify_oop(receiver);
  1588   __ verify_oop(receiver);
  1291   __ load_klass(rscratch1, receiver);
  1589   __ load_klass(rscratch1, receiver);
  1292   __ cmpq(ic_reg, rscratch1);
  1590   __ cmpq(ic_reg, rscratch1);
  1293   __ jcc(Assembler::equal, ok);
  1591   __ jcc(Assembler::equal, hit);
  1294 
  1592 
  1295   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  1593   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  1296 
       
  1297   __ bind(ok);
       
  1298 
  1594 
  1299   // Verified entry point must be aligned
  1595   // Verified entry point must be aligned
  1300   __ align(8);
  1596   __ align(8);
       
  1597 
       
  1598   __ bind(hit);
  1301 
  1599 
  1302   int vep_offset = ((intptr_t)__ pc()) - start;
  1600   int vep_offset = ((intptr_t)__ pc()) - start;
  1303 
  1601 
  1304   // The instruction at the verified entry point must be 5 bytes or longer
  1602   // The instruction at the verified entry point must be 5 bytes or longer
  1305   // because it can be patched on the fly by make_non_entrant. The stack bang
  1603   // because it can be patched on the fly by make_non_entrant. The stack bang
@@ -1317,13 +1615,12 @@
   // Generate a new frame for the wrapper.
   __ enter();
   // -2 because return address is already present and so is saved rbp
   __ subptr(rsp, stack_size - 2*wordSize);
 
-    // Frame is now completed as far as size and linkage.
-
-    int frame_complete = ((intptr_t)__ pc()) - start;
+  // Frame is now completed as far as size and linkage.
+  int frame_complete = ((intptr_t)__ pc()) - start;
 
 #ifdef ASSERT
     {
       Label L;
       __ mov(rax, rsp);
@@ -1339,11 +1636,14 @@
   // We use r14 as the oop handle for the receiver/klass
   // It is callee save so it survives the call to native
 
   const Register oop_handle_reg = r14;
 
-
+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
+                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+  }
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
   // make from here on out (sync slow path, jvmti, etc.) we will have
   // captured the oops from our caller and have a valid oopMap for
@@ -1388,13 +1688,40 @@
     freg_destroyed[f] = false;
   }
 
 #endif /* ASSERT */
 
-
+  if (is_critical_native) {
+    // The mapping of Java and C arguments passed in registers is
+    // rotated by one, which helps when passing arguments to a regular
+    // Java method, but for critical natives it creates a cycle which
+    // can cause arguments to be killed before they are used.  Break
+    // the cycle by moving the first argument into a temporary
+    // register.
+    for (int i = 0; i < total_c_args; i++) {
+      if (in_regs[i].first()->is_Register() &&
+          in_regs[i].first()->as_Register() == rdi) {
+        __ mov(rbx, rdi);
+        in_regs[i].set1(rbx->as_VMReg());
+      }
+    }
+  }
+
+  // This may iterate in two different directions depending on the
+  // kind of native it is.  The reason is that for regular JNI natives
+  // the incoming and outgoing registers are offset upwards and for
+  // critical natives they are offset downwards.
   int c_arg = total_c_args - 1;
-  for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+  int stride = -1;
+  int init = total_in_args - 1;
+  if (is_critical_native) {
+    // stride forwards
+    c_arg = 0;
+    stride = 1;
+    init = 0;
+  }
+  for (int i = init, count = 0; count < total_in_args; i += stride, c_arg += stride, count++ ) {
 #ifdef ASSERT
     if (in_regs[i].first()->is_Register()) {
       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
     } else if (in_regs[i].first()->is_XMMRegister()) {
       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
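The rotation described in the comment above follows from HotSpot's x86_64
register assignments (Linux/SysV shown), where j_rarg_i == c_rarg_((i+1) mod 6):
c_rarg0..c_rarg5 are rdi, rsi, rdx, rcx, r8, r9 while j_rarg0..j_rarg5 are
rsi, rdx, rcx, r8, r9, rdi. For regular JNI the hidden JNIEnv* in c_rarg0
absorbs the shift; a critical native instead has to move every argument
down one register, which closes a cycle through rdi. A toy model of
breaking that cycle with a temporary, as the mov(rbx, rdi) above does:

    #include <cassert>
    // Toy model: regs[i] must receive the value of regs[(i + 1) % 6]
    // (the j_rarg -> c_rarg shift); index 0 plays rdi, index 5 plays r9.
    int main() {
      int regs[6] = { 0, 1, 2, 3, 4, 5 };
      int rbx = regs[0];                 // park rdi's value in a temp first
      for (int i = 0; i < 5; i++) {
        regs[i] = regs[i + 1];           // shift each value down one register
      }
      regs[5] = rbx;                     // r9 receives the parked rdi value
      for (int i = 0; i < 6; i++) {
        assert(regs[i] == (i + 1) % 6);  // nothing was clobbered
      }
      return 0;
    }
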
@@ -1405,11 +1732,24 @@
       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
     }
 #endif /* ASSERT */
     switch (in_sig_bt[i]) {
       case T_ARRAY:
+        if (is_critical_native) {
+          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+          c_arg++;
+#ifdef ASSERT
+          if (out_regs[c_arg].first()->is_Register()) {
+            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
+          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
+            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
+          }
+#endif
+          break;
+        }
       case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
         break;
       case T_VOID:
@@ -1441,11 +1781,11 @@
   // need to spill before we call out
   c_arg++;
 
   // Pre-load a static method's oop into r14.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static()) {
+  if (method->is_static() && !is_critical_native) {
 
     //  load oop into a register
     __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
 
     // Now handlize the static class mirror; it's known not-null.
@@ -1507,10 +1847,11 @@
 
   Label slow_path_lock;
   Label lock_done;
 
   if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
 
 
     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
 
     // Get the handle (the 2nd argument)
@@ -1570,17 +1911,18 @@
 
   // Finally just about ready to make the JNI call
 
 
   // get JNIEnv* which is first argument to native
-
-  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+  if (!is_critical_native) {
+    __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+  }
 
   // Now set thread in native
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
 
-  __ call(RuntimeAddress(method->native_function()));
+  __ call(RuntimeAddress(native_func));
 
     // Either restore the MXCSR register after returning from the JNI Call
     // or verify that it wasn't changed.
     if (RestoreMXCSROnJNICalls) {
       __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
@@ -1632,10 +1974,11 @@
       // due to cache line collision.
       __ serialize_memory(r15_thread, rcx);
     }
   }
 
+  Label after_transition;
 
   // check for safepoint operation in progress and/or pending suspend requests
   {
     Label Continue;
 
@@ -1657,20 +2000,32 @@
     save_native_result(masm, ret_type, stack_slots);
     __ mov(c_rarg0, r15_thread);
     __ mov(r12, rsp); // remember sp
     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
     __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    if (!is_critical_native) {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    } else {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
+    }
     __ mov(rsp, r12); // restore sp
     __ reinit_heapbase();
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
+
+    if (is_critical_native) {
+      // The call above performed the transition to thread_in_Java so
+      // skip the transition logic below.
+      __ jmpb(after_transition);
+    }
+
     __ bind(Continue);
   }
 
   // change thread state
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
+  __ bind(after_transition);
 
   Label reguard;
   Label reguard_done;
   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
   __ jcc(Assembler::equal, reguard);
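The block above encodes the native-to-Java transition state machine: a
regular JNI return goes _thread_in_native -> _thread_in_native_trans ->
(possible safepoint block) -> _thread_in_Java, while for a critical native
check_special_condition_for_native_trans_and_transition blocks for the
safepoint and performs the final transition itself, which is why the stub
short-circuits with jmpb(after_transition). A schematic of the two paths
(a sketch only; the real work happens in the runtime calls):

    enum ThreadState { in_native, in_native_trans, in_Java };
    // Schematic of the return path above; names mirror the stub's labels.
    static ThreadState return_from_native(bool is_critical, bool safepoint_pending) {
      // The stub first stores in_native_trans, then checks for safepoints.
      if (safepoint_pending) {
        if (is_critical) {
          // ..._trans_and_transition blocks for the safepoint and moves
          // the thread to in_Java itself -> jmpb(after_transition).
          return in_Java;
        }
        // check_special_condition_for_native_trans blocks but leaves the
        // state at in_native_trans; fall through to the common store.
      }
      return in_Java;  // movl(thread_state, _thread_in_Java); bind(after_transition)
    }
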
@@ -1744,34 +2099,39 @@
       __ movptr(rax, Address(rax, 0));
       __ bind(L);
       __ verify_oop(rax);
   }
 
-  // reset handle block
-  __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  if (!is_critical_native) {
+    // reset handle block
+    __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
+    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  }
 
   // pop our frame
 
   __ leave();
 
-  // Any exception pending?
-  __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, exception_pending);
+  if (!is_critical_native) {
+    // Any exception pending?
+    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+    __ jcc(Assembler::notEqual, exception_pending);
+  }
 
   // Return
 
   __ ret(0);
 
   // Unexpected paths are out of line and go here
 
-  // forward the exception
-  __ bind(exception_pending);
-
-  // and forward the exception
-  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-
+  if (!is_critical_native) {
+    // forward the exception
+    __ bind(exception_pending);
+
+    // and forward the exception
+    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+  }
 
   // Slow path locking & unlocking
   if (method->is_synchronized()) {
 
     // BEGIN Slow path lock
@@ -1874,10 +2234,15 @@
                                             frame_complete,
                                             stack_slots / VMRegImpl::slots_per_word,
                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                             oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
+
   return nm;
 
 }
 
 #ifdef HAVE_DTRACE_H