  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }
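  // (Note: the word alignment checked above means the 4-byte jint patch
  // assembled below can be installed with a single atomic store.)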
|
  // First patch dummy jmp in place
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8

  // Make sure patching code is locked. No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

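  // (The displacement is the 4 bytes at displacement_address(); the check
  // above passes only when its first and last byte share a cache line, so a
  // single store can update it atomically.)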
  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case: The destination lies within a single cache line.
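    // (Note: set_destination() rewrites only the 4-byte displacement, so the
    // update is a single store within one cache line.)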
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address() + 1) / cache_line_size) {
    // Tricky case: The instruction prefix lies within a single cache line.
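    // (The patch below proceeds in three steps: park concurrent executors on
    // a two-byte jmp-to-self, rewrite the last three bytes of the call, then
    // restore the first two bytes with the new opcode and displacement byte.)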
|
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
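    // (The call instruction encodes its target as a rel32 operand, hence the
    // 32-bit range check above on AMD64.)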
|
    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;  // jmp rel8
      patch_jump[1] = 0xFE;  // jmp to self

      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);
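    // (Any thread that executes the call site from now on spins harmlessly on
    // the jmp-to-self until the call opcode is restored below.)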
|
    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];
|
    // Invalidate. Opteron requires a flush after every write.
    wrote(sizeof(short));
|
    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);
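    // (Threads parked on the self-jump now fetch the restored opcode and new
    // displacement, falling through to the fully patched call.)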
|
    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible: One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64