hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il
changeset 1 489c9b5090e2
child 1659 b9a3819ac7c6
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,162 @@
+//
+// Copyright 2004-2007 Sun Microsystems, Inc.  All Rights Reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+// CA 95054 USA or visit www.sun.com if you need additional information or
+// have any questions.
+//  
+//
+
+  // The argument-count field of each .inline directive is ignored by the
+  // compiler; it is set to the number of arguments purely as documentation.
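+  // Each template is substituted inline at call sites of the extern "C"
+  // function of the same name; under the AMD64 ABI the arguments arrive
+  // in %rdi, %rsi, %rdx, %rcx and the result is returned in %rax.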
+
+  // Get the raw thread ID from %fs:0
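+  // (On Solaris/amd64 the first word at %fs is the thread's self pointer.)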
+      .inline _raw_thread_id,0
+      movq     %fs:0, %rax 
+      .end
+
+  // Get the frame pointer from previous frame.
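+  // The caller's saved %rbp sits at 0(%rbp), so one dereference of the
+  // current frame pointer yields the previous frame's frame pointer.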
+      .inline _get_previous_fp,0
+      movq     %rbp, %rax 
+      movq     (%rax), %rax    // load the caller's saved %rbp
+      .end
+
+  // Support for jint Atomic::add(jint add_value, volatile jint* dest)
+  // An additional bool (os::is_MP()) is passed as the last argument.
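+  // On a uniprocessor the branch skips the lock prefix: "lock" sits on a
+  // line of its own, so "je 1f" lands directly on the xaddl.  xaddl leaves
+  // the old *dest in %edi; addl then forms old + add_value, the new value.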
+      .inline _Atomic_add,3
+      movl     %edi, %eax      // save add_value for return
+      testl    %edx, %edx      // MP test
+      je       1f
+      lock
+1:    xaddl    %edi, (%rsi)
+      addl     %edi, %eax
+      .end
+
+  // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
+  // An additional bool (os::is_MP()) is passed as the last argument.
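+  // Same lock-skipping pattern as _Atomic_add, on 64-bit operands.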
+      .inline _Atomic_add_long,3
+      movq     %rdi, %rax      // save add_value for return
+      testq    %rdx, %rdx      // MP test
+      je       1f
+      lock
+1:    xaddq    %rdi, (%rsi)
+      addq     %rdi, %rax
+      .end
+
+  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
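+  // xchg with a memory operand is implicitly locked, so no MP test or
+  // lock prefix is needed; %edi returns holding the old value of *dest.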
+      .inline _Atomic_xchg,2
+      xchgl    (%rsi), %edi
+      movl     %edi, %eax
+      .end
+
+  // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
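+  // 64-bit variant; xchgq is likewise implicitly locked.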
+      .inline _Atomic_xchg_long,2
+      xchgq    (%rsi), %rdi
+      movq     %rdi, %rax
+      .end
+
+  // Support for jint Atomic::cmpxchg(jint exchange_value, 
+  //                                  volatile jint *dest, 
+  //                                  jint compare_value)
+  // An additional bool (os::is_MP()) is passed as the last argument.
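+  // cmpxchgl compares %eax with *dest: on a match it stores %edi into
+  // *dest, otherwise it loads *dest into %eax.  Either way %eax ends up
+  // holding the prior value of *dest, which is the value to return.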
+      .inline _Atomic_cmpxchg,4
+      movl     %edx, %eax      // compare_value
+      testl    %ecx, %ecx      // MP test
+      je       1f
+      lock
+1:    cmpxchgl %edi, (%rsi)
+      .end
+
+  // Support for jlong Atomic::cmpxchg(jlong exchange_value,
+  //                                   volatile jlong* dest,
+  //                                   jlong compare_value)
+  // An additional bool (os::is_MP()) is passed as the last argument.
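+  // 64-bit variant of _Atomic_cmpxchg.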
+      .inline _Atomic_cmpxchg_long,4
+      movq     %rdx, %rax      // compare_value
+      testq    %rcx, %rcx      // MP test
+      je       1f
+      lock
+1:    cmpxchgq %rdi, (%rsi)
+      .end
+
+  // Support for OrderAccess::acquire()
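+  // A dummy load suffices: x86 never moves a later load or store ahead
+  // of an earlier load, so any ordinary load acts as an acquire barrier.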
+      .inline _OrderAccess_acquire,0
+      movl     0(%rsp), %eax
+      .end
+
+  // Support for OrderAccess::fence()
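+  // A locked read-modify-write of the stack top drains the store buffer
+  // and acts as a full fence; it is typically cheaper than mfence.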
+      .inline _OrderAccess_fence,0
+      lock
+      addl     $0, (%rsp)
+      .end
+
+  // Support for u2 Bytes::swap_u2(u2 x)
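+  // Rotating the 16-bit register by 8 swaps its two bytes.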
+      .inline _raw_swap_u2,1
+      movw     %di, %ax
+      rorw     $8, %ax
+      .end
+
+  // Support for u4 Bytes::swap_u4(u4 x)
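+  // bswapl reverses the byte order of the 32-bit register.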
+      .inline _raw_swap_u4,1
+      movl     %edi, %eax
+      bswapl   %eax
+      .end
+
+  // Support for u8 Bytes::swap_u8(u8 x)
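+  // bswapq reverses the byte order of the 64-bit register.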
+      .inline _raw_swap_u8,1
+      movq     %rdi, %rax
+      bswapq   %rax
+      .end
+
+  // Support for void Prefetch::read
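+  // %rdi holds the base address and %rsi the byte offset; prefetcht0
+  // pulls the addressed line into all levels of the cache hierarchy.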
+      .inline _Prefetch_read,2
+      prefetcht0 (%rdi, %rsi, 1)
+      .end
+
+  // Support for void Prefetch::write
+  // We use prefetcht0 because EM64T doesn't support prefetchw;
+  // prefetchw is a 3DNow! instruction.
+      .inline _Prefetch_write,2
+      prefetcht0 (%rdi, %rsi, 1)
+      .end