64 // clobbered, so we must protect it around the CPUID. |
64 // clobbered, so we must protect it around the CPUID. |
65 __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory"); |
65 __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory"); |
66 #endif |
66 #endif |
67 } |
67 } |
68 |
68 |
// Ordered store (store + full fence) for 1-byte values.
// On x86, XCHG with a memory operand carries an implicit LOCK prefix, so
// this single instruction both performs the store to *p and acts as a full
// memory barrier -- no separate mfence is needed after it.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    // "=q": byte operations require a register with an 8-bit subregister
    //        (eax/ebx/ecx/edx on 32-bit).
    // "0" (v): input tied to the same register as output %0.
    // "memory" clobber: tells the compiler not to reorder or cache memory
    //        accesses across this instruction (compiler-level barrier).
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
|
80 |
|
// Ordered store (store + full fence) for 2-byte values.
// XCHG against memory is implicitly locked on x86, so the exchange itself
// provides the required fence semantics.
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    // "=r"/"0": v is both input and output in the same register; its
    //        post-exchange value (the old *p) is simply discarded.
    // "memory" clobber acts as a compiler-level reordering barrier.
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
|
92 |
|
// Ordered store (store + full fence) for 4-byte values.
// The implicitly-locked XCHG makes the store globally visible and fully
// fenced in one instruction.
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    // Same pattern as the 1- and 2-byte specializations, with the 32-bit
    // form of the instruction.
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
|
104 |
|
// Ordered store (store + full fence) for 8-byte values.
// Only available on 64-bit builds: xchgq requires a 64-bit register, so the
// specialization is guarded by AMD64.  On 32-bit x86, 8-byte ordered stores
// must be handled elsewhere (not visible in this file chunk).
#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    // Implicitly-locked exchange: store + full fence in one instruction.
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64
|
118 |
|
119 #endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP |
69 #endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP |