// Copyright (c) 2008 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Do not include this header file directly. Use base/cef_atomicops.h
// instead.

#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_

#include <windows.h>

#include <intrin.h>

#include "include/base/cef_macros.h"

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
                                     static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
                                 static_cast<LONG>(increment)) +
         increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
#endif
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
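
// Illustrative sketch (not part of the CEF API): Release_Store() and
// Acquire_Load() are meant to be used as a pair to publish data from one
// thread and consume it on another. The names below are hypothetical and the
// snippet is shown for documentation only.
//
//   // Producer thread: write the payload, then publish the flag.
//   payload = 42;
//   Release_Store(&ready_flag, 1);
//
//   // Consumer thread: once the flag reads 1, the payload write is visible.
//   if (Acquire_Load(&ready_flag) == 1)
//     UsePayload(payload);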

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result =
      InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
                                 reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
                                  static_cast<LONGLONG>(increment)) +
         increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace base::subtle
}  // namespace base

#endif  // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_