atomicops_internals_mips64_gcc.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_

namespace v8 {
namespace base {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value (branch delay slot)
                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again if sc failed
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
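
// Example (illustration only, not part of this header): the canonical
// compare-and-swap retry loop, here used to OR bits into a word. The name
// "SetBits" and its parameters are hypothetical.
//
//   Atomic32 SetBits(volatile Atomic32* word, Atomic32 bits) {
//     Atomic32 old;
//     do {
//       old = NoBarrier_Load(word);
//     } while (NoBarrier_CompareAndSwap(word, old, old | bits) != old);
//     return old | bits;  // the value this thread installed
//   }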

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again if sc failed
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
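
// Example (illustration only, not part of this header): draining a one-slot
// mailbox by swapping in an "empty" sentinel, so two racing consumers can
// never both observe the same value. "Take" and "kEmpty" are hypothetical.
//
//   const Atomic32 kEmpty = 0;
//   Atomic32 Take(volatile Atomic32* slot) {
//     return NoBarrier_AtomicExchange(slot, kEmpty);
//   }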

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %2\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again if sc failed
                       "addu %1, %0, %3\n"  // delay slot: recompute temp2,
                                            // which sc overwrote with its
                                            // success flag
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}
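
// Example (illustration only, not part of this header): a simple reference
// count; a decrement is just a negative increment. "Ref"/"Unref" are
// hypothetical, and a production refcount would want Barrier semantics on
// the final decrement.
//
//   void Ref(volatile Atomic32* refcount) {
//     NoBarrier_AtomicIncrement(refcount, 1);
//   }
//   bool Unref(volatile Atomic32* refcount) {
//     return NoBarrier_AtomicIncrement(refcount, -1) == 0;  // last ref?
//   }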
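// NOTE: MemoryBarrier() is used here before its definition later in this
// file. That is valid in context: this header is only included from
// atomicops.h, which declares all of these functions first.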
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous
// memory access can be reordered after the operation. "Barrier" operations
// have both "Acquire" and "Release" semantics. A MemoryBarrier() has
// "Barrier" semantics, but does no memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
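
// Example (illustration only, not part of this header): a minimal spinlock
// built on these primitives. "LockWord", "Lock" and "Unlock" are
// hypothetical.
//
//   typedef volatile Atomic32 LockWord;  // 0 = free, 1 = held
//
//   void Lock(LockWord* lock) {
//     // Acquire semantics keep the critical section from being reordered
//     // above the lock acquisition.
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until the word transitions from 0 to 1.
//     }
//   }
//
//   void Unlock(LockWord* lock) {
//     // Release semantics keep the critical section from being reordered
//     // below the lock release.
//     Release_Store(lock, 0);
//   }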

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}
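
// ("sync" above is the MIPS full barrier: loads and stores issued before it
// complete before any load or store issued after it is performed.)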

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
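
// Example (illustration only, not part of this header): the message-passing
// pattern these variants support. "data" and "ready" are hypothetical.
//
//   // Producer: publish the payload, then set the flag with release
//   // semantics so the payload write cannot be reordered after it.
//   NoBarrier_Store(&data, 42);
//   Release_Store(&ready, 1);
//
//   // Consumer: read the flag with acquire semantics so the payload read
//   // cannot be reordered before it.
//   if (Acquire_Load(&ready) == 1) {
//     Atomic32 payload = NoBarrier_Load(&data);  // guaranteed to be 42
//   }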


// 64-bit versions of the atomic ops.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value (branch delay slot)
                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again if scd failed
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again if scd failed
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %2\n"  // temp = *ptr
                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again if scd failed
                       "daddu %1, %0, %3\n"  // delay slot: recompute temp2,
                                             // which scd overwrote with its
                                             // success flag
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous
// memory access can be reordered after the operation. "Barrier" operations
// have both "Acquire" and "Release" semantics. A MemoryBarrier() has
// "Barrier" semantics, but does no memory access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

} }  // namespace v8::base

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_