comparison src/atomic/linux/SDL_atomic.c @ 3261:72b542f34739

The new, cleaner version of the atomic operations. The dummy code is what you should start from when porting the atomic ops. The Linux code appears to be complete and *should* serve as the base for all Unix and GCC-based versions. The Mac OS X and Win32 versions are currently just copies of the dummy code. I will begin working on the Windows version as soon as this check-in is done. I need someone to work on the Mac OS X version. I'm afraid this check-in will break QNX (sorry!)
author Bob Pendleton <bob@pendleton.com>
date Thu, 17 Sep 2009 20:35:12 +0000
parents 48a80f2a7ff2
children 6fe620d7ce92
comparing 3260:85bf3f297b5c with 3261:72b542f34739
1 /* 1 /*
2 SDL - Simple DirectMedia Layer 2 SDL - Simple DirectMedia Layer
3 Copyright (C) 1997-2009 Sam Lantinga 3 Copyright (C) 1997-2009 Sam Lantinga
4 4
5 This library is free software; you can redistribute it and/or 5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public 6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either 7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version. 8 version 2.1 of the License, or (at your option) any later version.
9 9
10 This library is distributed in the hope that it will be useful, 10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details. 13 Lesser General Public License for more details.
14 14
15 You should have received a copy of the GNU Lesser General Public 15 You should have received a copy of the GNU Lesser General Public
16 License along with this library; if not, write to the Free Software 16 License along with this library; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 18
19 Sam Lantinga 19 Sam Lantinga
20 slouken@libsdl.org 20 slouken@libsdl.org
21
22 Contributed by Bob Pendleton, bob@pendleton.com
21 */ 23 */
22 24
23 #include "SDL_stdinc.h" 25 #include "SDL_stdinc.h"
24 #include "SDL_atomic.h" 26 #include "SDL_atomic.h"
25 27
28 #include "SDL_error.h"
29
26 /* 30 /*
27 This file provides 8, 16, 32, and 64 bit atomic operations. If the 31 This file provides 32 and 64 bit atomic operations. If the
28 operations are provided by the native hardware and operating system 32 operations are provided by the native hardware and operating system
29 they are used. If they are not then the operations are emulated 33 they are used. If they are not then the operations are emulated
30 using the SDL mutex operations. 34 using the SDL spin lock operations. If spin locks cannot be
31 */ 35 implemented then these functions must fail.
36 */
32 37
33 /* 38 /*
34 First, detect whether the operations are supported and create 39 LINUX/GCC VERSION.
35 #defines that indicate that they do exist. The goal is to have all 40
36 the system dependent code in the top part of the file so that the 41 This version of the code assumes support of the atomic builtins as
37 bottom can be use unchanged across all platforms. 42 documented at gcc.gnu.org/onlinedocs/gcc/Atomic-Builtins.html This
38 43 code should work on any modern x86 or other processor supported by
39 Second, #define all the operations in each size class that are 44 GCC.
40 supported. Doing this allows supported operations to be used along 45
41 side of emulated operations. 46 Some processors will only support some of these operations so
42 */ 47 #ifdefs will have to be added as incompatibilities are discovered
48 */
49
50 /*
51 Native spinlock routines.
52 */
53
54 void
55 SDL_AtomicLock(SDL_SpinLock *lock)
56 {
57 while (0 != __sync_lock_test_and_set(lock, 1))
58 {
59 }
60 }
61
62 void
63 SDL_AtomicUnlock(SDL_SpinLock *lock)
64 {
65 __sync_lock_test_and_set(lock, 0);
66 }
67
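For reference, a minimal usage sketch of the two spin lock routines above (the names counter_lock, counter, and increment_counter are hypothetical; it assumes an SDL_SpinLock can be statically initialized to 0, as the lock table later in this file does):

/* Hypothetical usage sketch: serialize access to a counter with the
   spin lock routines defined above. */
static SDL_SpinLock counter_lock = 0;
static int counter = 0;

static void
increment_counter(void)
{
    SDL_AtomicLock(&counter_lock);   /* spins until the lock is acquired */
    ++counter;                       /* critical section */
    SDL_AtomicUnlock(&counter_lock); /* lets the next waiter in */
}
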
68 /*
69 Note that platform specific versions can be built from this version
70 by changing the #undefs to #defines and adding platform specific
71 code.
72 */
73
74 #define nativeTestThenSet32
75 #define nativeClear32
76 #define nativeFetchThenIncrement32
77 #define nativeFetchThenDecrement32
78 #define nativeFetchThenAdd32
79 #define nativeFetchThenSubtract32
80 #define nativeIncrementThenFetch32
81 #define nativeDecrementThenFetch32
82 #define nativeAddThenFetch32
83 #define nativeSubtractThenFetch32
84
85 #define nativeTestThenSet64
86 #define nativeClear64
87 #define nativeFetchThenIncrement64
88 #define nativeFetchThenDecrement64
89 #define nativeFetchThenAdd64
90 #define nativeFetchThenSubtract64
91 #define nativeIncrementThenFetch64
92 #define nativeDecrementThenFetch64
93 #define nativeAddThenFetch64
94 #define nativeSubtractThenFetch64
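
As a sketch of how a port with missing builtins would use these markers (hypothetical, not part of the changeset), leaving a marker undefined makes the corresponding function below fall back to the spin-lock emulated path:

/* Hypothetical sketch for a platform without native 64 bit support:
   leave the 64 bit markers undefined and the functions below will use
   privateWaitLock()/privateUnlock() instead of the builtins. */
#undef nativeTestThenSet64
#undef nativeClear64
#undef nativeFetchThenIncrement64
/* ...and so on for the remaining 64 bit markers. */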
43 95
44 /* 96 /*
45 Linux version. 97 If any of the operations are not provided then we must emulate some
46 98 of them. That means we need a nice implementation of spin locks
47 Test for gnu C builtin support for atomic operations. The only way 99 that avoids the "one big lock" problem. We use a vector of spin
48 I know of is to check to see if the 100 locks and pick which one to use based on the address of the operand
49 __GCC_HAVE_SYNC_COMPARE_AND_SWAP_* macros are defined. 101 of the function.
50 */ 102
51 103 To generate the index of the lock we first shift by 3 bits to get
52 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 104 rid of the zero bits that result from 32 and 64 bit alignment of
53 #define HAVE_ALL_8_BIT_OPS 105 data. We then mask off all but 5 bits and use those 5 bits as an
54 106 index into the table.
55 #define nativeExchange8(ptr, value) (__sync_lock_test_and_set(ptr, value)) 107
56 #define nativeCompareThenSet8(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue)) 108 Picking the lock this way insures that accesses to the same data at
57 #define nativeTestThenSet8(ptr) (0 == __sync_lock_test_and_set(ptr, 1)) 109 the same time will go to the same lock. OTOH, accesses to different
58 #define nativeClear8(ptr) (__sync_lock_release(ptr)) 110 data have only a 1/32 chance of hitting the same lock. That should
59 #define nativeFetchThenIncrement8(ptr) (__sync_fetch_and_add(ptr, 1)) 111 pretty much eliminate the chances of several atomic operations on
60 #define nativeFetchThenDecrement8(ptr) (__sync_fetch_and_sub(ptr, 1)) 112 different data from waiting on the same "big lock". If it isn't
61 #define nativeFetchThenAdd8(ptr, value) (__sync_fetch_and_add(ptr, value)) 113 then the table of locks can be expanded to a new size so long as
62 #define nativeFetchThenSubtract8(ptr, value) (__sync_fetch_and_sub(ptr, value)) 114 the new size is a power of two.
63 #define nativeIncrementThenFetch8(ptr) (__sync_add_and_fetch(ptr, 1)) 115 */
64 #define nativeDecrementThenFetch8(ptr) (__sync_sub_and_fetch(ptr, 1)) 116
65 #define nativeAddThenFetch8(ptr, value) (__sync_add_and_fetch(ptr, value)) 117 static SDL_SpinLock locks[32] = {
66 #define nativeSubtractThenFetch8(ptr, value) (__sync_sub_and_fetch(ptr, value)) 118 0, 0, 0, 0, 0, 0, 0, 0,
67 #endif 119 0, 0, 0, 0, 0, 0, 0, 0,
68 120 0, 0, 0, 0, 0, 0, 0, 0,
69 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 121 0, 0, 0, 0, 0, 0, 0, 0,
70 #define HAVE_ALL_16_BIT_OPS 122 };
71 123
72 #define nativeExchange16(ptr, value) (__sync_lock_test_and_set(ptr, value)) 124 static __inline__ void
73 #define nativeCompareThenSet16(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue)) 125 privateWaitLock(volatile void *ptr)
74 #define nativeTestThenSet16(ptr) (0 == __sync_lock_test_and_set(ptr, 1)) 126 {
75 #define nativeClear16(ptr) (__sync_lock_release(ptr)) 127 #if SIZEOF_VOIDP == 4
76 #define nativeFetchThenIncrement16(ptr) (__sync_fetch_and_add(ptr, 1)) 128 Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
77 #define nativeFetchThenDecrement16(ptr) (__sync_fetch_and_sub(ptr, 1)) 129 #elif SIZEOF_VOIDP == 8
78 #define nativeFetchThenAdd16(ptr, value) (__sync_fetch_and_add(ptr, value)) 130 Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
79 #define nativeFetchThenSubtract16(ptr, value) (__sync_fetch_and_sub(ptr, value)) 131 #endif
80 #define nativeIncrementThenFetch16(ptr) (__sync_add_and_fetch(ptr, 1)) 132
81 #define nativeDecrementThenFetch16(ptr) (__sync_sub_and_fetch(ptr, 1)) 133 SDL_AtomicLock(&locks[index]);
82 #define nativeAddThenFetch16(ptr, value) (__sync_add_and_fetch(ptr, value)) 134 }
83 #define nativeSubtractThenFetch16(ptr, value) (__sync_sub_and_fetch(ptr, value)) 135
84 #endif 136 static __inline__ void
85 137 privateUnlock(volatile void *ptr)
86 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 138 {
87 #define HAVE_ALL_32_BIT_OPS 139 #if SIZEOF_VOIDP == 4
88 140 Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
89 #define nativeExchange32(ptr, value) (__sync_lock_test_and_set(ptr, value)) 141 #elif SIZEOF_VOIDP == 8
90 #define nativeCompareThenSet32(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue)) 142 Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
91 #define nativeTestThenSet32(ptr) (0 == __sync_lock_test_and_set(ptr, 1)) 143 #endif
92 #define nativeClear32(ptr) (__sync_lock_release(ptr)) 144
93 #define nativeFetchThenIncrement32(ptr) (__sync_fetch_and_add(ptr, 1)) 145 SDL_AtomicUnlock(&locks[index]);
94 #define nativeFetchThenDecrement32(ptr) (__sync_fetch_and_sub(ptr, 1)) 146 }
95 #define nativeFetchThenAdd32(ptr, value) (__sync_fetch_and_add(ptr, value)) 147
96 #define nativeFetchThenSubtract32(ptr, value) (__sync_fetch_and_sub(ptr, value)) 148 /* 32 bit atomic operations */
97 #define nativeIncrementThenFetch32(ptr) (__sync_add_and_fetch(ptr, 1))
98 #define nativeDecrementThenFetch32(ptr) (__sync_sub_and_fetch(ptr, 1))
99 #define nativeAddThenFetch32(ptr, value) (__sync_add_and_fetch(ptr, value))
100 #define nativeSubtractThenFetch32(ptr, value) (__sync_sub_and_fetch(ptr, value))
101 #endif
102
103 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
104 #define HAVE_ALL_64_BIT_OPS
105
106 #define nativeExchange64(ptr, value) (__sync_lock_test_and_set(ptr, value))
107 #define nativeCompareThenSet64(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue))
108 #define nativeTestThenSet64(ptr) (0 == __sync_lock_test_and_set(ptr, 1))
109 #define nativeClear64(ptr) (__sync_lock_release(ptr))
110 #define nativeFetchThenIncrement64(ptr) (__sync_fetch_and_add(ptr, 1))
111 #define nativeFetchThenDecrement64(ptr) (__sync_fetch_and_sub(ptr, 1))
112 #define nativeFetchThenAdd64(ptr, value) (__sync_fetch_and_add(ptr, value))
113 #define nativeFetchThenSubtract64(ptr, value) (__sync_fetch_and_sub(ptr, value))
114 #define nativeIncrementThenFetch64(ptr) (__sync_add_and_fetch(ptr, 1))
115 #define nativeDecrementThenFetch64(ptr) (__sync_sub_and_fetch(ptr, 1))
116 #define nativeAddThenFetch64(ptr, value) (__sync_add_and_fetch(ptr, value))
117 #define nativeSubtractThenFetch64(ptr, value) (__sync_sub_and_fetch(ptr, value))
118 #endif
119
120 /*
121 If any of the operations are not provided then we must emulate some of
122 them.
123 */
124
125 #if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_32_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS)
126
127 static Uint32 lock = 0;
128
129 #define privateWaitLock() \
130 while (nativeTestThenSet32(&lock)) \
131 { \
132 };
133
134 #define privateUnlock() (nativeClear32(&lock))
135 #endif
136
137 /* 8 bit atomic operations */
138
139 Uint8
140 SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value)
141 {
142 #ifdef nativeExchange8
143 return nativeExchange8(ptr, value);
144 #else
145 Uint8 tmp = 0;
146
147 privateWaitLock();
148 tmp = *ptr;
149 *ptr = value;
150 privateUnlock();
151
152 return tmp;
153 #endif
154 }
155 149
156 SDL_bool 150 SDL_bool
157 SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue) 151 SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
158 { 152 {
159 #ifdef nativeCompareThenSet8 153 #ifdef nativeTestThenSet32
160 return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue); 154 return 0 == __sync_lock_test_and_set(ptr, 1);
161 #else 155 #else
162 SDL_bool result = SDL_FALSE; 156 SDL_bool result = SDL_FALSE;
163 157
164 privateWaitLock(); 158 privateWaitLock(ptr);
165 result = (*ptr == oldvalue);
166 if (result)
167 {
168 *ptr = newvalue;
169 }
170 privateUnlock();
171
172 return result;
173 #endif
174 }
175
176 SDL_bool
177 SDL_AtomicTestThenSet8(volatile Uint8 * ptr)
178 {
179 #ifdef nativeTestThenSet8
180 return (SDL_bool)nativeTestThenSet8(ptr);
181 #else
182 SDL_bool result = SDL_FALSE;
183
184 privateWaitLock();
185 result = (*ptr == 0); 159 result = (*ptr == 0);
186 if (result) 160 if (result)
187 { 161 {
188 *ptr = 1; 162 *ptr = 1;
189 } 163 }
190 privateUnlock(); 164 privateUnlock(ptr);
191 165
192 return result; 166 return result;
193 #endif 167 #endif
194 } 168 }
195 169
196 void 170 void
197 SDL_AtomicClear8(volatile Uint8 * ptr) 171 SDL_AtomicClear32(volatile Uint32 * ptr)
198 { 172 {
199 #ifdef nativeClear8 173 #ifdef nativeClear32
200 nativeClear8(ptr); 174 __sync_lock_test_and_set(ptr, 0);
201 #else 175 return;
202 privateWaitLock(); 176 #else
177 privateWaitLock(ptr);
203 *ptr = 0; 178 *ptr = 0;
204 privateUnlock(); 179 privateUnlock(ptr);
205 180
206 return; 181 return;
207 #endif 182 #endif
208 } 183 }
209 184
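Taken together, the test-then-set and clear pair behaves like a try-lock; a minimal usage sketch (the names busy and try_do_work are hypothetical, not part of the changeset):

/* Hypothetical usage sketch: SDL_AtomicTestThenSet32 returns SDL_TRUE only
   for the caller that changed *ptr from 0 to 1, so it works as a try-lock. */
static volatile Uint32 busy = 0;

static SDL_bool
try_do_work(void)
{
    if (!SDL_AtomicTestThenSet32(&busy))
    {
        return SDL_FALSE;        /* somebody else already set the flag */
    }
    /* ... work that must not run concurrently ... */
    SDL_AtomicClear32(&busy);    /* put the flag back to 0 */
    return SDL_TRUE;
}
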
210 Uint8 185 Uint32
211 SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr) 186 SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
212 { 187 {
213 #ifdef nativeFetchThenIncrement8 188 #ifdef nativeFetchThenIncrement32
214 return nativeFetchThenIncrement8(ptr); 189 return __sync_fetch_and_add(ptr, 1);
215 #else 190 #else
216 Uint8 tmp = 0; 191 Uint32 tmp = 0;
217 192
218 privateWaitLock(); 193 privateWaitLock(ptr);
219 tmp = *ptr; 194 tmp = *ptr;
220 (*ptr)+= 1; 195 (*ptr)+= 1;
221 privateUnlock(); 196 privateUnlock(ptr);
222 197
223 return tmp; 198 return tmp;
224 #endif 199 #endif
225 } 200 }
226 201
227 Uint8 202 Uint32
228 SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr) 203 SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
229 { 204 {
230 #ifdef nativeFetchThenDecrement8 205 #ifdef nativeFetchThenDecrement32
231 return nativeFetchThenDecrement8(ptr); 206 return __sync_fetch_and_sub(ptr, 1);
232 #else 207 #else
233 Uint8 tmp = 0; 208 Uint32 tmp = 0;
234 209
235 privateWaitLock(); 210 privateWaitLock(ptr);
236 tmp = *ptr; 211 tmp = *ptr;
237 (*ptr) -= 1; 212 (*ptr) -= 1;
238 privateUnlock(); 213 privateUnlock(ptr);
239 214
240 return tmp; 215 return tmp;
241 #endif 216 #endif
242 } 217 }
243 218
244 Uint8 219 Uint32
245 SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value) 220 SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
246 { 221 {
247 #ifdef nativeFetchThenAdd8 222 #ifdef nativeFetchThenAdd32
248 return nativeFetchThenAdd8(ptr, value); 223 return __sync_fetch_and_add(ptr, value);
249 #else 224 #else
250 Uint8 tmp = 0; 225 Uint32 tmp = 0;
251 226
252 privateWaitLock(); 227 privateWaitLock(ptr);
253 tmp = *ptr; 228 tmp = *ptr;
254 (*ptr)+= value; 229 (*ptr)+= value;
255 privateUnlock(); 230 privateUnlock(ptr);
256 231
257 return tmp; 232 return tmp;
258 #endif 233 #endif
259 } 234 }
260 235
261 Uint8 236 Uint32
262 SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value) 237 SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
263 { 238 {
264 #ifdef nativeFetchThenSubtract8 239 #ifdef nativeFetchThenSubtract32
265 return nativeFetchThenSubtract8(ptr, value); 240 return __sync_fetch_and_sub(ptr, value);
266 #else 241 #else
267 Uint8 tmp = 0; 242 Uint32 tmp = 0;
268 243
269 privateWaitLock(); 244 privateWaitLock(ptr);
270 tmp = *ptr; 245 tmp = *ptr;
271 (*ptr)-= value; 246 (*ptr)-= value;
272 privateUnlock(); 247 privateUnlock(ptr);
273 248
274 return tmp; 249 return tmp;
275 #endif 250 #endif
276 } 251 }
277 252
278 Uint8 253 Uint32
279 SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr) 254 SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
280 { 255 {
281 #ifdef nativeIncrementThenFetch8 256 #ifdef nativeIncrementThenFetch32
282 return nativeIncrementThenFetch8(ptr); 257 return __sync_add_and_fetch(ptr, 1);
283 #else 258 #else
284 Uint8 tmp = 0; 259 Uint32 tmp = 0;
285 260
286 privateWaitLock(); 261 privateWaitLock(ptr);
287 (*ptr)+= 1; 262 (*ptr)+= 1;
288 tmp = *ptr; 263 tmp = *ptr;
289 privateUnlock(); 264 privateUnlock(ptr);
290 265
291 return tmp; 266 return tmp;
292 #endif 267 #endif
293 } 268 }
294 269
295 Uint8 270 Uint32
296 SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr) 271 SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
297 { 272 {
298 #ifdef nativeDecrementThenFetch8 273 #ifdef nativeDecrementThenFetch32
299 return nativeDecrementThenFetch8(ptr); 274 return __sync_sub_and_fetch(ptr, 1);
300 #else 275 #else
301 Uint8 tmp = 0; 276 Uint32 tmp = 0;
302 277
303 privateWaitLock(); 278 privateWaitLock(ptr);
304 (*ptr)-= 1; 279 (*ptr)-= 1;
305 tmp = *ptr; 280 tmp = *ptr;
306 privateUnlock(); 281 privateUnlock(ptr);
307 282
308 return tmp; 283 return tmp;
309 #endif 284 #endif
310 } 285 }
311 286
312 Uint8 287 Uint32
313 SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value) 288 SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
314 { 289 {
315 #ifdef nativeAddThenFetch8 290 #ifdef nativeAddThenFetch32
316 return nativeAddThenFetch8(ptr, value); 291 return __sync_add_and_fetch(ptr, value);
317 #else 292 #else
318 Uint8 tmp = 0; 293 Uint32 tmp = 0;
319 294
320 privateWaitLock(); 295 privateWaitLock(ptr);
321 (*ptr)+= value; 296 (*ptr)+= value;
322 tmp = *ptr; 297 tmp = *ptr;
323 privateUnlock(); 298 privateUnlock(ptr);
324 299
325 return tmp; 300 return tmp;
326 #endif 301 #endif
327 } 302 }
328 303
329 Uint8 304 Uint32
330 SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value) 305 SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
331 { 306 {
332 #ifdef nativeSubtractThenFetch8 307 #ifdef nativeSubtractThenFetch32
333 return nativeSubtractThenFetch8(ptr, value); 308 return __sync_sub_and_fetch(ptr, value);
334 #else 309 #else
335 Uint8 tmp = 0; 310 Uint32 tmp = 0;
336 311
337 privateWaitLock(); 312 privateWaitLock(ptr);
338 (*ptr)-= value; 313 (*ptr)-= value;
339 tmp = *ptr; 314 tmp = *ptr;
340 privateUnlock(); 315 privateUnlock(ptr);
341 316
342 return tmp; 317 return tmp;
343 #endif 318 #endif
344 } 319 }
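
The fetch-then and then-fetch families differ only in whether they return the value before or after the update; a small reference-count style sketch (the names refcount, retain, and release are hypothetical, not part of the changeset):

/* Hypothetical sketch: FetchThenIncrement returns the value *before* the
   update, DecrementThenFetch returns the value *after* it. */
static volatile Uint32 refcount = 1;

static void
retain(void)
{
    Uint32 before = SDL_AtomicFetchThenIncrement32(&refcount); /* old count */
    (void)before;
}

static SDL_bool
release(void)
{
    /* the new value; 0 means the last reference was just dropped */
    return (0 == SDL_AtomicDecrementThenFetch32(&refcount)) ? SDL_TRUE : SDL_FALSE;
}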
345 320
346 /* 16 bit atomic operations */ 321 /* 64 bit atomic operations */
347 322 #ifdef SDL_HAS_64BIT_TYPE
348 Uint16
349 SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value)
350 {
351 #ifdef nativeExchange16
352 return nativeExchange16(ptr, value);
353 #else
354 Uint16 tmp = 0;
355
356 privateWaitLock();
357 tmp = *ptr;
358 *ptr = value;
359 privateUnlock();
360
361 return tmp;
362 #endif
363 }
364 323
365 SDL_bool 324 SDL_bool
366 SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue) 325 SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
367 { 326 {
368 #ifdef nativeCompareThenSet16 327 #ifdef nativeTestThenSet64
369 return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue); 328 return 0 == __sync_lock_test_and_set(ptr, 1);
370 #else 329 #else
371 SDL_bool result = SDL_FALSE; 330 SDL_bool result = SDL_FALSE;
372 331
373 privateWaitLock(); 332 privateWaitLock(ptr);
374 result = (*ptr == oldvalue);
375 if (result)
376 {
377 *ptr = newvalue;
378 }
379 privateUnlock();
380
381 return result;
382 #endif
383 }
384
385 SDL_bool
386 SDL_AtomicTestThenSet16(volatile Uint16 * ptr)
387 {
388 #ifdef nativeTestThenSet16
389 return (SDL_bool)nativeTestThenSet16(ptr);
390 #else
391 SDL_bool result = SDL_FALSE;
392
393 privateWaitLock();
394 result = (*ptr == 0); 333 result = (*ptr == 0);
395 if (result) 334 if (result)
396 { 335 {
397 *ptr = 1; 336 *ptr = 1;
398 } 337 }
399 privateUnlock(); 338 privateUnlock(ptr);
400
401 return result;
402 #endif
403 }
404
405 void
406 SDL_AtomicClear16(volatile Uint16 * ptr)
407 {
408 #ifdef nativeClear16
409 nativeClear16(ptr);
410 #else
411 privateWaitLock();
412 *ptr = 0;
413 privateUnlock();
414
415 return;
416 #endif
417 }
418
419 Uint16
420 SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr)
421 {
422 #ifdef nativeFetchThenIncrement16
423 return nativeFetchThenIncrement16(ptr);
424 #else
425 Uint16 tmp = 0;
426
427 privateWaitLock();
428 tmp = *ptr;
429 (*ptr)+= 1;
430 privateUnlock();
431
432 return tmp;
433 #endif
434 }
435
436 Uint16
437 SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr)
438 {
439 #ifdef nativeFetchThenDecrement16
440 return nativeFetchThenDecrement16(ptr);
441 #else
442 Uint16 tmp = 0;
443
444 privateWaitLock();
445 tmp = *ptr;
446 (*ptr) -= 1;
447 privateUnlock();
448
449 return tmp;
450 #endif
451 }
452
453 Uint16
454 SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value)
455 {
456 #ifdef nativeFetchThenAdd16
457 return nativeFetchThenAdd16(ptr, value);
458 #else
459 Uint16 tmp = 0;
460
461 privateWaitLock();
462 tmp = *ptr;
463 (*ptr)+= value;
464 privateUnlock();
465
466 return tmp;
467 #endif
468 }
469
470 Uint16
471 SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value)
472 {
473 #ifdef nativeFetchThenSubtract16
474 return nativeFetchThenSubtract16(ptr, value);
475 #else
476 Uint16 tmp = 0;
477
478 privateWaitLock();
479 tmp = *ptr;
480 (*ptr)-= value;
481 privateUnlock();
482
483 return tmp;
484 #endif
485 }
486
487 Uint16
488 SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr)
489 {
490 #ifdef nativeIncrementThenFetch16
491 return nativeIncrementThenFetch16(ptr);
492 #else
493 Uint16 tmp = 0;
494
495 privateWaitLock();
496 (*ptr)+= 1;
497 tmp = *ptr;
498 privateUnlock();
499
500 return tmp;
501 #endif
502 }
503
504 Uint16
505 SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr)
506 {
507 #ifdef nativeDecrementThenFetch16
508 return nativeDecrementThenFetch16(ptr);
509 #else
510 Uint16 tmp = 0;
511
512 privateWaitLock();
513 (*ptr)-= 1;
514 tmp = *ptr;
515 privateUnlock();
516
517 return tmp;
518 #endif
519 }
520
521 Uint16
522 SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value)
523 {
524 #ifdef nativeAddThenFetch16
525 return nativeAddThenFetch16(ptr, value);
526 #else
527 Uint16 tmp = 0;
528
529 privateWaitLock();
530 (*ptr)+= value;
531 tmp = *ptr;
532 privateUnlock();
533
534 return tmp;
535 #endif
536 }
537
538 Uint16
539 SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value)
540 {
541 #ifdef nativeSubtractThenFetch16
542 return nativeSubtractThenFetch16(ptr, value);
543 #else
544 Uint16 tmp = 0;
545
546 privateWaitLock();
547 (*ptr)-= value;
548 tmp = *ptr;
549 privateUnlock();
550
551 return tmp;
552 #endif
553 }
554
555 /* 32 bit atomic operations */
556
557 Uint32
558 SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value)
559 {
560 #ifdef nativeExchange32
561 return nativeExchange32(ptr, value);
562 #else
563 Uint32 tmp = 0;
564
565 privateWaitLock();
566 tmp = *ptr;
567 *ptr = value;
568 privateUnlock();
569
570 return tmp;
571 #endif
572 }
573
574 SDL_bool
575 SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue)
576 {
577 #ifdef nativeCompareThenSet32
578 return (SDL_bool)nativeCompareThenSet32(ptr, oldvalue, newvalue);
579 #else
580 SDL_bool result = SDL_FALSE;
581
582 privateWaitLock();
583 result = (*ptr == oldvalue);
584 if (result)
585 {
586 *ptr = newvalue;
587 }
588 privateUnlock();
589
590 return result;
591 #endif
592 }
593
594 SDL_bool
595 SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
596 {
597 #ifdef nativeTestThenSet32
598 return (SDL_bool)nativeTestThenSet32(ptr);
599 #else
600 SDL_bool result = SDL_FALSE;
601
602 privateWaitLock();
603 result = (*ptr == 0);
604 if (result)
605 {
606 *ptr = 1;
607 }
608 privateUnlock();
609
610 return result;
611 #endif
612 }
613
614 void
615 SDL_AtomicClear32(volatile Uint32 * ptr)
616 {
617 #ifdef nativeClear32
618 nativeClear32(ptr);
619 #else
620 privateWaitLock();
621 *ptr = 0;
622 privateUnlock();
623
624 return;
625 #endif
626 }
627
628 Uint32
629 SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
630 {
631 #ifdef nativeFetchThenIncrement32
632 return nativeFetchThenIncrement32(ptr);
633 #else
634 Uint32 tmp = 0;
635
636 privateWaitLock();
637 tmp = *ptr;
638 (*ptr)+= 1;
639 privateUnlock();
640
641 return tmp;
642 #endif
643 }
644
645 Uint32
646 SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
647 {
648 #ifdef nativeFetchThenDecrement32
649 return nativeFetchThenDecrement32(ptr);
650 #else
651 Uint32 tmp = 0;
652
653 privateWaitLock();
654 tmp = *ptr;
655 (*ptr) -= 1;
656 privateUnlock();
657
658 return tmp;
659 #endif
660 }
661
662 Uint32
663 SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
664 {
665 #ifdef nativeFetchThenAdd32
666 return nativeFetchThenAdd32(ptr, value);
667 #else
668 Uint32 tmp = 0;
669
670 privateWaitLock();
671 tmp = *ptr;
672 (*ptr)+= value;
673 privateUnlock();
674
675 return tmp;
676 #endif
677 }
678
679 Uint32
680 SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
681 {
682 #ifdef nativeFetchThenSubtract32
683 return nativeFetchThenSubtract32(ptr, value);
684 #else
685 Uint32 tmp = 0;
686
687 privateWaitLock();
688 tmp = *ptr;
689 (*ptr)-= value;
690 privateUnlock();
691
692 return tmp;
693 #endif
694 }
695
696 Uint32
697 SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
698 {
699 #ifdef nativeIncrementThenFetch32
700 return nativeIncrementThenFetch32(ptr);
701 #else
702 Uint32 tmp = 0;
703
704 privateWaitLock();
705 (*ptr)+= 1;
706 tmp = *ptr;
707 privateUnlock();
708
709 return tmp;
710 #endif
711 }
712
713 Uint32
714 SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
715 {
716 #ifdef nativeDecrementThenFetch32
717 return nativeDecrementThenFetch32(ptr);
718 #else
719 Uint32 tmp = 0;
720
721 privateWaitLock();
722 (*ptr)-= 1;
723 tmp = *ptr;
724 privateUnlock();
725
726 return tmp;
727 #endif
728 }
729
730 Uint32
731 SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
732 {
733 #ifdef nativeAddThenFetch32
734 return nativeAddThenFetch32(ptr, value);
735 #else
736 Uint32 tmp = 0;
737
738 privateWaitLock();
739 (*ptr)+= value;
740 tmp = *ptr;
741 privateUnlock();
742
743 return tmp;
744 #endif
745 }
746
747 Uint32
748 SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
749 {
750 #ifdef nativeSubtractThenFetch32
751 return nativeSubtractThenFetch32(ptr, value);
752 #else
753 Uint32 tmp = 0;
754
755 privateWaitLock();
756 (*ptr)-= value;
757 tmp = *ptr;
758 privateUnlock();
759
760 return tmp;
761 #endif
762 }
763
764 /* 64 bit atomic operations */
765 #ifdef SDL_HAS_64BIT_TYPE
766
767 Uint64
768 SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value)
769 {
770 #ifdef nativeExchange64
771 return nativeExchange64(ptr, value);
772 #else
773 Uint64 tmp = 0;
774
775 privateWaitLock();
776 tmp = *ptr;
777 *ptr = value;
778 privateUnlock();
779
780 return tmp;
781 #endif
782 }
783
784 SDL_bool
785 SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue)
786 {
787 #ifdef nativeCompareThenSet64
788 return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue);
789 #else
790 SDL_bool result = SDL_FALSE;
791
792 privateWaitLock();
793 result = (*ptr == oldvalue);
794 if (result)
795 {
796 *ptr = newvalue;
797 }
798 privateUnlock();
799
800 return result;
801 #endif
802 }
803
804 SDL_bool
805 SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
806 {
807 #ifdef nativeTestThenSet64
808 return (SDL_bool)nativeTestThenSet64(ptr);
809 #else
810 SDL_bool result = SDL_FALSE;
811
812 privateWaitLock();
813 result = (*ptr == 0);
814 if (result)
815 {
816 *ptr = 1;
817 }
818 privateUnlock();
819 339
820 return result; 340 return result;
821 #endif 341 #endif
822 } 342 }
823 343
824 void 344 void
825 SDL_AtomicClear64(volatile Uint64 * ptr) 345 SDL_AtomicClear64(volatile Uint64 * ptr)
826 { 346 {
827 #ifdef nativeClear64 347 #ifdef nativeClear64
828 nativeClear64(ptr); 348 __sync_lock_test_and_set(ptr, 0);
829 #else 349 return;
830 privateWaitLock(); 350 #else
351 privateWaitLock(ptr);
831 *ptr = 0; 352 *ptr = 0;
832 privateUnlock(); 353 privateUnlock(ptr);
833 354
834 return; 355 return;
835 #endif 356 #endif
836 } 357 }
837 358
838 Uint64 359 Uint64
839 SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr) 360 SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
840 { 361 {
841 #ifdef nativeFetchThenIncrement64 362 #ifdef nativeFetchThenIncrement64
842 return nativeFetchThenIncrement64(ptr); 363 return __sync_fetch_and_add(ptr, 1);
843 #else 364 #else
844 Uint64 tmp = 0; 365 Uint64 tmp = 0;
845 366
846 privateWaitLock(); 367 privateWaitLock(ptr);
847 tmp = *ptr; 368 tmp = *ptr;
848 (*ptr)+= 1; 369 (*ptr)+= 1;
849 privateUnlock(); 370 privateUnlock(ptr);
850 371
851 return tmp; 372 return tmp;
852 #endif 373 #endif
853 } 374 }
854 375
855 Uint64 376 Uint64
856 SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr) 377 SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
857 { 378 {
858 #ifdef nativeFetchThenDecrement64 379 #ifdef nativeFetchThenDecrement64
859 return nativeFetchThenDecrement64(ptr); 380 return __sync_fetch_and_sub(ptr, 1);
860 #else 381 #else
861 Uint64 tmp = 0; 382 Uint64 tmp = 0;
862 383
863 privateWaitLock(); 384 privateWaitLock(ptr);
864 tmp = *ptr; 385 tmp = *ptr;
865 (*ptr) -= 1; 386 (*ptr) -= 1;
866 privateUnlock(); 387 privateUnlock(ptr);
867 388
868 return tmp; 389 return tmp;
869 #endif 390 #endif
870 } 391 }
871 392
872 Uint64 393 Uint64
873 SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value) 394 SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
874 { 395 {
875 #ifdef nativeFetchThenAdd64 396 #ifdef nativeFetchThenAdd64
876 return nativeFetchThenAdd64(ptr, value); 397 return __sync_fetch_and_add(ptr, value);
877 #else 398 #else
878 Uint64 tmp = 0; 399 Uint64 tmp = 0;
879 400
880 privateWaitLock(); 401 privateWaitLock(ptr);
881 tmp = *ptr; 402 tmp = *ptr;
882 (*ptr)+= value; 403 (*ptr)+= value;
883 privateUnlock(); 404 privateUnlock(ptr);
884 405
885 return tmp; 406 return tmp;
886 #endif 407 #endif
887 } 408 }
888 409
889 Uint64 410 Uint64
890 SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value) 411 SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
891 { 412 {
892 #ifdef nativeFetchThenSubtract64 413 #ifdef nativeFetchThenSubtract64
893 return nativeFetchThenSubtract64(ptr, value); 414 return __sync_fetch_and_sub(ptr, value);
894 #else 415 #else
895 Uint64 tmp = 0; 416 Uint64 tmp = 0;
896 417
897 privateWaitLock(); 418 privateWaitLock(ptr);
898 tmp = *ptr; 419 tmp = *ptr;
899 (*ptr)-= value; 420 (*ptr)-= value;
900 privateUnlock(); 421 privateUnlock(ptr);
901 422
902 return tmp; 423 return tmp;
903 #endif 424 #endif
904 } 425 }
905 426
906 Uint64 427 Uint64
907 SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr) 428 SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
908 { 429 {
909 #ifdef nativeIncrementThenFetch64 430 #ifdef nativeIncrementThenFetch64
910 return nativeIncrementThenFetch64(ptr); 431 return __sync_add_and_fetch(ptr, 1);
911 #else 432 #else
912 Uint64 tmp = 0; 433 Uint64 tmp = 0;
913 434
914 privateWaitLock(); 435 privateWaitLock(ptr);
915 (*ptr)+= 1; 436 (*ptr)+= 1;
916 tmp = *ptr; 437 tmp = *ptr;
917 privateUnlock(); 438 privateUnlock(ptr);
918 439
919 return tmp; 440 return tmp;
920 #endif 441 #endif
921 } 442 }
922 443
923 Uint64 444 Uint64
924 SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr) 445 SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
925 { 446 {
926 #ifdef nativeDecrementThenFetch64 447 #ifdef nativeDecrementThenFetch64
927 return nativeDecrementThenFetch64(ptr); 448 return __sync_sub_and_fetch(ptr, 1);
928 #else 449 #else
929 Uint64 tmp = 0; 450 Uint64 tmp = 0;
930 451
931 privateWaitLock(); 452 privateWaitLock(ptr);
932 (*ptr)-= 1; 453 (*ptr)-= 1;
933 tmp = *ptr; 454 tmp = *ptr;
934 privateUnlock(); 455 privateUnlock(ptr);
935 456
936 return tmp; 457 return tmp;
937 #endif 458 #endif
938 } 459 }
939 460
940 Uint64 461 Uint64
941 SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value) 462 SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
942 { 463 {
943 #ifdef nativeAddThenFetch64 464 #ifdef nativeAddThenFetch64
944 return nativeAddThenFetch64(ptr, value); 465 return __sync_add_and_fetch(ptr, value);
945 #else 466 #else
946 Uint64 tmp = 0; 467 Uint64 tmp = 0;
947 468
948 privateWaitLock(); 469 privateWaitLock(ptr);
949 (*ptr)+= value; 470 (*ptr)+= value;
950 tmp = *ptr; 471 tmp = *ptr;
951 privateUnlock(); 472 privateUnlock(ptr);
952 473
953 return tmp; 474 return tmp;
954 #endif 475 #endif
955 } 476 }
956 477
957 Uint64 478 Uint64
958 SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value) 479 SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
959 { 480 {
960 #ifdef nativeSubtractThenFetch64 481 #ifdef nativeSubtractThenFetch64
961 return nativeSubtractThenFetch64(ptr, value); 482 return __sync_sub_and_fetch(ptr, value);
962 #else 483 #else
963 Uint64 tmp = 0; 484 Uint64 tmp = 0;
964 485
965 privateWaitLock(); 486 privateWaitLock(ptr);
966 (*ptr)-= value; 487 (*ptr)-= value;
967 tmp = *ptr; 488 tmp = *ptr;
968 privateUnlock(); 489 privateUnlock(ptr);
969 490
970 return tmp; 491 return tmp;
971 #endif 492 #endif
972 } 493 }
973 #endif 494
974 495 #endif /* SDL_HAS_64BIT_TYPE */