sdl-ios-xcode (Mercurial): comparison of src/atomic/dummy/SDL_atomic.c @ 3261:72b542f34739
The new, cleaner version of the atomic operations. The dummy code is what you should start working with to port atomic ops.
The linux code appears to be complete and *should* be the base of all Unix- and GCC-based versions. The macosx and win32 versions
are currently just copies of the dummy code. I will begin working on the windows version as soon as this check-in is done. I
need someone to work on the Mac OS X version.
I'm afraid that this check-in will break QNX (Sorry!)
author    Bob Pendleton <bob@pendleton.com>
date      Thu, 17 Sep 2009 20:35:12 +0000
parents   48a80f2a7ff2
children  0e000afe3dc0
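The file below is the dummy implementation: every nativeXxx macro is left #undef'd, and the in-file comment notes that a platform port is built by turning those #undefs into #defines plus platform-specific code. As a hedged illustration only (not part of this commit), on a GCC 4.1+ toolchain the 32 bit macros could be backed by the __sync builtins:

    #define nativeTestThenSet32(ptr)          (0 == __sync_lock_test_and_set((ptr), 1))
    #define nativeClear32(ptr)                __sync_lock_release(ptr)
    #define nativeFetchThenAdd32(ptr, value)  __sync_fetch_and_add((ptr), (value))
    #define nativeAddThenFetch32(ptr, value)  __sync_add_and_fetch((ptr), (value))

__sync_lock_test_and_set returns the previous value, so comparing it against 0 yields the "was clear, now set" result that SDL_AtomicTestThenSet32 expects.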
--- a/src/atomic/dummy/SDL_atomic.c  3260:85bf3f297b5c
+++ b/src/atomic/dummy/SDL_atomic.c  3261:72b542f34739
@@ -1,1002 +1,469 @@
 /*
     SDL - Simple DirectMedia Layer
     Copyright (C) 1997-2009 Sam Lantinga
 
     This library is free software; you can redistribute it and/or
     modify it under the terms of the GNU Lesser General Public
     License as published by the Free Software Foundation; either
     version 2.1 of the License, or (at your option) any later version.
 
     This library is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
     Lesser General Public License for more details.
 
     You should have received a copy of the GNU Lesser General Public
     License along with this library; if not, write to the Free Software
     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 
     Sam Lantinga
     slouken@libsdl.org
+
+    Contributed by Bob Pendleton, bob@pendleton.com
 */
 
 #include "SDL_stdinc.h"
 #include "SDL_atomic.h"
 
+#include "SDL_error.h"
+
 /*
-  This file provides 8, 16, 32, and 64 bit atomic operations. If the
+  This file provides 32 and 64 bit atomic operations. If the
   operations are provided by the native hardware and operating system
   they are used. If they are not then the operations are emulated
-  using the SDL mutex operations.
-*/
+  using the SDL spin lock operations. If spin locks cannot be
+  implemented then these functions must fail.
+*/
 
 /*
-  First, detect whether the operations are supported and create
-  #defines that indicate that they do exist. The goal is to have all
-  the system dependent code in the top part of the file so that the
-  bottom can be used unchanged across all platforms.
-
-  Second, #define all the operations in each size class that are
-  supported. Doing this allows supported operations to be used
-  alongside emulated operations.
-*/
+  DUMMY VERSION.
+
+  This version of the code assumes there is no support for atomic
+  operations. Therefore, every function sets the SDL error
+  message. Oddly enough, if you only have one thread then this
+  version actually works.
+*/
+
+/*
+  Native spinlock routines. Because this is the dummy implementation
+  these will always call SDL_SetError() and do nothing.
+*/
+
+void
+SDL_AtomicLock(SDL_SpinLock *lock)
+{
+    SDL_SetError("SDL_atomic.c: is not implemented on this platform");
+}
+
+void
+SDL_AtomicUnlock(SDL_SpinLock *lock)
+{
+    SDL_SetError("SDL_atomic.c: is not implemented on this platform");
+}
+
+/*
+  Note that platform specific versions can be built from this version
+  by changing the #undefs to #defines and adding platform specific
+  code.
+*/
+
+#undef nativeTestThenSet32
+#undef nativeClear32
+#undef nativeFetchThenIncrement32
+#undef nativeFetchThenDecrement32
+#undef nativeFetchThenAdd32
+#undef nativeFetchThenSubtract32
+#undef nativeIncrementThenFetch32
+#undef nativeDecrementThenFetch32
+#undef nativeAddThenFetch32
+#undef nativeSubtractThenFetch32
+
+#undef nativeTestThenSet64
+#undef nativeClear64
+#undef nativeFetchThenIncrement64
+#undef nativeFetchThenDecrement64
+#undef nativeFetchThenAdd64
+#undef nativeFetchThenSubtract64
+#undef nativeIncrementThenFetch64
+#undef nativeDecrementThenFetch64
+#undef nativeAddThenFetch64
+#undef nativeSubtractThenFetch64
 
 /*
-  Emulated version.
-
-  Assume there is no support for atomic operations. All such
-  operations are implemented using SDL mutex operations.
-*/
+  If any of the operations are not provided then we must emulate some
+  of them. That means we need a nice implementation of spin locks
+  that avoids the "one big lock" problem. We use a vector of spin
+  locks and pick which one to use based on the address of the operand
+  of the function.
 
-#ifdef EMULATED_ATOMIC_OPERATIONS
-#undef EMULATED_ATOMIC_OPERATIONS
-#endif
-
-#ifdef EMULATED_ATOMIC_OPERATIONS
-#define HAVE_ALL_8_BIT_OPS
-
-#define nativeExchange8(ptr, value) ()
-#define nativeCompareThenSet8(ptr, oldvalue, newvalue) ()
-#define nativeTestThenSet8(ptr) ()
-#define nativeClear8(ptr) ()
-#define nativeFetchThenIncrement8(ptr) ()
-#define nativeFetchThenDecrement8(ptr) ()
-#define nativeFetchThenAdd8(ptr, value) ()
-#define nativeFetchThenSubtract8(ptr, value) ()
-#define nativeIncrementThenFetch8(ptr) ()
-#define nativeDecrementThenFetch8(ptr) ()
-#define nativeAddThenFetch8(ptr, value) ()
-#define nativeSubtractThenFetch8(ptr, value) ()
-#endif
+  To generate the index of the lock we first shift by 3 bits to get
+  rid of the zero bits that result from the 32 and 64 bit alignment
+  of the data. We then mask off all but 5 bits and use those 5 bits
+  as an index into the table.
+
+  Picking the lock this way ensures that accesses to the same data at
+  the same time will go to the same lock. OTOH, accesses to different
+  data have only a 1/32 chance of hitting the same lock. That should
+  pretty much eliminate the chance of several atomic operations on
+  different data waiting on the same "big lock". If it doesn't, the
+  table of locks can be expanded to a new size, so long as the new
+  size is a power of two.
+*/
+
+static SDL_SpinLock locks[32] = {
+    0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0,
+};
-
-#ifdef EMULATED_ATOMIC_OPERATIONS
-#define HAVE_ALL_16_BIT_OPS
-
-#define nativeExchange16(ptr, value) ()
-#define nativeCompareThenSet16(ptr, oldvalue, newvalue) ()
-#define nativeTestThenSet16(ptr) ()
-#define nativeClear16(ptr) ()
-#define nativeFetchThenIncrement16(ptr) ()
-#define nativeFetchThenDecrement16(ptr) ()
-#define nativeFetchThenAdd16(ptr, value) ()
-#define nativeFetchThenSubtract16(ptr, value) ()
-#define nativeIncrementThenFetch16(ptr) ()
-#define nativeDecrementThenFetch16(ptr) ()
-#define nativeAddThenFetch16(ptr, value) ()
-#define nativeSubtractThenFetch16(ptr, value) ()
-#endif
-
-#ifdef EMULATED_ATOMIC_OPERATIONS
-#define HAVE_ALL_32_BIT_OPS
-
-#define nativeExchange32(ptr, value) ()
-#define nativeCompareThenSet32(ptr, oldvalue, newvalue) ()
-#define nativeTestThenSet32(ptr) ()
-#define nativeClear32(ptr) ()
-#define nativeFetchThenIncrement32(ptr) ()
-#define nativeFetchThenDecrement32(ptr) ()
-#define nativeFetchThenAdd32(ptr, value) ()
-#define nativeFetchThenSubtract32(ptr, value) ()
-#define nativeIncrementThenFetch32(ptr) ()
-#define nativeDecrementThenFetch32(ptr) ()
-#define nativeAddThenFetch32(ptr, value) ()
-#define nativeSubtractThenFetch32(ptr, value) ()
-#endif
-
-#ifdef EMULATED_ATOMIC_OPERATIONS
-#define HAVE_ALL_64_BIT_OPS
-
-#define nativeExchange64(ptr, value) ()
-#define nativeCompareThenSet64(ptr, oldvalue, newvalue) ()
-#define nativeTestThenSet64(ptr) ()
-#define nativeClear64(ptr) ()
-#define nativeFetchThenIncrement64(ptr) ()
-#define nativeFetchThenDecrement64(ptr) ()
-#define nativeFetchThenAdd64(ptr, value) ()
-#define nativeFetchThenSubtract64(ptr, value) ()
-#define nativeIncrementThenFetch64(ptr) ()
-#define nativeDecrementThenFetch64(ptr) ()
-#define nativeAddThenFetch64(ptr, value) ()
-#define nativeSubtractThenFetch64(ptr, value) ()
-#endif
-
-/*
-  If any of the operations are not provided then we must emulate some of
-  them.
-*/
-
-#if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_32_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS)
-
-#include "SDL_mutex.h"
-#include "SDL_error.h"
-
-static SDL_mutex * lock = NULL;
 
 static __inline__ void
-privateWaitLock()
+privateWaitLock(volatile void *ptr)
 {
-    if(NULL == lock)
-    {
-        lock = SDL_CreateMutex();
-        if (NULL == lock)
-        {
-            SDL_SetError("SDL_atomic.c: can't create a mutex");
-            return;
-        }
-    }
-
-    if (-1 == SDL_LockMutex(lock))
-    {
-        SDL_SetError("SDL_atomic.c: can't lock mutex");
-    }
+#if SIZEOF_VOIDP == 4
+    Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
+#elif SIZEOF_VOIDP == 8
+    Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
+#endif
+
+    SDL_AtomicLock(&locks[index]);
 }
 
 static __inline__ void
-privateUnlock()
+privateUnlock(volatile void *ptr)
 {
-    if (-1 == SDL_UnlockMutex(lock))
-    {
-        SDL_SetError("SDL_atomic.c: can't unlock mutex");
-    }
-}
-
-#endif
-
-/* 8 bit atomic operations */
-
+#if SIZEOF_VOIDP == 4
+    Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
+#elif SIZEOF_VOIDP == 8
+    Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
+#endif
+
+    SDL_AtomicUnlock(&locks[index]);
+}
+
+/* 32 bit atomic operations */
-Uint8
-SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value)
-{
-#ifdef nativeExchange8
-    return nativeExchange8(ptr, value);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    *ptr = value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
 
 SDL_bool
-SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue)
+SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
 {
-#ifdef nativeCompareThenSet8
-    return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue);
+#ifdef nativeTestThenSet32
 #else
     SDL_bool result = SDL_FALSE;
 
-    privateWaitLock();
-    result = (*ptr == oldvalue);
-    if (result)
-    {
-        *ptr = newvalue;
-    }
-    privateUnlock();
-
-    return result;
-#endif
-}
-
-SDL_bool
-SDL_AtomicTestThenSet8(volatile Uint8 * ptr)
-{
-#ifdef nativeTestThenSet8
-    return (SDL_bool)nativeTestThenSet8(ptr);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
+    privateWaitLock(ptr);
     result = (*ptr == 0);
     if (result)
     {
         *ptr = 1;
     }
-    privateUnlock();
+    privateUnlock(ptr);
 
     return result;
 #endif
 }
 
 void
-SDL_AtomicClear8(volatile Uint8 * ptr)
+SDL_AtomicClear32(volatile Uint32 * ptr)
 {
-#ifdef nativeClear8
-    nativeClear8(ptr);
-#else
-    privateWaitLock();
+#ifdef nativeClear32
+#else
+    privateWaitLock(ptr);
     *ptr = 0;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return;
 #endif
 }
 
-Uint8
-SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr)
+Uint32
+SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
 {
-#ifdef nativeFetchThenIncrement8
-    return nativeFetchThenIncrement8(ptr);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeFetchThenIncrement32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)+= 1;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr)
+Uint32
+SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
 {
-#ifdef nativeFetchThenDecrement8
-    return nativeFetchThenDecrement8(ptr);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeFetchThenDecrement32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr) -= 1;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value)
+Uint32
+SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
 {
-#ifdef nativeFetchThenAdd8
-    return nativeFetchThenAdd8(ptr, value);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeFetchThenAdd32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)+= value;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value)
+Uint32
+SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
 {
-#ifdef nativeFetchThenSubtract8
-    return nativeFetchThenSubtract8(ptr, value);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeFetchThenSubtract32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)-= value;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr)
+Uint32
+SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
 {
-#ifdef nativeIncrementThenFetch8
-    return nativeIncrementThenFetch8(ptr);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeIncrementThenFetch32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)+= 1;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr)
+Uint32
+SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
 {
-#ifdef nativeDecrementThenFetch8
-    return nativeDecrementThenFetch8(ptr);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeDecrementThenFetch32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
    (*ptr)-= 1;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value)
+Uint32
+SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
 {
-#ifdef nativeAddThenFetch8
-    return nativeAddThenFetch8(ptr, value);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeAddThenFetch32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)+= value;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-Uint8
-SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value)
+Uint32
+SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
 {
-#ifdef nativeSubtractThenFetch8
-    return nativeSubtractThenFetch8(ptr, value);
-#else
-    Uint8 tmp = 0;
-
-    privateWaitLock();
+#ifdef nativeSubtractThenFetch32
+#else
+    Uint32 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)-= value;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
-/* 16 bit atomic operations */
-
-Uint16
-SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value)
-{
-#ifdef nativeExchange16
-    return nativeExchange16(ptr, value);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    *ptr = value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
+/* 64 bit atomic operations */
+#ifdef SDL_HAS_64BIT_TYPE
 
 SDL_bool
-SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue)
+SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
 {
-#ifdef nativeCompareThenSet16
-    return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue);
+#ifdef nativeTestThenSet64
 #else
     SDL_bool result = SDL_FALSE;
 
-    privateWaitLock();
-    result = (*ptr == oldvalue);
-    if (result)
-    {
-        *ptr = newvalue;
-    }
-    privateUnlock();
-
-    return result;
-#endif
-}
-
-SDL_bool
-SDL_AtomicTestThenSet16(volatile Uint16 * ptr)
-{
-#ifdef nativeTestThenSet16
-    return (SDL_bool)nativeTestThenSet16(ptr);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
+    privateWaitLock(ptr);
     result = (*ptr == 0);
     if (result)
     {
         *ptr = 1;
     }
-    privateUnlock();
+    privateUnlock(ptr);
-
-    return result;
-#endif
-}
-
-void
-SDL_AtomicClear16(volatile Uint16 * ptr)
-{
-#ifdef nativeClear16
-    nativeClear16(ptr);
-#else
-    privateWaitLock();
-    *ptr = 0;
-    privateUnlock();
-
-    return;
-#endif
-}
-
-Uint16
-SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr)
-{
-#ifdef nativeFetchThenIncrement16
-    return nativeFetchThenIncrement16(ptr);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)+= 1;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr)
-{
-#ifdef nativeFetchThenDecrement16
-    return nativeFetchThenDecrement16(ptr);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr) -= 1;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value)
-{
-#ifdef nativeFetchThenAdd16
-    return nativeFetchThenAdd16(ptr, value);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)+= value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value)
-{
-#ifdef nativeFetchThenSubtract16
-    return nativeFetchThenSubtract16(ptr, value);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)-= value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr)
-{
-#ifdef nativeIncrementThenFetch16
-    return nativeIncrementThenFetch16(ptr);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)+= 1;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr)
-{
-#ifdef nativeDecrementThenFetch16
-    return nativeDecrementThenFetch16(ptr);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)-= 1;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value)
-{
-#ifdef nativeAddThenFetch16
-    return nativeAddThenFetch16(ptr, value);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)+= value;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint16
-SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value)
-{
-#ifdef nativeSubtractThenFetch16
-    return nativeSubtractThenFetch16(ptr, value);
-#else
-    Uint16 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)-= value;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-/* 32 bit atomic operations */
-
-Uint32
-SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value)
-{
-#ifdef nativeExchange32
-    return nativeExchange32(ptr, value);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    *ptr = value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-SDL_bool
-SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue)
-{
-#ifdef nativeCompareThenSet32
-    return (SDL_bool)nativeCompareThenSet32(ptr, oldvalue, newvalue);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
-    result = (*ptr == oldvalue);
-    if (result)
-    {
-        *ptr = newvalue;
-    }
-    privateUnlock();
-
-    return result;
-#endif
-}
-
-SDL_bool
-SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
-{
-#ifdef nativeTestThenSet32
-    return (SDL_bool)nativeTestThenSet32(ptr);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
-    result = (*ptr == 0);
-    if (result)
-    {
-        *ptr = 1;
-    }
-    privateUnlock();
-
-    return result;
-#endif
-}
-
-void
-SDL_AtomicClear32(volatile Uint32 * ptr)
-{
-#ifdef nativeClear32
-    nativeClear32(ptr);
-#else
-    privateWaitLock();
-    *ptr = 0;
-    privateUnlock();
-
-    return;
-#endif
-}
-
-Uint32
-SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
-{
-#ifdef nativeFetchThenIncrement32
-    return nativeFetchThenIncrement32(ptr);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)+= 1;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
-{
-#ifdef nativeFetchThenDecrement32
-    return nativeFetchThenDecrement32(ptr);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr) -= 1;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
-{
-#ifdef nativeFetchThenAdd32
-    return nativeFetchThenAdd32(ptr, value);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)+= value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
-{
-#ifdef nativeFetchThenSubtract32
-    return nativeFetchThenSubtract32(ptr, value);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    (*ptr)-= value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
-{
-#ifdef nativeIncrementThenFetch32
-    return nativeIncrementThenFetch32(ptr);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)+= 1;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
-{
-#ifdef nativeDecrementThenFetch32
-    return nativeDecrementThenFetch32(ptr);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)-= 1;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
-{
-#ifdef nativeAddThenFetch32
-    return nativeAddThenFetch32(ptr, value);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)+= value;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-Uint32
-SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
-{
-#ifdef nativeSubtractThenFetch32
-    return nativeSubtractThenFetch32(ptr, value);
-#else
-    Uint32 tmp = 0;
-
-    privateWaitLock();
-    (*ptr)-= value;
-    tmp = *ptr;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-/* 64 bit atomic operations */
-#ifdef SDL_HAS_64BIT_TYPE
-
-Uint64
-SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value)
-{
-#ifdef nativeExchange64
-    return nativeExchange64(ptr, value);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
-    tmp = *ptr;
-    *ptr = value;
-    privateUnlock();
-
-    return tmp;
-#endif
-}
-
-SDL_bool
-SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue)
-{
-#ifdef nativeCompareThenSet64
-    return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
-    result = (*ptr == oldvalue);
-    if (result)
-    {
-        *ptr = newvalue;
-    }
-    privateUnlock();
-
-    return result;
-#endif
-}
-
-SDL_bool
-SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
-{
-#ifdef nativeTestThenSet64
-    return (SDL_bool)nativeTestThenSet64(ptr);
-#else
-    SDL_bool result = SDL_FALSE;
-
-    privateWaitLock();
-    result = (*ptr == 0);
-    if (result)
-    {
-        *ptr = 1;
-    }
-    privateUnlock();
 
     return result;
 #endif
 }
 
 void
 SDL_AtomicClear64(volatile Uint64 * ptr)
 {
 #ifdef nativeClear64
-    nativeClear64(ptr);
-#else
-    privateWaitLock();
+#else
+    privateWaitLock(ptr);
     *ptr = 0;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return;
 #endif
 }
 
 Uint64
 SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
 {
 #ifdef nativeFetchThenIncrement64
-    return nativeFetchThenIncrement64(ptr);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)+= 1;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
 {
 #ifdef nativeFetchThenDecrement64
-    return nativeFetchThenDecrement64(ptr);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr) -= 1;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
 {
 #ifdef nativeFetchThenAdd64
-    return nativeFetchThenAdd64(ptr, value);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)+= value;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
 {
 #ifdef nativeFetchThenSubtract64
-    return nativeFetchThenSubtract64(ptr, value);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     tmp = *ptr;
     (*ptr)-= value;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
 {
 #ifdef nativeIncrementThenFetch64
-    return nativeIncrementThenFetch64(ptr);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)+= 1;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
 {
 #ifdef nativeDecrementThenFetch64
-    return nativeDecrementThenFetch64(ptr);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)-= 1;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
 {
 #ifdef nativeAddThenFetch64
-    return nativeAddThenFetch64(ptr, value);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)+= value;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 
 Uint64
 SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
 {
 #ifdef nativeSubtractThenFetch64
-    return nativeSubtractThenFetch64(ptr, value);
-#else
-    Uint64 tmp = 0;
-
-    privateWaitLock();
+#else
+    Uint64 tmp = 0;
+
+    privateWaitLock(ptr);
     (*ptr)-= value;
     tmp = *ptr;
-    privateUnlock();
+    privateUnlock(ptr);
 
     return tmp;
 #endif
 }
 #endif
 
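The emulated path in the new version picks a spin lock by hashing the operand's address: shift out the three zero bits that the file's comment attributes to 32/64 bit alignment, then keep five bits as an index into the 32-entry table. A small self-contained sketch of the same arithmetic (hypothetical lock_index helper; uintptr_t stands in for the file's SIZEOF_VOIDP dispatch):

    #include <stdio.h>
    #include <stdint.h>

    /* Same computation as privateWaitLock()/privateUnlock():
       drop 3 alignment bits, keep the next 5 as a table index. */
    static int lock_index(volatile void *ptr)
    {
        return (int) ((((uintptr_t) ptr) >> 3) & 0x1f);
    }

    int main(void)
    {
        static uint64_t a, b;                        /* two distinct 8-byte-aligned objects */
        printf("a -> lock %d\n", lock_index(&a));    /* address-dependent, 0..31 */
        printf("b -> lock %d\n", lock_index(&b));    /* likely a different slot */
        printf("a -> lock %d\n", lock_index(&a));    /* always matches the first */
        return 0;
    }

Operations on the same address always serialize on the same lock, while operations on different addresses collide with probability 1/32, which is the point of the scheme.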