Mercurial > sdl-ios-xcode

comparison: src/atomic/qnx/SDL_atomic.c @ 3364:70bfe3337f8a

Support for atomic operations on the ARM, PPC, MIPS, SH, and x86 platforms has been added.

author   | Mike Gorchak <lestat@i.com.ua> |
date     | Sat, 10 Oct 2009 08:06:18 +0000 |
parents  | 26ce0b98f2fb |
children | f7b03b6838cb |
comparison
3363:90aec03bf9fd (old) | 3364:70bfe3337f8a (new) |
16 License along with this library; if not, write to the Free Software | 16 License along with this library; if not, write to the Free Software |
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
18 | 18 |
19 Sam Lantinga | 19 Sam Lantinga |
20 slouken@libsdl.org | 20 slouken@libsdl.org |
21 | |
22 QNX native atomic operations | |
23 Copyright (C) 2009 Mike Gorchak | |
24 (mike@malva.ua, lestat@i.com.ua) | |
21 */ | 25 */ |
22 | 26 |
23 #include "SDL_stdinc.h" | 27 #include "SDL_stdinc.h" |
24 #include "SDL_atomic.h" | 28 #include "SDL_atomic.h" |
29 #include "SDL_error.h" | |
25 | 30 |
26 #include <atomic.h> | 31 #include <atomic.h> |
27 | 32 |
33 /* SMP Exchange for PPC platform */ | |
34 #ifdef __PPC__ | |
35 #include <ppc/smpxchg.h> | |
36 #endif /* __PPC__ */ | |
37 | |
38 /* SMP Exchange for ARM platform */ | |
39 #ifdef __ARM__ | |
40 #include <arm/smpxchg.h> | |
41 #endif /* __ARM__ */ | |
42 | |
43 /* SMP Exchange for MIPS platform */ | |
44 #if defined (__MIPSEB__) || defined(__MIPSEL__) | |
45 #include <mips/smpxchg.h> | |
46 #endif /* __MIPSEB__ || __MIPSEL__ */ | |
47 | |
48 /* SMP Exchange for SH platform */ | |
49 #ifdef __SH__ | |
50 #include <sh/smpxchg.h> | |
51 #endif /* __SH__ */ | |
52 | |
53 /* SMP Exchange for x86 platform */ | |
54 #ifdef __X86__ | |
55 #include <x86/smpxchg.h> | |
56 #endif /* __X86__ */ | |
57 | |
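
Note: the rows above pull in a per-CPU smpxchg.h header; the _smp_xchg() primitive those headers supply is what the new SDL_AtomicLock() below spins on. As a rough sketch of the idea, assuming only that _smp_xchg() atomically stores a new value and returns the old one (the QNX headers themselves are not shown here), an exchange primitive turns into a test-and-set spin lock like this, written with C11 atomics as a portable stand-in:

#include <stdatomic.h>

typedef atomic_uint sketch_spinlock;

/* Portable stand-in for _smp_xchg(): atomically store newval and
   return the previous contents. */
static unsigned sketch_xchg(sketch_spinlock *l, unsigned newval)
{
    return atomic_exchange(l, newval);
}

static void sketch_lock(sketch_spinlock *l)
{
    /* Keep exchanging 1 in; the previous value is 0 exactly once,
       and that caller owns the lock. */
    while (sketch_xchg(l, 1u) == 1u) {
        /* busy-wait */
    }
}

static void sketch_unlock(sketch_spinlock *l)
{
    (void)sketch_xchg(l, 0u); /* writing 0 releases the lock */
}
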
28 /* | 58 /* |
29 This file provides 8, 16, 32, and 64 bit atomic operations. If the | 59 This file provides 32 and 64 bit atomic operations. If the |
30 operations are provided by the native hardware and operating system | 60 operations are provided by the native hardware and operating system |
31 they are used. If they are not then the operations are emulated | 61 they are used. If they are not then the operations are emulated |
32 using the SDL mutex operations. | 62 using the SDL spin lock operations. If spin locks cannot be |
33 */ | 63 implemented then these functions must fail. |
34 | |
35 /* | |
36 First, detect whether the operations are supported and create | |
37 #defines that indicate that they do exist. The goal is to have all | |
38 the system dependent code in the top part of the file so that the | |
39 bottom can be used unchanged across all platforms. | |
40 | |
41 Second, #define all the operations in each size class that are | |
42 supported. Doing this allows supported operations to be used along | |
43 side of emulated operations. | |
44 */ | 64 */ |
45 | 65 |
46 /* | 66 void |
47 Emulated version. | 67 SDL_AtomicLock(SDL_SpinLock *lock) |
48 | 68 { |
49 Assume there is no support for atomic operations. All such | 69 unsigned volatile* l = (unsigned volatile*)lock; |
50 operations are implemented using SDL mutex operations. | 70 Uint32 oldval = 0; |
51 */ | 71 Uint32 newval = 1; |
52 | 72 |
53 #ifdef EMULATED_ATOMIC_OPERATIONS | 73 oldval = _smp_xchg(l, newval); |
54 #undef EMULATED_ATOMIC_OPERATIONS | 74 while(1 == oldval) |
55 #endif | 75 { |
56 | 76 oldval = _smp_xchg(l, newval); |
57 #ifdef EMULATED_ATOMIC_OPERATIONS | 77 } |
58 #define HAVE_ALL_8_BIT_OPS | 78 } |
59 | 79 |
60 #define nativeExchange8(ptr, value) () | 80 void |
61 #define nativeCompareThenSet8(ptr, oldvalue, newvalue) () | 81 SDL_AtomicUnlock(SDL_SpinLock *lock) |
62 #define nativeTestThenSet8(ptr) () | 82 { |
63 #define nativeClear8(ptr) () | 83 unsigned volatile* l = (unsigned volatile*)lock; |
64 #define nativeFetchThenIncrement8(ptr) () | 84 Uint32 newval = 0; |
65 #define nativeFetchThenDecrement8(ptr) () | 85 |
66 #define nativeFetchThenAdd8(ptr, value) () | 86 _smp_xchg(l, newval); |
67 #define nativeFetchThenSubtract8(ptr, value) () | 87 } |
68 #define nativeIncrementThenFetch8(ptr) () | |
69 #define nativeDecrementThenFetch8(ptr) () | |
70 #define nativeAddThenFetch8(ptr, value) () | |
71 #define nativeSubtractThenFetch8(ptr, value) () | |
72 #endif | |
73 | |
74 #ifdef EMULATED_ATOMIC_OPERATIONS | |
75 #define HAVE_ALL_16_BIT_OPS | |
76 | |
77 #define nativeExchange16(ptr, value) () | |
78 #define nativeCompareThenSet16(ptr, oldvalue, newvalue) () | |
79 #define nativeTestThenSet16(ptr) () | |
80 #define nativeClear16(ptr) () | |
81 #define nativeFetchThenIncrement16(ptr) () | |
82 #define nativeFetchThenDecrement16(ptr) () | |
83 #define nativeFetchThenAdd16(ptr, value) () | |
84 #define nativeFetchThenSubtract16(ptr, value) () | |
85 #define nativeIncrementThenFetch16(ptr) () | |
86 #define nativeDecrementThenFetch16(ptr) () | |
87 #define nativeAddThenFetch16(ptr, value) () | |
88 #define nativeSubtractThenFetch16(ptr, value) () | |
89 #endif | |
90 | |
91 #ifdef EMULATED_ATOMIC_OPERATIONS | |
92 #define HAVE_ALL_64_BIT_OPS | |
93 | |
94 #define nativeExchange64(ptr, value) () | |
95 #define nativeCompareThenSet64(ptr, oldvalue, newvalue) () | |
96 #define nativeTestThenSet64(ptr) () | |
97 #define nativeClear64(ptr) () | |
98 #define nativeFetchThenIncrement64(ptr) () | |
99 #define nativeFetchThenDecrement64(ptr) () | |
100 #define nativeFetchThenAdd64(ptr, value) () | |
101 #define nativeFetchThenSubtract64(ptr, value) () | |
102 #define nativeIncrementThenFetch64(ptr) () | |
103 #define nativeDecrementThenFetch64(ptr) () | |
104 #define nativeAddThenFetch64(ptr, value) () | |
105 #define nativeSubtractThenFetch64(ptr, value) () | |
106 #endif | |
107 | 88 |
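
Note: SDL_AtomicLock()/SDL_AtomicUnlock() above are the QNX backend for SDL's spin-lock API. A caller-side illustration (the lock and counter names here are hypothetical, not part of this changeset):

#include "SDL_atomic.h"

static SDL_SpinLock my_lock = 0; /* 0 == unlocked */
static int counter = 0;

static void
bump_counter(void)
{
    SDL_AtomicLock(&my_lock);    /* busy-waits until the lock is free */
    ++counter;                   /* keep the critical section short */
    SDL_AtomicUnlock(&my_lock);  /* stores 0, letting a waiter in */
}

Because the wait loop burns CPU, a spin lock like this is only appropriate for very short critical sections, which is how the emulation below uses it.
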
108 /* | 89 /* |
109 If any of the operations are not provided then we must emulate some of | 90 QNX 6.4.1 supports only 32 bit atomic access |
110 them. | 91 */ |
111 */ | 92 |
112 | 93 #undef nativeTestThenSet32 |
113 #if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS) | 94 #define nativeClear32 |
114 | 95 #define nativeFetchThenIncrement32 |
115 #include "SDL_mutex.h" | 96 #define nativeFetchThenDecrement32 |
116 #include "SDL_error.h" | 97 #define nativeFetchThenAdd32 |
117 | 98 #define nativeFetchThenSubtract32 |
118 static SDL_mutex * lock = NULL; | 99 #define nativeIncrementThenFetch32 |
100 #define nativeDecrementThenFetch32 | |
101 #define nativeAddThenFetch32 | |
102 #define nativeSubtractThenFetch32 | |
103 | |
104 #undef nativeTestThenSet64 | |
105 #undef nativeClear64 | |
106 #undef nativeFetchThenIncrement64 | |
107 #undef nativeFetchThenDecrement64 | |
108 #undef nativeFetchThenAdd64 | |
109 #undef nativeFetchThenSubtract64 | |
110 #undef nativeIncrementThenFetch64 | |
111 #undef nativeDecrementThenFetch64 | |
112 #undef nativeAddThenFetch64 | |
113 #undef nativeSubtractThenFetch64 | |
114 | |
115 /* | |
116 If any of the operations are not provided then we must emulate some | |
117 of them. That means we need a nice implementation of spin locks | |
118 that avoids the "one big lock" problem. We use a vector of spin | |
119 locks and pick which one to use based on the address of the operand | |
120 of the function. | |
121 | |
122 To generate the index of the lock we first shift by 3 bits to get | |
123 rid of the zero bits that result from 32 and 64 bit alignment of | |
124 data. We then mask off all but 5 bits and use those 5 bits as an | |
125 index into the table. | |
126 | |
127 Picking the lock this way ensures that accesses to the same data at | |
128 the same time will go to the same lock. OTOH, accesses to different | |
129 data have only a 1/32 chance of hitting the same lock. That should | |
130 pretty much eliminate the chance of several atomic operations on | |
131 different data waiting on the same "big lock". If that isn't enough, | |
132 the table of locks can be expanded to a new size, so long as | |
133 the new size is a power of two. | |
134 */ | |
135 | |
136 static SDL_SpinLock locks[32] = { | |
137 0, 0, 0, 0, 0, 0, 0, 0, | |
138 0, 0, 0, 0, 0, 0, 0, 0, | |
139 0, 0, 0, 0, 0, 0, 0, 0, | |
140 0, 0, 0, 0, 0, 0, 0, 0, | |
141 }; | |
119 | 142 |
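
Note: the index math described in the comment above can be checked in isolation. A minimal sketch, using uintptr_t instead of the file's SIZEOF_VOIDP switch:

#include <stdint.h>
#include <stdio.h>

/* Drop the 3 low bits (zero for 8-byte-aligned data), keep the
   next 5, giving an index in 0..31. */
static unsigned
lock_index(volatile void *ptr)
{
    return (unsigned)((((uintptr_t)ptr) >> 3) & 0x1f);
}

int
main(void)
{
    static uint64_t a[4]; /* consecutive 8-byte operands */
    unsigned i;

    /* Adjacent operands map to consecutive locks, so two threads
       touching different data rarely share a lock. */
    for (i = 0; i < 4; i++) {
        printf("&a[%u] -> lock %u\n", i, lock_index(&a[i]));
    }
    return 0;
}
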
120 static __inline__ void | 143 static __inline__ void |
121 privateWaitLock() | 144 privateWaitLock(volatile void *ptr) |
122 { | 145 { |
123 if(NULL == lock) | 146 #if SIZEOF_VOIDP == 4 |
124 { | 147 Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f); |
125 lock = SDL_CreateMutex(); | 148 #elif SIZEOF_VOIDP == 8 |
126 if (NULL == lock) | 149 Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f); |
127 { | 150 #endif /* SIZEOF_VOIDP */ |
128 SDL_SetError("SDL_atomic.c: can't create a mutex"); | 151 |
129 return; | 152 SDL_AtomicLock(&locks[index]); |
130 } | |
131 } | |
132 | |
133 if (-1 == SDL_LockMutex(lock)) | |
134 { | |
135 SDL_SetError("SDL_atomic.c: can't lock mutex"); | |
136 } | |
137 } | 153 } |
138 | 154 |
139 static __inline__ void | 155 static __inline__ void |
140 privateUnlock() | 156 privateUnlock(volatile void *ptr) |
141 { | 157 { |
142 if (-1 == SDL_UnlockMutex(lock)) | 158 #if SIZEOF_VOIDP == 4 |
143 { | 159 Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f); |
144 SDL_SetError("SDL_atomic.c: can't unlock mutex"); | 160 #elif SIZEOF_VOIDP == 8 |
145 } | 161 Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f); |
146 } | 162 #endif /* SIZEOF_VOIDP */ |
147 | 163 |
148 #endif | 164 SDL_AtomicUnlock(&locks[index]); |
149 | 165 } |
150 /* 8 bit atomic operations */ | 166 |
151 | 167 /* 32 bit atomic operations */ |
152 Uint8 | |
153 SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value) | |
154 { | |
155 #ifdef nativeExchange8 | |
156 return nativeExchange8(ptr, value); | |
157 #else | |
158 Uint8 tmp = 0; | |
159 | |
160 privateWaitLock(); | |
161 tmp = *ptr; | |
162 *ptr = value; | |
163 privateUnlock(); | |
164 | |
165 return tmp; | |
166 #endif | |
167 } | |
168 | 168 |
169 SDL_bool | 169 SDL_bool |
170 SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue) | 170 SDL_AtomicTestThenSet32(volatile Uint32 * ptr) |
171 { | 171 { |
172 #ifdef nativeCompareThenSet8 | 172 #ifdef nativeTestThenSet32 |
173 return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue); | |
174 #else | 173 #else |
175 SDL_bool result = SDL_FALSE; | 174 SDL_bool result = SDL_FALSE; |
176 | 175 |
177 privateWaitLock(); | 176 privateWaitLock(ptr); |
178 result = (*ptr == oldvalue); | |
179 if (result) | |
180 { | |
181 *ptr = newvalue; | |
182 } | |
183 privateUnlock(); | |
184 | |
185 return result; | |
186 #endif | |
187 } | |
188 | |
189 SDL_bool | |
190 SDL_AtomicTestThenSet8(volatile Uint8 * ptr) | |
191 { | |
192 #ifdef nativeTestThenSet8 | |
193 return (SDL_bool)nativeTestThenSet8(ptr); | |
194 #else | |
195 SDL_bool result = SDL_FALSE; | |
196 | |
197 privateWaitLock(); | |
198 result = (*ptr == 0); | 177 result = (*ptr == 0); |
199 if (result) | 178 if (result) |
200 { | 179 { |
201 *ptr = 1; | 180 *ptr = 1; |
202 } | 181 } |
203 privateUnlock(); | 182 privateUnlock(ptr); |
204 | 183 |
205 return result; | 184 return result; |
206 #endif | 185 #endif /* nativeTestThenSet32 */ |
207 } | 186 } |
208 | 187 |
209 void | 188 void |
210 SDL_AtomicClear8(volatile Uint8 * ptr) | 189 SDL_AtomicClear32(volatile Uint32 * ptr) |
211 { | 190 { |
212 #ifdef nativeClear8 | 191 #ifdef nativeClear32 |
213 nativeClear8(ptr); | 192 atomic_clr(ptr, 0xFFFFFFFF); |
214 #else | 193 #else |
215 privateWaitLock(); | 194 privateWaitLock(ptr); |
216 *ptr = 0; | 195 *ptr = 0; |
217 privateUnlock(); | 196 privateUnlock(ptr); |
218 | 197 |
219 return; | 198 return; |
220 #endif | 199 #endif /* nativeClear32 */ |
221 } | 200 } |
222 | 201 |
223 Uint8 | 202 Uint32 |
224 SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr) | 203 SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr) |
225 { | 204 { |
226 #ifdef nativeFetchThenIncrement8 | 205 #ifdef nativeFetchThenIncrement32 |
227 return nativeFetchThenIncrement8(ptr); | 206 return atomic_add_value(ptr, 0x00000001); |
228 #else | 207 #else |
229 Uint8 tmp = 0; | 208 Uint32 tmp = 0; |
230 | 209 |
231 privateWaitLock(); | 210 privateWaitLock(ptr); |
232 tmp = *ptr; | 211 tmp = *ptr; |
233 (*ptr)+= 1; | 212 (*ptr)+= 1; |
234 privateUnlock(); | 213 privateUnlock(ptr); |
235 | 214 |
236 return tmp; | 215 return tmp; |
237 #endif | 216 #endif /* nativeFetchThenIncrement32 */ |
238 } | 217 } |
239 | 218 |
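
Note: on the native path the new code maps fetch-then-increment directly onto QNX's atomic_add_value(), which returns the value the location held before the addition — exactly the fetch-then-add contract. A QNX-only sketch of that behavior:

#include <atomic.h>

/* Returns the pre-increment value; afterwards *p == before + 1. */
unsigned
demo_fetch_then_increment(volatile unsigned *p)
{
    unsigned before = atomic_add_value(p, 1);
    return before;
}
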
240 Uint8 | 219 Uint32 |
241 SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr) | 220 SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr) |
242 { | 221 { |
243 #ifdef nativeFetchThenDecrement8 | 222 #ifdef nativeFetchThenDecrement32 |
244 return nativeFetchThenDecrement8(ptr); | 223 return atomic_sub_value(ptr, 0x00000001); |
245 #else | 224 #else |
246 Uint8 tmp = 0; | 225 Uint32 tmp = 0; |
247 | 226 |
248 privateWaitLock(); | 227 privateWaitLock(ptr); |
249 tmp = *ptr; | 228 tmp = *ptr; |
250 (*ptr) -= 1; | 229 (*ptr) -= 1; |
251 privateUnlock(); | 230 privateUnlock(ptr); |
252 | 231 |
253 return tmp; | 232 return tmp; |
254 #endif | 233 #endif /* nativeFetchThenDecrement32 */ |
255 } | 234 } |
256 | 235 |
257 Uint8 | 236 Uint32 |
258 SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value) | 237 SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value) |
259 { | 238 { |
260 #ifdef nativeFetchThenAdd8 | 239 #ifdef nativeFetchThenAdd32 |
261 return nativeFetchThenAdd8(ptr, value); | 240 return atomic_add_value(ptr, value); |
262 #else | 241 #else |
263 Uint8 tmp = 0; | 242 Uint32 tmp = 0; |
264 | 243 |
265 privateWaitLock(); | 244 privateWaitLock(ptr); |
266 tmp = *ptr; | 245 tmp = *ptr; |
267 (*ptr)+= value; | 246 (*ptr)+= value; |
268 privateUnlock(); | 247 privateUnlock(ptr); |
269 | 248 |
270 return tmp; | 249 return tmp; |
271 #endif | 250 #endif /* nativeFetchThenAdd32 */ |
272 } | 251 } |
273 | 252 |
274 Uint8 | 253 Uint32 |
275 SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value) | 254 SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value) |
276 { | 255 { |
277 #ifdef nativeFetchThenSubtract8 | 256 #ifdef nativeFetchThenSubtract32 |
278 return nativeFetchThenSubtract8(ptr, value); | 257 return atomic_sub_value(ptr, value); |
279 #else | 258 #else |
280 Uint8 tmp = 0; | 259 Uint32 tmp = 0; |
281 | 260 |
282 privateWaitLock(); | 261 privateWaitLock(ptr); |
283 tmp = *ptr; | 262 tmp = *ptr; |
284 (*ptr)-= value; | 263 (*ptr)-= value; |
285 privateUnlock(); | 264 privateUnlock(ptr); |
286 | 265 |
287 return tmp; | 266 return tmp; |
288 #endif | 267 #endif /* nativeFetchThenSubtract32 */ |
289 } | 268 } |
290 | 269 |
291 Uint8 | 270 Uint32 |
292 SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr) | 271 SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr) |
293 { | 272 { |
294 #ifdef nativeIncrementThenFetch8 | 273 #ifdef nativeIncrementThenFetch32 |
295 return nativeIncrementThenFetch8(ptr); | 274 atomic_add(ptr, 0x00000001); |
296 #else | 275 return atomic_add_value(ptr, 0x00000000); |
297 Uint8 tmp = 0; | 276 #else |
298 | 277 Uint32 tmp = 0; |
299 privateWaitLock(); | 278 |
279 privateWaitLock(ptr); | |
300 (*ptr)+= 1; | 280 (*ptr)+= 1; |
301 tmp = *ptr; | 281 tmp = *ptr; |
302 privateUnlock(); | 282 privateUnlock(ptr); |
303 | 283 |
304 return tmp; | 284 return tmp; |
305 #endif | 285 #endif /* nativeIncrementThenFetch32 */ |
306 } | 286 } |
307 | 287 |
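
Note: the native increment-then-fetch above is built from two separate calls: atomic_add() to do the increment, then atomic_add_value(ptr, 0) to read the result back. Another thread can slip in between the two, so the value read back may include other threads' updates rather than being exactly this thread's result. A single-call form, assuming only atomic_add_value()'s documented return-the-old-value behavior, avoids that window:

#include <atomic.h>

/* One atomic op: the pre-add value plus our own increment is the
   value this thread's increment produced. */
unsigned
demo_increment_then_fetch(volatile unsigned *p)
{
    return atomic_add_value(p, 1) + 1;
}
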
308 Uint8 | 288 Uint32 |
309 SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr) | 289 SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr) |
310 { | 290 { |
311 #ifdef nativeDecrementThenFetch8 | 291 #ifdef nativeDecrementThenFetch32 |
312 return nativeDecrementThenFetch8(ptr); | 292 atomic_sub(ptr, 0x00000001); |
313 #else | 293 return atomic_sub_value(ptr, 0x00000000); |
314 Uint8 tmp = 0; | 294 #else |
315 | 295 Uint32 tmp = 0; |
316 privateWaitLock(); | 296 |
297 privateWaitLock(ptr); | |
317 (*ptr)-= 1; | 298 (*ptr)-= 1; |
318 tmp = *ptr; | 299 tmp = *ptr; |
319 privateUnlock(); | 300 privateUnlock(ptr); |
320 | 301 |
321 return tmp; | 302 return tmp; |
322 #endif | 303 #endif /* nativeDecrementThenFetch32 */ |
323 } | 304 } |
324 | 305 |
325 Uint8 | 306 Uint32 |
326 SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value) | 307 SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value) |
327 { | 308 { |
328 #ifdef nativeAddThenFetch8 | 309 #ifdef nativeAddThenFetch32 |
329 return nativeAddThenFetch8(ptr, value); | 310 atomic_add(ptr, value); |
330 #else | 311 return atomic_add_value(ptr, 0x00000000); |
331 Uint8 tmp = 0; | 312 #else |
332 | 313 Uint32 tmp = 0; |
333 privateWaitLock(); | 314 |
315 privateWaitLock(ptr); | |
334 (*ptr)+= value; | 316 (*ptr)+= value; |
335 tmp = *ptr; | 317 tmp = *ptr; |
336 privateUnlock(); | 318 privateUnlock(ptr); |
337 | 319 |
338 return tmp; | 320 return tmp; |
339 #endif | 321 #endif /* nativeAddThenFetch32 */ |
340 } | 322 } |
341 | 323 |
342 Uint8 | 324 Uint32 |
343 SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value) | 325 SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value) |
344 { | 326 { |
345 #ifdef nativeSubtractThenFetch8 | 327 #ifdef nativeSubtractThenFetch32 |
346 return nativeSubtractThenFetch8(ptr, value); | 328 atomic_sub(ptr, value); |
347 #else | 329 return atomic_sub_value(ptr, 0x00000000); |
348 Uint8 tmp = 0; | 330 #else |
349 | 331 Uint32 tmp = 0; |
350 privateWaitLock(); | 332 |
333 privateWaitLock(ptr); | |
351 (*ptr)-= value; | 334 (*ptr)-= value; |
352 tmp = *ptr; | 335 tmp = *ptr; |
353 privateUnlock(); | 336 privateUnlock(ptr); |
354 | 337 |
355 return tmp; | 338 return tmp; |
356 #endif | 339 #endif /* nativeSubtractThenFetch32 */ |
357 } | 340 } |
358 | 341 |
359 /* 16 bit atomic operations */ | 342 /* 64 bit atomic operations */ |
360 | 343 #ifdef SDL_HAS_64BIT_TYPE |
361 Uint16 | |
362 SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value) | |
363 { | |
364 #ifdef nativeExchange16 | |
365 return nativeExchange16(ptr, value); | |
366 #else | |
367 Uint16 tmp = 0; | |
368 | |
369 privateWaitLock(); | |
370 tmp = *ptr; | |
371 *ptr = value; | |
372 privateUnlock(); | |
373 | |
374 return tmp; | |
375 #endif | |
376 } | |
377 | 344 |
378 SDL_bool | 345 SDL_bool |
379 SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue) | 346 SDL_AtomicTestThenSet64(volatile Uint64 * ptr) |
380 { | 347 { |
381 #ifdef nativeCompareThenSet16 | 348 #ifdef nativeTestThenSet64 |
382 return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue); | |
383 #else | 349 #else |
384 SDL_bool result = SDL_FALSE; | 350 SDL_bool result = SDL_FALSE; |
385 | 351 |
386 privateWaitLock(); | 352 privateWaitLock(ptr); |
387 result = (*ptr == oldvalue); | |
388 if (result) | |
389 { | |
390 *ptr = newvalue; | |
391 } | |
392 privateUnlock(); | |
393 | |
394 return result; | |
395 #endif | |
396 } | |
397 | |
398 SDL_bool | |
399 SDL_AtomicTestThenSet16(volatile Uint16 * ptr) | |
400 { | |
401 #ifdef nativeTestThenSet16 | |
402 return (SDL_bool)nativeTestThenSet16(ptr); | |
403 #else | |
404 SDL_bool result = SDL_FALSE; | |
405 | |
406 privateWaitLock(); | |
407 result = (*ptr == 0); | 353 result = (*ptr == 0); |
408 if (result) | 354 if (result) |
409 { | 355 { |
410 *ptr = 1; | 356 *ptr = 1; |
411 } | 357 } |
412 privateUnlock(); | 358 privateUnlock(ptr); |
413 | 359 |
414 return result; | 360 return result; |
415 #endif | 361 #endif /* nativeTestThenSet64 */ |
416 } | |
417 | |
418 void | |
419 SDL_AtomicClear16(volatile Uint16 * ptr) | |
420 { | |
421 #ifdef nativeClear16 | |
422 nativeClear16(ptr); | |
423 #else | |
424 privateWaitLock(); | |
425 *ptr = 0; | |
426 privateUnlock(); | |
427 | |
428 return; | |
429 #endif | |
430 } | |
431 | |
432 Uint16 | |
433 SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr) | |
434 { | |
435 #ifdef nativeFetchThenIncrement16 | |
436 return nativeFetchThenIncrement16(ptr); | |
437 #else | |
438 Uint16 tmp = 0; | |
439 | |
440 privateWaitLock(); | |
441 tmp = *ptr; | |
442 (*ptr)+= 1; | |
443 privateUnlock(); | |
444 | |
445 return tmp; | |
446 #endif | |
447 } | |
448 | |
449 Uint16 | |
450 SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr) | |
451 { | |
452 #ifdef nativeFetchThenDecrement16 | |
453 return nativeFetchThenDecrement16(ptr); | |
454 #else | |
455 Uint16 tmp = 0; | |
456 | |
457 privateWaitLock(); | |
458 tmp = *ptr; | |
459 (*ptr) -= 1; | |
460 privateUnlock(); | |
461 | |
462 return tmp; | |
463 #endif | |
464 } | |
465 | |
466 Uint16 | |
467 SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value) | |
468 { | |
469 #ifdef nativeFetchThenAdd16 | |
470 return nativeFetchThenAdd16(ptr, value); | |
471 #else | |
472 Uint16 tmp = 0; | |
473 | |
474 privateWaitLock(); | |
475 tmp = *ptr; | |
476 (*ptr)+= value; | |
477 privateUnlock(); | |
478 | |
479 return tmp; | |
480 #endif | |
481 } | |
482 | |
483 Uint16 | |
484 SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value) | |
485 { | |
486 #ifdef nativeFetchThenSubtract16 | |
487 return nativeFetchThenSubtract16(ptr, value); | |
488 #else | |
489 Uint16 tmp = 0; | |
490 | |
491 privateWaitLock(); | |
492 tmp = *ptr; | |
493 (*ptr)-= value; | |
494 privateUnlock(); | |
495 | |
496 return tmp; | |
497 #endif | |
498 } | |
499 | |
500 Uint16 | |
501 SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr) | |
502 { | |
503 #ifdef nativeIncrementThenFetch16 | |
504 return nativeIncrementThenFetch16(ptr); | |
505 #else | |
506 Uint16 tmp = 0; | |
507 | |
508 privateWaitLock(); | |
509 (*ptr)+= 1; | |
510 tmp = *ptr; | |
511 privateUnlock(); | |
512 | |
513 return tmp; | |
514 #endif | |
515 } | |
516 | |
517 Uint16 | |
518 SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr) | |
519 { | |
520 #ifdef nativeDecrementThenFetch16 | |
521 return nativeDecrementThenFetch16(ptr); | |
522 #else | |
523 Uint16 tmp = 0; | |
524 | |
525 privateWaitLock(); | |
526 (*ptr)-= 1; | |
527 tmp = *ptr; | |
528 privateUnlock(); | |
529 | |
530 return tmp; | |
531 #endif | |
532 } | |
533 | |
534 Uint16 | |
535 SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value) | |
536 { | |
537 #ifdef nativeAddThenFetch16 | |
538 return nativeAddThenFetch16(ptr, value); | |
539 #else | |
540 Uint16 tmp = 0; | |
541 | |
542 privateWaitLock(); | |
543 (*ptr)+= value; | |
544 tmp = *ptr; | |
545 privateUnlock(); | |
546 | |
547 return tmp; | |
548 #endif | |
549 } | |
550 | |
551 Uint16 | |
552 SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value) | |
553 { | |
554 #ifdef nativeSubtractThenFetch16 | |
555 return nativeSubtractThenFetch16(ptr, value); | |
556 #else | |
557 Uint16 tmp = 0; | |
558 | |
559 privateWaitLock(); | |
560 (*ptr)-= value; | |
561 tmp = *ptr; | |
562 privateUnlock(); | |
563 | |
564 return tmp; | |
565 #endif | |
566 } | |
567 | |
568 /* 64 bit atomic operations */ | |
569 #ifdef SDL_HAS_64BIT_TYPE | |
570 | |
571 Uint64 | |
572 SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value) | |
573 { | |
574 #ifdef nativeExchange64 | |
575 return nativeExchange64(ptr, value); | |
576 #else | |
577 Uint64 tmp = 0; | |
578 | |
579 privateWaitLock(); | |
580 tmp = *ptr; | |
581 *ptr = value; | |
582 privateUnlock(); | |
583 | |
584 return tmp; | |
585 #endif | |
586 } | |
587 | |
588 SDL_bool | |
589 SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue) | |
590 { | |
591 #ifdef nativeCompareThenSet64 | |
592 return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue); | |
593 #else | |
594 SDL_bool result = SDL_FALSE; | |
595 | |
596 privateWaitLock(); | |
597 result = (*ptr == oldvalue); | |
598 if (result) | |
599 { | |
600 *ptr = newvalue; | |
601 } | |
602 privateUnlock(); | |
603 | |
604 return result; | |
605 #endif | |
606 } | |
607 | |
608 SDL_bool | |
609 SDL_AtomicTestThenSet64(volatile Uint64 * ptr) | |
610 { | |
611 #ifdef nativeTestThenSet64 | |
612 return (SDL_bool)nativeTestThenSet64(ptr); | |
613 #else | |
614 SDL_bool result = SDL_FALSE; | |
615 | |
616 privateWaitLock(); | |
617 result = (*ptr == 0); | |
618 if (result) | |
619 { | |
620 *ptr = 1; | |
621 } | |
622 privateUnlock(); | |
623 | |
624 return result; | |
625 #endif | |
626 } | 362 } |
627 | 363 |
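
Note: since QNX 6.4.1 offers no 64-bit atomics, every 64-bit operation in this section takes the spin-lock path. The test-then-set just defined works as a one-shot guard; an illustration (the flag name is hypothetical), compiled only when SDL_HAS_64BIT_TYPE is defined:

#include "SDL_atomic.h"

static volatile Uint64 init_flag = 0;

static void
init_once(void)
{
    /* Returns SDL_TRUE for exactly one caller: the one that saw 0
       and stored 1 under the spin lock. */
    if (SDL_AtomicTestThenSet64(&init_flag)) {
        /* ... one-time setup goes here ... */
    }
}
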
628 void | 364 void |
629 SDL_AtomicClear64(volatile Uint64 * ptr) | 365 SDL_AtomicClear64(volatile Uint64 * ptr) |
630 { | 366 { |
631 #ifdef nativeClear64 | 367 #ifdef nativeClear64 |
632 nativeClear64(ptr); | 368 #else |
633 #else | 369 privateWaitLock(ptr); |
634 privateWaitLock(); | |
635 *ptr = 0; | 370 *ptr = 0; |
636 privateUnlock(); | 371 privateUnlock(ptr); |
637 | 372 |
638 return; | 373 return; |
639 #endif | 374 #endif /* nativeClear64 */ |
640 } | 375 } |
641 | 376 |
642 Uint64 | 377 Uint64 |
643 SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr) | 378 SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr) |
644 { | 379 { |
645 #ifdef nativeFetchThenIncrement64 | 380 #ifdef nativeFetchThenIncrement64 |
646 return nativeFetchThenIncrement64(ptr); | 381 #else |
647 #else | 382 Uint64 tmp = 0; |
648 Uint64 tmp = 0; | 383 |
649 | 384 privateWaitLock(ptr); |
650 privateWaitLock(); | |
651 tmp = *ptr; | 385 tmp = *ptr; |
652 (*ptr)+= 1; | 386 (*ptr)+= 1; |
653 privateUnlock(); | 387 privateUnlock(ptr); |
654 | 388 |
655 return tmp; | 389 return tmp; |
656 #endif | 390 #endif /* nativeFetchThenIncrement64 */ |
657 } | 391 } |
658 | 392 |
659 Uint64 | 393 Uint64 |
660 SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr) | 394 SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr) |
661 { | 395 { |
662 #ifdef nativeFetchThenDecrement64 | 396 #ifdef nativeFetchThenDecrement64 |
663 return nativeFetchThenDecrement64(ptr); | 397 #else |
664 #else | 398 Uint64 tmp = 0; |
665 Uint64 tmp = 0; | 399 |
666 | 400 privateWaitLock(ptr); |
667 privateWaitLock(); | |
668 tmp = *ptr; | 401 tmp = *ptr; |
669 (*ptr) -= 1; | 402 (*ptr) -= 1; |
670 privateUnlock(); | 403 privateUnlock(ptr); |
671 | 404 |
672 return tmp; | 405 return tmp; |
673 #endif | 406 #endif /* nativeFetchThenDecrement64 */ |
674 } | 407 } |
675 | 408 |
676 Uint64 | 409 Uint64 |
677 SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value) | 410 SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value) |
678 { | 411 { |
679 #ifdef nativeFetchThenAdd64 | 412 #ifdef nativeFetchThenAdd64 |
680 return nativeFetchThenAdd64(ptr, value); | 413 #else |
681 #else | 414 Uint64 tmp = 0; |
682 Uint64 tmp = 0; | 415 |
683 | 416 privateWaitLock(ptr); |
684 privateWaitLock(); | |
685 tmp = *ptr; | 417 tmp = *ptr; |
686 (*ptr)+= value; | 418 (*ptr)+= value; |
687 privateUnlock(); | 419 privateUnlock(ptr); |
688 | 420 |
689 return tmp; | 421 return tmp; |
690 #endif | 422 #endif /* nativeFetchThenAdd64 */ |
691 } | 423 } |
692 | 424 |
693 Uint64 | 425 Uint64 |
694 SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value) | 426 SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value) |
695 { | 427 { |
696 #ifdef nativeFetchThenSubtract64 | 428 #ifdef nativeFetchThenSubtract64 |
697 return nativeFetchThenSubtract64(ptr, value); | 429 #else |
698 #else | 430 Uint64 tmp = 0; |
699 Uint64 tmp = 0; | 431 |
700 | 432 privateWaitLock(ptr); |
701 privateWaitLock(); | |
702 tmp = *ptr; | 433 tmp = *ptr; |
703 (*ptr)-= value; | 434 (*ptr)-= value; |
704 privateUnlock(); | 435 privateUnlock(ptr); |
705 | 436 |
706 return tmp; | 437 return tmp; |
707 #endif | 438 #endif /* nativeFetchThenSubtract64 */ |
708 } | 439 } |
709 | 440 |
710 Uint64 | 441 Uint64 |
711 SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr) | 442 SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr) |
712 { | 443 { |
713 #ifdef nativeIncrementThenFetch64 | 444 #ifdef nativeIncrementThenFetch64 |
714 return nativeIncrementThenFetch64(ptr); | 445 #else |
715 #else | 446 Uint64 tmp = 0; |
716 Uint64 tmp = 0; | 447 |
717 | 448 privateWaitLock(ptr); |
718 privateWaitLock(); | |
719 (*ptr)+= 1; | 449 (*ptr)+= 1; |
720 tmp = *ptr; | 450 tmp = *ptr; |
721 privateUnlock(); | 451 privateUnlock(ptr); |
722 | 452 |
723 return tmp; | 453 return tmp; |
724 #endif | 454 #endif /* nativeIncrementThenFetch64 */ |
725 } | 455 } |
726 | 456 |
727 Uint64 | 457 Uint64 |
728 SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr) | 458 SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr) |
729 { | 459 { |
730 #ifdef nativeDecrementThenFetch64 | 460 #ifdef nativeDecrementThenFetch64 |
731 return nativeDecrementThenFetch64(ptr); | 461 #else |
732 #else | 462 Uint64 tmp = 0; |
733 Uint64 tmp = 0; | 463 |
734 | 464 privateWaitLock(ptr); |
735 privateWaitLock(); | |
736 (*ptr)-= 1; | 465 (*ptr)-= 1; |
737 tmp = *ptr; | 466 tmp = *ptr; |
738 privateUnlock(); | 467 privateUnlock(ptr); |
739 | 468 |
740 return tmp; | 469 return tmp; |
741 #endif | 470 #endif /* nativeDecrementThenFetch64 */ |
742 } | 471 } |
743 | 472 |
744 Uint64 | 473 Uint64 |
745 SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value) | 474 SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value) |
746 { | 475 { |
747 #ifdef nativeAddThenFetch64 | 476 #ifdef nativeAddThenFetch64 |
748 return nativeAddThenFetch64(ptr, value); | 477 #else |
749 #else | 478 Uint64 tmp = 0; |
750 Uint64 tmp = 0; | 479 |
751 | 480 privateWaitLock(ptr); |
752 privateWaitLock(); | |
753 (*ptr)+= value; | 481 (*ptr)+= value; |
754 tmp = *ptr; | 482 tmp = *ptr; |
755 privateUnlock(); | 483 privateUnlock(ptr); |
756 | 484 |
757 return tmp; | 485 return tmp; |
758 #endif | 486 #endif /* nativeAddThenFetch64 */ |
759 } | 487 } |
760 | 488 |
761 Uint64 | 489 Uint64 |
762 SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value) | 490 SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value) |
763 { | 491 { |
764 #ifdef nativeSubtractThenFetch64 | 492 #ifdef nativeSubtractThenFetch64 |
765 return nativeSubtractThenFetch64(ptr, value); | 493 #else |
766 #else | 494 Uint64 tmp = 0; |
767 Uint64 tmp = 0; | 495 |
768 | 496 privateWaitLock(ptr); |
769 privateWaitLock(); | |
770 (*ptr)-= value; | 497 (*ptr)-= value; |
771 tmp = *ptr; | 498 tmp = *ptr; |
772 privateUnlock(); | 499 privateUnlock(ptr); |
773 | 500 |
774 return tmp; | 501 return tmp; |
775 #endif | 502 #endif /* nativeSubtractThenFetch64 */ |
776 } | 503 } |
777 #endif | 504 |
778 | 505 #endif /* SDL_HAS_64BIT_TYPE */ |
779 /* QNX native 32 bit atomic operations */ | |
780 | |
781 Uint32 | |
782 SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value) | |
783 { | |
784 Uint32 tmp = 0; | |
785 | |
786 privateWaitLock(); | |
787 tmp = *ptr; | |
788 *ptr = value; | |
789 privateUnlock(); | |
790 | |
791 return tmp; | |
792 } | |
793 | |
794 SDL_bool | |
795 SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue) | |
796 { | |
797 SDL_bool result = SDL_FALSE; | |
798 | |
799 privateWaitLock(); | |
800 result = (*ptr == oldvalue); | |
801 if (result) | |
802 { | |
803 *ptr = newvalue; | |
804 } | |
805 privateUnlock(); | |
806 | |
807 return result; | |
808 } | |
809 | |
810 SDL_bool | |
811 SDL_AtomicTestThenSet32(volatile Uint32 * ptr) | |
812 { | |
813 SDL_bool result = SDL_FALSE; | |
814 | |
815 privateWaitLock(); | |
816 result = (*ptr == 0); | |
817 if (result) | |
818 { | |
819 *ptr = 1; | |
820 } | |
821 privateUnlock(); | |
822 | |
823 return result; | |
824 } | |
825 | |
826 void | |
827 SDL_AtomicClear32(volatile Uint32 * ptr) | |
828 { | |
829 atomic_clr(ptr, 0xFFFFFFFF); | |
830 } | |
831 | |
832 Uint32 | |
833 SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr) | |
834 { | |
835 return atomic_add_value(ptr, 0x00000001); | |
836 } | |
837 | |
838 Uint32 | |
839 SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr) | |
840 { | |
841 return atomic_sub_value(ptr, 0x00000001); | |
842 } | |
843 | |
844 Uint32 | |
845 SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value) | |
846 { | |
847 return atomic_add_value(ptr, value); | |
848 } | |
849 | |
850 Uint32 | |
851 SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value) | |
852 { | |
853 return atomic_sub_value(ptr, value); | |
854 } | |
855 | |
856 Uint32 | |
857 SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr) | |
858 { | |
859 atomic_add(ptr, 0x00000001); | |
860 return atomic_add_value(ptr, 0x00000000); | |
861 } | |
862 | |
863 Uint32 | |
864 SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr) | |
865 { | |
866 atomic_sub(ptr, 0x00000001); | |
867 return atomic_sub_value(ptr, 0x00000000); | |
868 } | |
869 | |
870 Uint32 | |
871 SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value) | |
872 { | |
873 atomic_add(ptr, value); | |
874 return atomic_add_value(ptr, 0x00000000); | |
875 } | |
876 | |
877 Uint32 | |
878 SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value) | |
879 { | |
880 atomic_sub(ptr, value); | |
881 return atomic_sub_value(ptr, 0x00000000); | |
882 } | |
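
Note: the deleted rows above are the old mutex-backed 32-bit section; those operations now live earlier in the file on the native path. A small driver exercising the resulting 32-bit API end to end (illustration only, not part of the changeset):

#include <stdio.h>
#include "SDL_atomic.h"

int
main(void)
{
    volatile Uint32 v = 0;
    Uint32 old;

    SDL_AtomicAddThenFetch32(&v, 5);        /* v == 5 */
    old = SDL_AtomicFetchThenAdd32(&v, 2);  /* old == 5, v == 7 */
    printf("%u %u\n", (unsigned)old,
           (unsigned)SDL_AtomicDecrementThenFetch32(&v)); /* "5 6" */
    return 0;
}
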