Mercurial > sdl-ios-xcode
comparison include/SDL_atomic.h @ 3199:3e1bf2b8bd81
This check-in updates SDL_atomic.h to reflect the new set of atomic operations in 32 and 64 bit form.
It also updates configure.in to compile the linux version of the library. The three versions are all dummy
implementations that do nothing. They are being checked in as placeholders. Mostly, I just wanted to get
the placeholders and the configure.in checked in.
author | Bob Pendleton <bob@pendleton.com> |
---|---|
date | Wed, 24 Jun 2009 20:04:08 +0000 |
parents | e041d2c603fe |
children | 3aa519a5c676 |
comparison
equal
deleted
inserted
replaced
3198:fefe74ca604d | 3199:3e1bf2b8bd81 |
---|---|
21 */ | 21 */ |
22 | 22 |
23 /** | 23 /** |
24 * \file SDL_atomic.h | 24 * \file SDL_atomic.h |
25 * | 25 * |
26 * Atomic int and pointer magic | 26 * Atomic operations. |
27 */ | 27 */ |
28 | 28 |
29 #ifndef _SDL_atomic_h_ | 29 #ifndef _SDL_atomic_h_ |
30 #define _SDL_atomic_h_ | 30 #define _SDL_atomic_h_ |
31 | 31 |
40 /* *INDENT-OFF* */ | 40 /* *INDENT-OFF* */ |
41 extern "C" { | 41 extern "C" { |
42 /* *INDENT-ON* */ | 42 /* *INDENT-ON* */ |
43 #endif | 43 #endif |
44 | 44 |
45 /* indent is really bad at handling assembly */ | |
46 /* *INDENT-OFF* */ | 45 /* *INDENT-OFF* */ |
47 | 46 /** |
48 #if defined(__GNUC__) && (defined(i386) || defined(__i386__) || defined(__x86_64__)) | 47 * \def SDL_AtomicBusyWait32 (ptr) |
49 static __inline__ void | 48 * |
50 SDL_atomic_int_add(volatile int* atomic, int value) | 49 * \brief Implements a simple busy wait for use with |
51 { | 50 * SDL_AtomicTestThenSet and SDL_AtomicClear. |
52 __asm__ __volatile__("lock;" | 51 * |
53 "addl %1, %0" | 52 * Note: This can be an infinite loop. |
54 : "=m" (*atomic) | 53 * |
55 : "ir" (value), | 54 */ |
56 "m" (*atomic)); | 55 #define SDL_AtomicBusyWait32(ptr) \ |
57 } | 56 { \ |
58 | 57 while (!SDL_AtomicTestThenSet32(ptr) \ |
59 static __inline__ int | 58 { \ |
60 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | 59 }; \ |
61 { | 60 }; |
62 int rv; | 61 |
63 __asm__ __volatile__("lock;" | 62 /** |
64 "xaddl %0, %1" | 63 * \def SDL_AtomicWait32(ptr) |
65 : "=r" (rv), | 64 * |
66 "=m" (*atomic) | 65 * \brief A safer way to wait for a test-then-set lock to be cleared. |
67 : "0" (value), | 66 * |
68 "m" (*atomic)); | 67 * This assumes that the SDL_Sleep(0) call acts as a thread_yeild |
69 return rv; | 68 * operation. |
70 } | 69 * |
71 | 70 */ |
72 static __inline__ SDL_bool | 71 #define SDL_AtomicWait32(ptr) \ |
73 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | 72 { \ |
74 { | 73 while (!SDL_AtomicTestThenSet32(ptr) \ |
75 int rv; | 74 { \ |
76 __asm__ __volatile__("lock;" | 75 SDL_Sleep(0); \ |
77 "cmpxchgl %2, %1" | 76 }; \ |
78 : "=a" (rv), | 77 }; |
79 "=m" (*atomic) | 78 |
80 : "r" (newvalue), | 79 /** |
81 "m" (*atomic), | 80 * \def SDL_AtomicBusyWait64(ptr) |
82 "0" (oldvalue)); | 81 * |
83 return (SDL_bool)(rv == oldvalue); | 82 * \brief 64 bit version of busy wait |
84 } | 83 * |
85 | 84 * \sa SDL_AtomicBusyWait32 |
86 static __inline__ SDL_bool | 85 */ |
87 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | 86 #define SDL_AtomicBusyWait64(ptr) \ |
88 { | 87 { \ |
89 void* rv; | 88 while (!SDL_AtomicTestThenSet64(ptr) \ |
90 __asm__ __volatile__("lock;" | 89 { \ |
91 # if defined(__x86_64__) | 90 }; \ |
92 "cmpxchgq %q2, %1" | 91 }; |
93 # else | 92 |
94 "cmpxchgl %2, %1" | 93 /** |
95 # endif | 94 * \def SDL_AtomicWait64(ptr) |
96 : "=a" (rv), | 95 * |
97 "=m" (*atomic) | 96 * \brief 64 bit version of SDL_AtomicWait32 |
98 : "r" (newvalue), | 97 * |
99 "m" (*atomic), | 98 * \sa SDL_AtomicWait32 |
100 "0" (oldvalue)); | 99 */ |
101 return (SDL_bool)(rv == oldvalue); | 100 #define SDL_AtomicWait64(ptr) \ |
102 } | 101 { \ |
103 #elif defined(__GNUC__) && defined(__alpha__) | 102 while (!SDL_AtomicTestThenSet64(ptr) \ |
104 # define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory")) | 103 { \ |
105 # define ATOMIC_INT_CMP_XCHG(atomic,value) \ | 104 SDL_Sleep(0); \ |
106 ({ \ | 105 }; \ |
107 int rv,prev; \ | 106 }; |
108 __asm__ __volatile__(" mb\n" \ | |
109 "1: ldl_l %0,%2\n" \ | |
110 " cmpeq %0,%3,%1\n" \ | |
111 " beq %1,2f\n" \ | |
112 " mov %4,%1\n" \ | |
113 " stl_c %1,%2\n" \ | |
114 " beq %1,1b\n" \ | |
115 " mb\n" \ | |
116 "2:" \ | |
117 : "=&r" (prev), \ | |
118 "=&r" (rv) \ | |
119 : "m" (*(atomic)), \ | |
120 "Ir" (oldvalue), \ | |
121 "Ir" (newvalue) \ | |
122 : "memory"); \ | |
123 (rv != 0); \ | |
124 }) | |
125 | |
126 # if (SIZEOF_VOIDP == 4) | |
127 static __inline__ SDL_bool | |
128 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
129 { | |
130 int rv; | |
131 void* prev; | |
132 __asm__ __volatile__(" mb\n" | |
133 "1: ldl_l %0,%2\n" | |
134 " cmpeq %0,%3,%1\n" | |
135 " beq $1,2f\n" | |
136 " mov %4,%1\n" | |
137 " stl_c %1,%2\n" | |
138 " beq %1,1b\n" | |
139 " mb\n" | |
140 "2:" | |
141 : "=&r" (prev), | |
142 "=&r" (rv) | |
143 : "m" (*atomic), | |
144 "Ir" (oldvalue), | |
145 "Ir" (newvalue) | |
146 : "memory"); | |
147 return (SDL_bool)(rv != 0); | |
148 } | |
149 # elif (SIZEOF_VOIDP == 8) | |
150 static __inline__ SDL_bool | |
151 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
152 { | |
153 int rv; | |
154 void* prev; | |
155 __asm__ __volatile__(" mb\n" | |
156 "1: ldq_l %0,%2\n" | |
157 " cmpeq %0,%3,%1\n" | |
158 " beq %1,2f\n" | |
159 " mov %4,%1\n" | |
160 " stq_c %1,%2\n" | |
161 " beq %1,1b\n" | |
162 " mb\n" | |
163 "2:" | |
164 : "=&r" (prev), | |
165 "=&r" (rv) | |
166 : "m" (*atomic), | |
167 "Ir" (oldvalue), | |
168 "Ir" (newvalue) | |
169 : "memory"); | |
170 return (SDL_bool)(rv != 0); | |
171 } | |
172 # else | |
173 # error "Your system has an unsupported pointer size" | |
174 # endif /* SIZEOF_VOIDP */ | |
175 #elif defined(__GNUC__) && defined(__sparc__) | |
176 # define ATOMIC_MEMORY_BARRIER \ | |
177 (__asm__ __volatile__("membar #LoadLoad | #LoadStore" \ | |
178 " | #StoreLoad | #StoreStore" : : : "memory")) | |
179 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue) \ | |
180 ({ \ | |
181 int rv; \ | |
182 __asm__ __volatile__("cas [%4], %2, %0" \ | |
183 : "=r" (rv), "=m" (*(atomic)) \ | |
184 : "r" (oldvalue), "m" (*(atomic)), \ | |
185 "r" (atomic), "0" (newvalue)); \ | |
186 rv == oldvalue; \ | |
187 }) | |
188 | |
189 # if (SIZEOF_VOIDP == 4) | |
190 static __inline__ SDL_bool | |
191 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
192 { | |
193 void* rv; | |
194 __asm__ __volatile__("cas [%4], %2, %0" | |
195 : "=r" (rv), | |
196 "=m" (*atomic) | |
197 : "r" (oldvalue), | |
198 "m" (*atomic), | |
199 "r" (atomic), | |
200 "0" (newvalue)); | |
201 return (SDL_bool)(rv == oldvalue); | |
202 } | |
203 # elif (SIZEOF_VOIDP == 8) | |
204 static __inline__ SDL_bool | |
205 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
206 { | |
207 void* rv; | |
208 void** a = atomic; | |
209 __asm__ __volatile__("casx [%4], %2, %0" | |
210 : "=r" (rv), | |
211 "=m" (*a) | |
212 : "r" (oldvalue), | |
213 "m" (*a), | |
214 "r" (a), | |
215 "0" (newvalue)); | |
216 return (SDL_bool)(rv == oldvalue); | |
217 } | |
218 # else | |
219 # error "Your system has an unsupported pointer size" | |
220 # endif /* SIZEOF_VOIDP */ | |
221 #elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)) | |
222 # define ATOMIC_MEMORY_BARRIER \ | |
223 (__asm__ __volatile__ ("sync" : : : "memory")) | |
224 static __inline__ void | |
225 SDL_atomic_int_add(volatile int* atomic, int value) | |
226 { | |
227 int rv,tmp; | |
228 __asm__ __volatile__("1: lwarx %0, 0, %3\n" | |
229 " add %1, %0, %4\n" | |
230 " stwcx. %1, 0, %3\n" | |
231 " bne- 1b" | |
232 : "=&b" (rv), | |
233 "=&r" (tmp), | |
234 "=m" (*atomic) | |
235 : "b" (atomic), | |
236 "r" (value), | |
237 "m" (*atomic) | |
238 : "cr0", | |
239 "memory"); | |
240 } | |
241 | |
242 static __inline__ int | |
243 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | |
244 { | |
245 int rv,tmp; | |
246 __asm__ __volatile__("1: lwarx %0, 0, %3\n" | |
247 " add %1, %0, %4\n" | |
248 " stwcx. %1, 0, %3\n" | |
249 " bne- 1b" | |
250 : "=&b" (rv), | |
251 "=&r" (tmp), | |
252 "=m" (*atomic) | |
253 : "b" (atomic), | |
254 "r" (value), | |
255 "m" (*atomic) | |
256 : "cr0", | |
257 "memory"); | |
258 return rv; | |
259 } | |
260 | |
261 # if (SIZEOF_VOIDP == 4) | |
262 static __inline__ SDL_bool | |
263 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
264 { | |
265 int rv; | |
266 __asm__ __volatile__(" sync\n" | |
267 "1: lwarx %0, 0, %1\n" | |
268 " subf. %0, %2, %0\n" | |
269 " bne 2f\n" | |
270 " stwcx. %3, 0, %1\n" | |
271 " bne- 1b\n" | |
272 "2: isync" | |
273 : "=&r" (rv) | |
274 : "b" (atomic), | |
275 "r" (oldvalue), | |
276 "r" | |
277 : "cr0", | |
278 "memory"); | |
279 return (SDL_bool)(rv == 0); | |
280 } | |
281 | |
282 static __inline__ SDL_bool | |
283 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
284 { | |
285 void* rv; | |
286 __asm__ __volatile__("sync\n" | |
287 "1: lwarx %0, 0, %1\n" | |
288 " subf. %0, %2, %0\n" | |
289 " bne 2f\n" | |
290 " stwcx. %3, 0, %1\n" | |
291 " bne- 1b\n" | |
292 "2: isync" | |
293 : "=&r" (rv) | |
294 : "b" (atomic), | |
295 "r" (oldvalue), | |
296 "r" (newvalue) | |
297 : "cr0", | |
298 "memory"); | |
299 return (SDL_bool)(rv == 0); | |
300 } | |
301 # elif (SIZEOF_VOIDP == 8) | |
302 static __inline__ SDL_bool | |
303 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
304 { | |
305 int rv; | |
306 __asm__ __volatile__(" sync\n" | |
307 "1: lwarx %0, 0, %1\n" | |
308 " extsw %0, %0\n" | |
309 " subf. %0, %2, %0\n" | |
310 " bne 2f\n" | |
311 " stwcx. %3, 0, %1\n" | |
312 " bne- 1b\n" | |
313 "2: isync" | |
314 : "=&r" (rv) | |
315 : "b" (atomic), | |
316 "r" (oldvalue), | |
317 "r" | |
318 : "cr0", | |
319 "memory"); | |
320 return (SDL_bool)(rv == 0); | |
321 } | |
322 | |
323 static __inline__ SDL_bool | |
324 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
325 { | |
326 void* rv; | |
327 __asm__ __volatile__("sync\n" | |
328 "1: ldarx %0, 0, %1\n" | |
329 " subf. %0, %2, %0\n" | |
330 " bne 2f\n" | |
331 " stdcx. %3, 0, %1\n" | |
332 " bne- 1b\n" | |
333 "2: isync" | |
334 : "=&r" (rv) | |
335 : "b" (atomic), | |
336 "r" (oldvalue), | |
337 "r" (newvalue) | |
338 : "cr0", | |
339 "memory"); | |
340 return (SDL_bool)(rv == 0); | |
341 } | |
342 # else | |
343 # error "Your system has an unsupported pointer size" | |
344 # endif /* SIZEOF_VOIDP */ | |
345 #elif defined(__GNUC__) && (defined(__IA64__) || defined(__ia64__)) | |
346 # define ATOMIC_MEMORY_BARRIER (__sync_synchronize()) | |
347 # define SDL_atomic_int_xchg_add(atomic, value) \ | |
348 (__sync_fetch_and_add((atomic),(value))) | |
349 # define SDL_atomic_int_add(atomic, value) \ | |
350 ((void)__sync_fetch_and_add((atomic),(value))) | |
351 # define SDL_atomic_int_cmp_xchg(atomic,oldvalue,newvalue) \ | |
352 (__sync_bool_compare_and_swap((atomic),(oldvalue),(newvalue))) | |
353 # define SDL_atomic_ptr_cmp_xchg(atomic,oldvalue,newvalue) \ | |
354 (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue))) | |
355 #elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__)) | |
356 static __inline__ int | |
357 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | |
358 { | |
359 int rv,tmp; | |
360 __asm__ __volatile__("1: \n" | |
361 ".set push \n" | |
362 ".set mips2 \n" | |
363 "ll %0,%3 \n" | |
364 "addu %1,%4,%0 \n" | |
365 "sc %1,%2 \n" | |
366 ".set pop \n" | |
367 "beqz %1,1b \n" | |
368 : "=&r" (rv), | |
369 "=&r" (tmp), | |
370 "=m" (*atomic) | |
371 : "m" (*atomic), | |
372 "r" (value) | |
373 : "memory"); | |
374 return rv; | |
375 } | |
376 | |
377 static __inline__ void | |
378 SDL_atomic_int_add(volatile int* atomic, int value) | |
379 { | |
380 int rv; | |
381 __asm__ __volatile__("1: \n" | |
382 ".set push \n" | |
383 ".set mips2 \n" | |
384 "ll %0,%2 \n" | |
385 "addu %0,%3,%0 \n" | |
386 "sc %0,%1 \n" | |
387 ".set pop \n" | |
388 "beqz %0,1b \n" | |
389 : "=&r" (rv), | |
390 "=m" (*atomic) | |
391 : "m" (*atomic), | |
392 "r" (value) | |
393 : "memory"); | |
394 } | |
395 | |
396 static __inline__ SDL_bool | |
397 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
398 { | |
399 int rv; | |
400 __asm__ __volatile__(" .set push \n" | |
401 " .set noat \n" | |
402 " .set mips3 \n" | |
403 "1: ll %0, %2 \n" | |
404 " bne %0, %z3, 2f \n" | |
405 " .set mips0 \n" | |
406 " move $1, %z4 \n" | |
407 " .set mips3 \n" | |
408 " sc $1, %1 \n" | |
409 " beqz $1, 1b \n" | |
410 " sync \n" | |
411 "2: \n" | |
412 " .set pop \n" | |
413 : "=&r" (rv), | |
414 "=R" (*atomic) | |
415 : "R" (*atomic), | |
416 "Jr" (oldvalue), | |
417 "Jr" (newvalue) | |
418 : "memory"); | |
419 return (SDL_bool)rv; | |
420 } | |
421 | |
422 static __inline__ SDL_bool | |
423 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
424 { | |
425 int rv; | |
426 __asm__ __volatile__(" .set push \n" | |
427 " .set noat \n" | |
428 " .set mips3 \n" | |
429 # if defined(__mips64) | |
430 "1: lld %0, %2 \n" | |
431 # else | |
432 "1: ll %0, %2 \n" | |
433 # endif | |
434 " bne %0, %z3, 2f \n" | |
435 " move $1, %z4 \n" | |
436 # if defined(__mips64) | |
437 " sc $1, %1 \n" | |
438 # else | |
439 " scd $1, %1 \n" | |
440 # endif | |
441 " beqz $1, 1b \n" | |
442 " sync \n" | |
443 "2: \n" | |
444 " .set pop \n" | |
445 : "=&r" (rv), | |
446 "=R" (*atomic) | |
447 : "R" (*atomic), | |
448 "Jr" (oldvalue), | |
449 "Jr" (newvalue) | |
450 : "memory"); | |
451 return (SDL_bool)rv; | |
452 } | |
453 #elif defined(__GNUC__) && defined(__m68k__) | |
454 static __inline__ int | |
455 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | |
456 { | |
457 int rv = *atomic; | |
458 int tmp; | |
459 __asm__ __volatile__("1: move%.l %0,%1 \n" | |
460 " add%.l %2,%1 \n" | |
461 " cas%.l %0,%1,%3 \n" | |
462 " jbne 1b \n" | |
463 : "=d" (rv), | |
464 "=&d" (tmp) | |
465 : "d" (value), | |
466 "m" (*atomic), | |
467 "0" (rv) | |
468 : "memory"); | |
469 return (SDL_bool)rv; | |
470 } | |
471 | |
472 static __inline__ void | |
473 SDL_atomic_int_add(volatile int* atomic, int value) | |
474 { | |
475 __asm__ __volatile__("add%.l %0,%1" | |
476 : | |
477 : "id" (value), | |
478 "m" (*atomic) | |
479 : "memory"); | |
480 } | |
481 | |
482 static __inline__ SDL_bool | |
483 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
484 { | |
485 char rv; | |
486 int readvalue; | |
487 __asm__ __volatile__("cas%.l %2,%3,%1\n" | |
488 "seq %0" | |
489 : "=dm" (rv), | |
490 "=m" (*atomic), | |
491 "=d" (readvalue) | |
492 : "d" (newvalue), | |
493 "m" (*atomic), | |
494 "2" (oldvalue)); | |
495 return (SDL_bool)rv; | |
496 } | |
497 | |
498 static __inline__ SDL_bool | |
499 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
500 { | |
501 char rv; | |
502 int readvalue; | |
503 __asm__ __volatile__("cas%.l %2,%3,%1\n" | |
504 "seq %0" | |
505 : "=dm" (rv), | |
506 "=m" (*atomic), | |
507 "=d" (readvalue) | |
508 : "d" (newvalue), | |
509 "m" (*atomic), | |
510 "2" (oldvalue)); | |
511 return (SDL_bool)rv; | |
512 } | |
513 #elif defined(__GNUC__) && defined(__s390__) | |
514 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue) \ | |
515 ({ \ | |
516 int rv = oldvalue; \ | |
517 __asm__ __volatile__("cs %0, %2, %1" \ | |
518 : "+d" (rv), \ | |
519 "=Q" (*(atomic)) \ | |
520 : "d" (newvalue), \ | |
521 "m" (*(atomic)) \ | |
522 : "cc"); \ | |
523 rv == oldvalue; \ | |
524 }) | |
525 # if (SIZEOF_VOIDP == 4) | |
526 static __inline__ SDL_bool | |
527 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
528 { | |
529 void* rv = oldvalue; | |
530 __asm__ __volatile__("cs %0, %2, %1" | |
531 : "+d" (rv), | |
532 "=Q" (*atomic) | |
533 : "d" (newvalue), | |
534 "m" (*atomic) | |
535 : "cc"); | |
536 return (SDL_bool)(rv == oldvalue); | |
537 } | |
538 # elif (SIZEOF_VOIDP == 8) | |
539 static __inline__ SDL_bool | |
540 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
541 { | |
542 void* rv = oldvalue; | |
543 void** a = atomic; | |
544 __asm__ __volatile__("csg %0, %2, %1" | |
545 : "+d" (rv), | |
546 "=Q" (*a) | |
547 : "d" ((long)(newvalue)), | |
548 "m" (*a) | |
549 : "cc"); | |
550 return (SDL_bool)(rv == oldvalue); | |
551 } | |
552 # else | |
553 # error "Your system has an unsupported pointer size" | |
554 # endif /* SIZEOF_VOIDP */ | |
555 #elif defined(__WIN32__) | |
556 # include <windows.h> | |
557 static __inline__ int | |
558 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | |
559 { | |
560 return InterlockedExchangeAdd(atomic, value); | |
561 } | |
562 | |
563 static __inline__ void | |
564 SDL_atomic_int_add(volatile int* atomic, int value) | |
565 { | |
566 InterlockedExchangeAdd(atomic, value); | |
567 } | |
568 | |
569 # if (WINVER > 0X0400) | |
570 static __inline__ SDL_bool | |
571 SDL_atmoic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
572 { | |
573 return (SDL_bool)(InterlockedCompareExchangePointer((PVOID*)atomic, | |
574 (PVOID)newvalue, | |
575 (PVOID)oldvalue) == oldvalue); | |
576 } | |
577 | |
578 | |
579 static __inline__ SDL_bool | |
580 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
581 { | |
582 return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) == oldvalue); | |
583 } | |
584 # else /* WINVER <= 0x0400 */ | |
585 # if (SIZEOF_VOIDP != 4) | |
586 # error "InterlockedCompareExchangePointer needed" | |
587 # endif | |
588 | |
589 static __inline__ SDL_bool | |
590 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
591 { | |
592 return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue); | |
593 } | |
594 | |
595 static __inline__ SDL_bool | |
596 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue) | |
597 { | |
598 return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue); | |
599 } | |
600 # endif | |
601 #else /* when all else fails */ | |
602 # define SDL_ATOMIC_OPS_NOT_SUPPORTED | |
603 # warning "Atomic Ops for this platform not supported!" | |
604 static __inline__ int | |
605 SDL_atomic_int_xchg_add(volatile int* atomic, int value) | |
606 { | |
607 int rv = *atomic; | |
608 *(atomic) += value; | |
609 return rv; | |
610 } | |
611 | |
612 static __inline__ SDL_bool | |
613 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue) | |
614 { | |
615 return (*atomic == oldvalue) ? | |
616 ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE; | |
617 } | |
618 | |
619 static __inline__ void | |
620 SDL_atomic_int_add(volatile int* atomic, int value) | |
621 { | |
622 *atomic += value; | |
623 } | |
624 #endif /* arch & platforms */ | |
625 | |
626 /* *INDENT-ON* */ | 107 /* *INDENT-ON* */ |
627 | 108 |
628 #ifdef ATOMIC_INT_CMP_XCHG | 109 /* Function prototypes */ |
629 static __inline__ SDL_bool | 110 |
630 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue) | 111 /** |
631 { | 112 * \fn int SDL_AtomicExchange32(Uint32 * ptr, Uint32 value) |
632 return (SDL_bool) ATOMIC_INT_CMP_XCHG(atomic, oldvalue, newvalue); | 113 * |
633 } | 114 * \brief Atomically exchange two 32 bit values. |
634 | 115 * |
635 static __inline__ int | 116 * \return the value point to by ptr. |
636 SDL_atomic_int_xchg_add(volatile int *atomic, int value) | 117 * |
637 { | 118 * \param ptr points to the value to be fetched from *ptr. |
638 int rv; | 119 * \param value is value to be stored at *ptr. |
639 do | 120 * |
640 rv = *atomic; | 121 * The current value stored at *ptr is returned and it is replaced |
641 while (!ATOMIC_INT_CMP_XCHG(atomic, rv, rv + value)); | 122 * with value. This function can be used to implement SDL_TestThenSet. |
642 return rv; | 123 * |
643 } | 124 */ |
644 | 125 extern DECLSPEC Uint32 SDLCALL SDL_AtomicExchange32(Uint32 * ptr, Uint32 value); |
645 static __inline__ void | 126 /** |
646 SDL_atomic_int_add(volatile int *atomic, int value) | 127 * \fn int SDL_AtomicCompareThenSet32(Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue) |
647 { | 128 * |
648 int rv; | 129 * \brief If *ptr == oldvalue then replace the contents of *ptr by new value. |
649 do | 130 * |
650 rv = *atomic; | 131 * \return true if the newvalue was stored. |
651 while (!ATOMIC_INT_CMP_XCHG(atomic, rv, rv + value)); | 132 * |
652 } | 133 * \param *ptr is the value to be compared and replaced. |
653 #endif /* ATOMIC_CMP_XCHG */ | 134 * \param oldvalue is value to be compared to *ptr. |
654 | 135 * \param newvalue is value to be stored at *ptr. |
655 #ifdef ATOMIC_MEMORY_BARRIER | 136 * |
656 # define SDL_atomic_int_get(atomic) \ | 137 */ |
657 (ATOMIC_MEMORY_BARRIER,*(atomic)) | 138 extern DECLSPEC SDL_bool SDLCALL SDL_AtomicCompareThenSet32(Uint32 * ptr, |
658 # define SDL_atomic_int_set(atomic,value) \ | 139 Uint32 oldvalue, Uint32 newvalue); |
659 (*(atomic)=value,ATOMIC_MEMORY_BARRIER) | 140 /** |
660 #else | 141 * \fn SDL_bool SDL_AtomicTestThenSet32(Uint32 * ptr); |
661 # define SDL_atomic_int_get(atomic) (*(atomic)) | 142 * |
662 # define SDL_atomic_int_set(atomic, newvalue) ((void)(*(atomic) = (newvalue))) | 143 * \brief Check to see if *ptr == 0 and set it to non-zero. |
663 #endif /* MEMORY_BARRIER_NEEDED */ | 144 * |
664 | 145 * \return SDL_True if the value pointed to by ptr was zero and |
665 #define SDL_atomic_int_inc(atomic) (SDL_atomic_int_add((atomic),1)) | 146 * SDL_False if it was not zero |
666 #define SDL_atomic_int_dec_test(atomic) (SDL_atomic_int_xchg_add((atomic),-1) == 1) | 147 * |
148 * \param ptr points to the value to be tested and set. | |
149 * | |
150 */ | |
151 extern DECLSPEC SDL_bool SDLCALL SDL_AtomicTestThenSet32(Uint32 * ptr); | |
152 /** | |
153 * \fn void SDL_AtomicClear32(Uint32 * ptr); | |
154 * | |
155 * \brief set the value pointed to by ptr to be zero. | |
156 * | |
157 * \param ptr address of the value to be set to zero | |
158 * | |
159 */ | |
160 extern DECLSPEC void SDLCALL SDL_AtomicClear32(Uint32 * ptr); | |
161 /** | |
162 * \fn Uint32 SDL_AtomicFetchThenIncrement32(Uint32 * ptr); | |
163 * | |
164 * \brief fetch the current value of *ptr and then increment that | |
165 * value in place. | |
166 * | |
167 * \return the value before it was incremented. | |
168 * | |
169 * \param ptr address of the value to fetch and increment | |
170 * | |
171 */ | |
172 extern DECLSPEC Uint32 SDLCALL SDL_AtomicFetchThenIncrement32(Uint32 * ptr); | |
173 /** | |
174 * \fn Uint32 SDL_AtomicFetchThenDecrement32(Uint32 * ptr); | |
175 * | |
176 * \brief fetch *ptr and then decrement the value in place. | |
177 * | |
178 * \return the value before it was decremented. | |
179 * | |
180 * \param ptr address of the value to fetch and drement | |
181 * | |
182 */ | |
183 extern DECLSPEC Uint32 SDLCALL SDL_AtomicFetchThenDecrement32(Uint32 * ptr); | |
184 /** | |
185 * \fn Uint32 SDL_AtomicFetchThenAdd32(Uint32 * ptr, Uint32 value); | |
186 * | |
187 * \brief fetch the current value at ptr and then add value to *ptr. | |
188 * | |
189 * \return *ptr before the addition took place. | |
190 * | |
191 * \param ptr the address of data we are changing. | |
192 * \param value the value to add to *ptr. | |
193 * | |
194 */ | |
195 extern DECLSPEC Uint32 SDLCALL SDL_AtomicFetchThenAdd32(Uint32 * ptr, Uint32 value); | |
196 /** | |
197 * \fn Uint32 SDL_AtomicFetchThenSubtract32(Uint32 * ptr, Uint32 value); | |
198 * | |
199 * \brief Fetch *ptr and then subtract value from it. | |
200 * | |
201 * \return *ptr before the subtraction took place. | |
202 * | |
203 * \param ptr the address of the data being changed. | |
204 * \param value the value to subtract from *ptr. | |
205 * | |
206 */ | |
207 extern DECLSPEC Uint32 SDLCALL SDL_AtomicFetchThenSubtract32(Uint32 * ptr, Uint32 value); | |
208 /** | |
209 * \fn Uint32 SDL_AtomicIncrementThenFetch32(Uint32 * ptr); | |
210 * | |
211 * \brief Add one to the data pointed to by ptr and return that value. | |
212 * | |
213 * \return the incremented value. | |
214 * | |
215 * \param ptr address of the data to increment. | |
216 * | |
217 */ | |
218 extern DECLSPEC Uint32 SDLCALL SDL_AtomicIncrementThenFetch32(Uint32 * ptr); | |
219 /** | |
220 * \fn Uint32 SDL_AtomicDecrementThenFetch32(Uint32 * ptr); | |
221 * | |
222 * \brief Subtract one from data pointed to by ptr and return the new value. | |
223 * | |
224 * \return The decremented value. | |
225 * | |
226 * \param ptr The address of the data to decrement. | |
227 * | |
228 */ | |
229 extern DECLSPEC Uint32 SDLCALL SDL_AtomicDecrementThenFetch32(Uint32 * ptr); | |
230 /** | |
231 * \fn Uint32 SDL_AtomicAddThenFetch32(Uint32 * ptr, Uint32 value); | |
232 * | |
233 * \brief Add value to the data pointed to by ptr and return result. | |
234 * | |
235 * \return The sum of *ptr and value. | |
236 * | |
237 * \param ptr The address of the data to be modified. | |
238 * \param value The value to be added. | |
239 * | |
240 */ | |
241 extern DECLSPEC Uint32 SDLCALL SDL_AtomicAddThenFetch32(Uint32 * ptr, Uint32 value); | |
242 /** | |
243 * \fn Uint32 SDL_AtomicSubtractThenFetch32(Uint32 * ptr, Uint32 value); | |
244 * | |
245 * \brief Subtract value from the data pointed to by ptr and return the result. | |
246 * | |
247 * \return the difference between *ptr and value. | |
248 * | |
249 * \param ptr The address of the data to be modified. | |
250 * \param value The value to be subtracted. | |
251 * | |
252 */ | |
253 extern DECLSPEC Uint32 SDLCALL SDL_AtomicSubtractThenFetch32(Uint32 * ptr, Uint32 value); | |
254 | |
255 #ifdef SDL_HAS_64BIT_TYPE | |
256 | |
257 extern DECLSPEC Uint64 SDLCALL SDL_AtomicExchange64(Uint64 * ptr, Uint64 value); | |
258 extern DECLSPEC SDL_bool SDLCALL SDL_AtomicCompareThenSet64(Uint64 * ptr, | |
259 Uint64 oldvalue, Uint64 newvalue); | |
260 extern DECLSPEC SDL_bool SDLCALL SDL_AtomicTestThenSet64(Uint64 * ptr); | |
261 extern DECLSPEC void SDLCALL SDL_AtomicClear64(Uint64 * ptr); | |
262 extern DECLSPEC Uint64 SDLCALL SDL_AtomicFetchThenIncrement64(Uint64 * ptr); | |
263 extern DECLSPEC Uint64 SDLCALL SDL_AtomicFetchThenDecrement64(Uint64 * ptr); | |
264 extern DECLSPEC Uint64 SDLCALL SDL_AtomicFetchThenAdd64(Uint64 * ptr, Uint64 value); | |
265 extern DECLSPEC Uint64 SDLCALL SDL_AtomicFetchThenSubtract64(Uint64 * ptr, Uint64 value); | |
266 extern DECLSPEC Uint64 SDLCALL SDL_AtomicIncrementThenFetch64(Uint64 * ptr); | |
267 extern DECLSPEC Uint64 SDLCALL SDL_AtomicDecrementThenFetch64(Uint64 * ptr); | |
268 extern DECLSPEC Uint64 SDLCALL SDL_AtomicAddThenFetch64(Uint64 * ptr, Uint64 value); | |
269 extern DECLSPEC Uint64 SDLCALL SDL_AtomicSubtractThenFetch64(Uint64 * ptr, Uint64 value); | |
270 #endif | |
667 | 271 |
668 /* Ends C function definitions when using C++ */ | 272 /* Ends C function definitions when using C++ */ |
669 #ifdef __cplusplus | 273 #ifdef __cplusplus |
670 /* *INDENT-OFF* */ | 274 /* *INDENT-OFF* */ |
671 } | 275 } |