sdl-ios-xcode: comparison of include/SDL_atomic.h
changesets 3179:9b34679fda8b → 3180:77d6336711fc

First commit for SDL atomic operations.
On my linux box it compiles and installs correctly and testatomic runs without errors.

author:    Bob Pendleton <bob@pendleton.com>
date:      Tue, 09 Jun 2009 17:33:44 +0000
parents:
children:  030899df1af5
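The commit message mentions testatomic; as a rough orientation before the header itself, here is a minimal single-threaded sketch of how the integer operations declared in this file could be called. The scaffolding (main, printf) is assumed and is not part of the changeset; the semantics follow the definitions below.

/* Minimal usage sketch (assumed scaffolding, not part of this changeset). */
#include <stdio.h>
#include "SDL_atomic.h"

int main(void)
{
    volatile int counter = 0;
    int previous;

    SDL_atomic_int_set(&counter, 10);                /* store, fenced where a barrier exists */
    SDL_atomic_int_add(&counter, 5);                 /* counter is now 15 */
    previous = SDL_atomic_int_xchg_add(&counter, 1); /* returns 15, counter is now 16 */

    if (SDL_atomic_int_cmp_xchg(&counter, 16, 42)) {
        /* the compare-and-swap succeeded, counter is now 42 */
    }

    printf("previous=%d current=%d\n", previous, SDL_atomic_int_get(&counter));
    return 0;
}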
/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2006 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    Sam Lantinga
    slouken@libsdl.org
*/

/**
 * \file SDL_atomic.h
 *
 * Atomic int and pointer magic
 */

#ifndef _SDL_atomic_h_
#define _SDL_atomic_h_


#include "SDL_stdinc.h"
#include "SDL_platform.h"

#include "begin_code.h"

/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
/* *INDENT-OFF* */
extern "C" {
/* *INDENT-ON* */
#endif

#if defined(__GNUC__) && (defined(i386) || defined(__i386__) || defined(__x86_64__))
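/* GCC on x86/x86-64: lock-prefixed addl/xaddl/cmpxchgl (cmpxchgq for 64-bit
   pointers) make the read-modify-write sequences atomic across processors. */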
static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    __asm__ __volatile__("lock;"
                         "addl %1, %0"
                         : "=m" (*atomic)
                         : "ir" (value),
                           "m" (*atomic));
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv;
    __asm__ __volatile__("lock;"
                         "xaddl %0, %1"
                         : "=r" (rv),
                           "=m" (*atomic)
                         : "0" (value),
                           "m" (*atomic));
    return rv;
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("lock;"
                         "cmpxchgl %2, %1"
                         : "=a" (rv),
                           "=m" (*atomic)
                         : "r" (newvalue),
                           "m" (*atomic),
                           "0" (oldvalue));
    return (rv == oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    __asm__ __volatile__("lock;"
# if defined(__x86_64__)
                         "cmpxchgq %q2, %1"
# else
                         "cmpxchgl %2, %1"
# endif
                         : "=a" (rv),
                           "=m" (*atomic)
                         : "r" (newvalue),
                           "m" (*atomic),
                           "0" (oldvalue));
    return (rv == oldvalue);
}
#elif defined(__GNUC__) && defined(__alpha__)
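/* GCC on Alpha: load-locked/store-conditional retry loops (ldl_l/stl_c, or
   ldq_l/stq_c for 64-bit pointers), bracketed by "mb" memory barriers. */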
# define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory"))
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)          \
    ({                                                          \
        int rv,prev;                                            \
        __asm__ __volatile__("   mb\n"                          \
                             "1: ldl_l %0,%2\n"                 \
                             "   cmpeq %0,%3,%1\n"              \
                             "   beq   %1,2f\n"                 \
                             "   mov   %4,%1\n"                 \
                             "   stl_c %1,%2\n"                 \
                             "   beq   %1,1b\n"                 \
                             "   mb\n"                          \
                             "2:"                               \
                             : "=&r" (prev),                    \
                               "=&r" (rv)                       \
                             : "m" (*(atomic)),                 \
                               "Ir" (oldvalue),                 \
                               "Ir" (newvalue)                  \
                             : "memory");                       \
        (rv != 0);                                              \
    })

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    int rv;
    void* prev;
    __asm__ __volatile__("   mb\n"
                         "1: ldl_l %0,%2\n"
                         "   cmpeq %0,%3,%1\n"
                         "   beq   %1,2f\n"
                         "   mov   %4,%1\n"
                         "   stl_c %1,%2\n"
                         "   beq   %1,1b\n"
                         "   mb\n"
                         "2:"
                         : "=&r" (prev),
                           "=&r" (rv)
                         : "m" (*atomic),
                           "Ir" (oldvalue),
                           "Ir" (newvalue)
                         : "memory");
    return (rv != 0);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    int rv;
    void* prev;
    __asm__ __volatile__("   mb\n"
                         "1: ldq_l %0,%2\n"
                         "   cmpeq %0,%3,%1\n"
                         "   beq   %1,2f\n"
                         "   mov   %4,%1\n"
                         "   stq_c %1,%2\n"
                         "   beq   %1,1b\n"
                         "   mb\n"
                         "2:"
                         : "=&r" (prev),
                           "=&r" (rv)
                         : "m" (*atomic),
                           "Ir" (oldvalue),
                           "Ir" (newvalue)
                         : "memory");
    return (rv != 0);
}
# else
# error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && defined(__sparc__)
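/* GCC on SPARC: cas (casx for 64-bit pointers) compare-and-swap; membar
   supplies the full memory barrier. */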
# define ATOMIC_MEMORY_BARRIER                                  \
    (__asm__ __volatile__("membar #LoadLoad | #LoadStore"       \
                          " | #StoreLoad | #StoreStore" : : : "memory"))
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)          \
    ({                                                          \
        int rv;                                                 \
        __asm__ __volatile__("cas [%4], %2, %0"                 \
                             : "=r" (rv), "=m" (*(atomic))      \
                             : "r" (oldvalue), "m" (*(atomic)), \
                               "r" (atomic), "0" (newvalue));   \
        rv == oldvalue;                                         \
    })

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    __asm__ __volatile__("cas [%4], %2, %0"
                         : "=r" (rv),
                           "=m" (*atomic)
                         : "r" (oldvalue),
                           "m" (*atomic),
                           "r" (atomic),
                           "0" (newvalue));
    return (rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    void** a = atomic;
    __asm__ __volatile__("casx [%4], %2, %0"
                         : "=r" (rv),
                           "=m" (*a)
                         : "r" (oldvalue),
                           "m" (*a),
                           "r" (a),
                           "0" (newvalue));
    return (rv == oldvalue);
}
# else
# error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC))
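/* GCC on PowerPC: lwarx/stwcx. (ldarx/stdcx. for 64-bit pointers)
   reservation loops; sync/isync order the accesses around the swap. */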
# define ATOMIC_MEMORY_BARRIER \
    (__asm__ __volatile__ ("sync" : : : "memory"))
static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    int rv,tmp;
    __asm__ __volatile__("1: lwarx  %0, 0, %3\n"
                         "   add    %1, %0, %4\n"
                         "   stwcx. %1, 0, %3\n"
                         "   bne-   1b"
                         : "=&b" (rv),
                           "=&r" (tmp),
                           "=m" (*atomic)
                         : "b" (atomic),
                           "r" (value),
                           "m" (*atomic)
                         : "cr0",
                           "memory");
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv,tmp;
    __asm__ __volatile__("1: lwarx  %0, 0, %3\n"
                         "   add    %1, %0, %4\n"
                         "   stwcx. %1, 0, %3\n"
                         "   bne-   1b"
                         : "=&b" (rv),
                           "=&r" (tmp),
                           "=m" (*atomic)
                         : "b" (atomic),
                           "r" (value),
                           "m" (*atomic)
                         : "cr0",
                           "memory");
    return rv;
}

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("   sync\n"
                         "1: lwarx  %0, 0, %1\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stwcx. %3, 0, %1\n"
                         "   bne-   1b\n"
                         "2: isync"
                         : "=&r" (rv)
                         : "b" (atomic),
                           "r" (oldvalue),
                           "r" (newvalue)
                         : "cr0",
                           "memory");
    return (rv == 0);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    __asm__ __volatile__("sync\n"
                         "1: lwarx  %0, 0, %1\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stwcx. %3, 0, %1\n"
                         "   bne-   1b\n"
                         "2: isync"
                         : "=&r" (rv)
                         : "b" (atomic),
                           "r" (oldvalue),
                           "r" (newvalue)
                         : "cr0",
                           "memory");
    return (rv == 0);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("   sync\n"
                         "1: lwarx  %0, 0, %1\n"
                         "   extsw  %0, %0\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stwcx. %3, 0, %1\n"
                         "   bne-   1b\n"
                         "2: isync"
                         : "=&r" (rv)
                         : "b" (atomic),
                           "r" (oldvalue),
                           "r" (newvalue)
                         : "cr0",
                           "memory");
    return (rv == 0);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    __asm__ __volatile__("sync\n"
                         "1: ldarx  %0, 0, %1\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stdcx. %3, 0, %1\n"
                         "   bne-   1b\n"
                         "2: isync"
                         : "=&r" (rv)
                         : "b" (atomic),
                           "r" (oldvalue),
                           "r" (newvalue)
                         : "cr0",
                           "memory");
    return (rv == 0);
}
# else
# error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && (defined(__IA64__) || defined(__ia64__))
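/* GCC on IA-64: map the operations onto the compiler's __sync_* builtins. */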
# define ATOMIC_MEMORY_BARRIER (__sync_synchronize())
# define SDL_atomic_int_xchg_add(atomic, value)                 \
    (__sync_fetch_and_add((atomic),(value)))
# define SDL_atomic_int_add(atomic, value)                      \
    ((void)__sync_fetch_and_add((atomic),(value)))
# define SDL_atomic_int_cmp_xchg(atomic,oldvalue,newvalue)      \
    (__sync_bool_compare_and_swap((atomic),(oldvalue),(newvalue)))
# define SDL_atomic_ptr_cmp_xchg(atomic,oldvalue,newvalue)      \
    (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue)))
#elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__))
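/* GCC on Linux/MIPS: ll/sc (lld/scd for 64-bit pointers) retry loops,
   with a sync after a successful store-conditional. */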
static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv,tmp;
    __asm__ __volatile__("1:               \n"
                         ".set  push       \n"
                         ".set  mips2      \n"
                         "ll    %0,%3      \n"
                         "addu  %1,%4,%0   \n"
                         "sc    %1,%2      \n"
                         ".set  pop        \n"
                         "beqz  %1,1b      \n"
                         : "=&r" (rv),
                           "=&r" (tmp),
                           "=m" (*atomic)
                         : "m" (*atomic),
                           "r" (value)
                         : "memory");
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    int rv;
    __asm__ __volatile__("1:               \n"
                         ".set  push       \n"
                         ".set  mips2      \n"
                         "ll    %0,%2      \n"
                         "addu  %0,%3,%0   \n"
                         "sc    %0,%1      \n"
                         ".set  pop        \n"
                         "beqz  %0,1b      \n"
                         : "=&r" (rv),
                           "=m" (*atomic)
                         : "m" (*atomic),
                           "r" (value)
                         : "memory");
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("   .set push        \n"
                         "   .set noat        \n"
                         "   .set mips3       \n"
                         "1: ll   %0, %2      \n"
                         "   bne  %0, %z3, 2f \n"
                         "   .set mips0       \n"
                         "   move $1, %z4     \n"
                         "   .set mips3       \n"
                         "   sc   $1, %1      \n"
                         "   beqz $1, 1b      \n"
                         "   sync             \n"
                         "2:                  \n"
                         "   .set pop         \n"
                         : "=&r" (rv),
                           "=R" (*atomic)
                         : "R" (*atomic),
                           "Jr" (oldvalue),
                           "Jr" (newvalue)
                         : "memory");
    /* rv holds the value that was loaded; the swap happened iff it matched */
    return (SDL_bool)(rv == oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv;
    __asm__ __volatile__("   .set push        \n"
                         "   .set noat        \n"
                         "   .set mips3       \n"
# if defined(__mips64)
                         "1: lld  %0, %2      \n"
# else
                         "1: ll   %0, %2      \n"
# endif
                         "   bne  %0, %z3, 2f \n"
                         "   move $1, %z4     \n"
# if defined(__mips64)
                         "   scd  $1, %1      \n"
# else
                         "   sc   $1, %1      \n"
# endif
                         "   beqz $1, 1b      \n"
                         "   sync             \n"
                         "2:                  \n"
                         "   .set pop         \n"
                         : "=&r" (rv),
                           "=R" (*atomic)
                         : "R" (*atomic),
                           "Jr" (oldvalue),
                           "Jr" (newvalue)
                         : "memory");
    return (SDL_bool)(rv == oldvalue);
}
#elif defined(__GNUC__) && defined(__m68k__)
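/* GCC on m68k: a cas%.l retry loop for the exchange-add, a single add%.l
   for the in-place add, and cas%.l plus seq for the compare-and-swap. */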
static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv = *atomic;
    int tmp;
    __asm__ __volatile__("1: move%.l %0,%1    \n"
                         "   add%.l  %2,%1    \n"
                         "   cas%.l  %0,%1,%3 \n"
                         "   jbne    1b       \n"
                         : "=d" (rv),
                           "=&d" (tmp)
                         : "d" (value),
                           "m" (*atomic),
                           "0" (rv)
                         : "memory");
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    __asm__ __volatile__("add%.l %0,%1"
                         :
                         : "id" (value),
                           "m" (*atomic)
                         : "memory");
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    char rv;
    int readvalue;
    __asm__ __volatile__("cas%.l %2,%3,%1\n"
                         "seq    %0"
                         : "=dm" (rv),
                           "=m" (*atomic),
                           "=d" (readvalue)
                         : "d" (newvalue),
                           "m" (*atomic),
                           "2" (oldvalue));
    return rv;
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    char rv;
    int readvalue;
    __asm__ __volatile__("cas%.l %2,%3,%1\n"
                         "seq    %0"
                         : "=dm" (rv),
                           "=m" (*atomic),
                           "=d" (readvalue)
                         : "d" (newvalue),
                           "m" (*atomic),
                           "2" (oldvalue));
    return rv;
}
#elif defined(__GNUC__) && defined(__s390__)
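/* GCC on S/390: cs (compare-and-swap) for ints and 32-bit pointers,
   csg for 64-bit pointers; the condition code reports success. */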
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)          \
    ({                                                          \
        int rv = oldvalue;                                      \
        __asm__ __volatile__("cs %0, %2, %1"                    \
                             : "+d" (rv),                       \
                               "=Q" (*(atomic))                 \
                             : "d" (newvalue),                  \
                               "m" (*(atomic))                  \
                             : "cc");                           \
        rv == oldvalue;                                         \
    })
# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv = oldvalue;
    __asm__ __volatile__("cs %0, %2, %1"
                         : "+d" (rv),
                           "=Q" (*atomic)
                         : "d" (newvalue),
                           "m" (*atomic)
                         : "cc");
    return (rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    void* rv = oldvalue;
    void** a = atomic;
    __asm__ __volatile__("csg %0, %2, %1"
                         : "+d" (rv),
                           "=Q" (*a)
                         : "d" ((long)(newvalue)),
                           "m" (*a)
                         : "cc");
    return (rv == oldvalue);
}
# else
# error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__WIN32__)
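/* Windows: build on the Interlocked* APIs; InterlockedCompareExchangePointer
   is only used when WINVER > 0x0400 makes it available. */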
# include <windows.h>
static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    return InterlockedExchangeAdd(atomic, value);
}

static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    InterlockedExchangeAdd(atomic, value);
}

# if (WINVER > 0x0400)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    return (SDL_bool)(InterlockedCompareExchangePointer((PVOID*)atomic,
                                                        (PVOID)newvalue,
                                                        (PVOID)oldvalue) == (PVOID)oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    return (InterlockedCompareExchangePointer((PVOID*)atomic, newvalue, oldvalue) == oldvalue);
}
# else /* WINVER <= 0x0400 */
# if (SIZEOF_VOIDP != 4)
# error "InterlockedCompareExchangePointer needed"
# endif

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
    return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
}
# endif
#else /* when all else fails */
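/* Non-atomic fallback: plain reads and writes, only correct when no other
   thread touches the value concurrently. */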
# define SDL_ATOMIC_OPS_NOT_SUPPORTED
# warning "Atomic Ops for this platform not supported!"
static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv = *atomic;
    *(atomic) += value;
    return rv;
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    return (*atomic == oldvalue) ?
        ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
}

static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    *atomic += value;
}
#endif /* arch & platforms */

#ifdef ATOMIC_INT_CMP_XCHG
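/* Architectures above that only provided the ATOMIC_INT_CMP_XCHG macro get
   the remaining integer operations built from compare-and-swap retry loops. */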
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
    return ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue);
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
    int rv;
    do
        rv = *atomic;
    while(!ATOMIC_INT_CMP_XCHG(atomic,rv,rv+value));
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
    int rv;
    do
        rv = *atomic;
    while(!ATOMIC_INT_CMP_XCHG(atomic,rv,rv+value));
}
#endif /* ATOMIC_INT_CMP_XCHG */

#ifdef ATOMIC_MEMORY_BARRIER
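/* Reads and writes get an explicit barrier where the architecture defines
   ATOMIC_MEMORY_BARRIER; otherwise plain accesses are used. */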
# define SDL_atomic_int_get(atomic) \
    (ATOMIC_MEMORY_BARRIER,*(atomic))
# define SDL_atomic_int_set(atomic,value) \
    (*(atomic)=(value),ATOMIC_MEMORY_BARRIER)
#else
# define SDL_atomic_int_get(atomic) (*(atomic))
# define SDL_atomic_int_set(atomic, newvalue) ((void)(*(atomic) = (newvalue)))
#endif /* ATOMIC_MEMORY_BARRIER */

#define SDL_atomic_int_inc(atomic) (SDL_atomic_int_add((atomic),1))
#define SDL_atomic_int_dec_test(atomic) (SDL_atomic_int_xchg_add((atomic),-1) == 1)

/* Ends C function definitions when using C++ */
#ifdef __cplusplus
/* *INDENT-OFF* */
}
/* *INDENT-ON* */
#endif

#include "close_code.h"

#endif /* _SDL_atomic_h_ */

/* vi: set ts=4 sw=4 expandtab: */