diff src/atomic/SDL_spinlock.c @ 5004:0c72ae7b7cb2

Added native atomic operations for Windows, Mac OS X, and GCC compiler intrinsics. Changed the CAS return value to bool so it's efficient with OSAtomicCompareAndSwap32Barrier(). Added an atomic test adapted from code by Michael Davidsaver.
author Sam Lantinga <slouken@libsdl.org>
date Sun, 16 Jan 2011 15:16:39 -0800
parents 3a95a2b93eb3
children 1bf9e38431ec
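
The first hunk makes every SDL_AtomicTryLock() branch answer the same question, "did we take the lock?", as a boolean: OSAtomicCompareAndSwap32Barrier() already returns one on Mac OS X, and on HAVE_GCC_ATOMICS builds the previous value returned by __sync_lock_test_and_set() is simply compared against 0. Below is a minimal standalone sketch of that GCC-intrinsics pattern, assuming a GCC-compatible compiler; the names spinlock_t, spin_trylock, and spin_unlock are illustrative, not SDL's API.

    /* Sketch of the test-and-set try-lock pattern adopted for
     * HAVE_GCC_ATOMICS builds; SDL's real lock type is SDL_SpinLock (an int). */
    #include <stdbool.h>

    typedef int spinlock_t;               /* 0 = unlocked, 1 = held */

    static bool spin_trylock(spinlock_t *lock)
    {
        /* __sync_lock_test_and_set() atomically stores 1 and returns the
         * previous value; the previous value was 0 iff we acquired the lock. */
        return __sync_lock_test_and_set(lock, 1) == 0;
    }

    static void spin_unlock(spinlock_t *lock)
    {
        /* Release the lock; __sync_lock_release() stores 0 with release
         * semantics, the stricter counterpart of the plain store the patch
         * uses in SDL_AtomicUnlock(). */
        __sync_lock_release(lock);
    }
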
--- a/src/atomic/SDL_spinlock.c	Sat Jan 15 12:41:59 2011 -0800
+++ b/src/atomic/SDL_spinlock.c	Sun Jan 16 15:16:39 2011 -0800
@@ -44,24 +44,22 @@
 #elif defined(__MACOSX__)
     return OSAtomicCompareAndSwap32Barrier(0, 1, lock);
 
-#elif defined(__GNUC__)
-#if defined(__arm__)
-#ifdef __ARM_ARCH_5__
+#elif defined(HAVE_GCC_ATOMICS)
+    return (__sync_lock_test_and_set(lock, 1) == 0);
+
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_ARCH_5__)
     int result;
     __asm__ __volatile__ (
         "swp %0, %1, [%2]\n"
         : "=&r,&r" (result) : "r,0" (1), "r,r" (lock) : "memory");
     return (result == 0);
-#else
+
+#elif defined(__GNUC__) && defined(__arm__)
     int result;
     __asm__ __volatile__ (
         "ldrex %0, [%2]\nteq   %0, #0\nstrexeq %0, %1, [%2]"
         : "=&r" (result) : "r" (1), "r" (lock) : "cc", "memory");
     return (result == 0);
-#endif
-#else
-    return (__sync_lock_test_and_set(lock, 1) == 0);
-#endif
 
 #else
     /* Need CPU instructions for spinlock here! */
@@ -81,19 +79,8 @@
 void
 SDL_AtomicUnlock(SDL_SpinLock *lock)
 {
-#if defined(__WIN32__)
-    *lock = 0;
-
-#elif defined(__MACOSX__)
+    /* Assuming atomic assignment operation and full memory barrier in lock */
     *lock = 0;
-
-#elif defined(__GNUC__) && !defined(__arm__)
-    __sync_lock_release(lock);
-
-#else
-    /* Assuming memory barrier in lock and integral assignment operation */
-    *lock = 0;
-#endif
 }
 
 /* vi: set ts=4 sw=4 expandtab: */
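
The second hunk collapses SDL_AtomicUnlock() to a single plain store on every platform, on the stated assumption that an aligned int assignment is atomic and that the lock operation already issued the required memory barrier. From a caller's point of view nothing changes; here is a short usage sketch of the pair, in which the helper increment_counter() and the static variables are illustrative, not SDL code.

    /* Usage sketch: guarding a shared counter with SDL's spinlock API.
     * SDL_SpinLock is a plain int; zero-initialization means "unlocked". */
    #include "SDL_atomic.h"

    static SDL_SpinLock s_lock = 0;
    static int s_counter = 0;

    void increment_counter(void)
    {
        SDL_AtomicLock(&s_lock);      /* spins until the try-lock succeeds */
        ++s_counter;                  /* critical section */
        SDL_AtomicUnlock(&s_lock);    /* after this patch: a plain *lock = 0,
                                         relying on the barrier taken in the lock */
    }
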