Mercurial > sdl-ios-xcode
comparison: src/atomic/SDL_atomic.c @ 5003:3a95a2b93eb3
Updated the atomic API for better use cases

author      Sam Lantinga <slouken@libsdl.org>
date        Sat, 15 Jan 2011 12:41:59 -0800
children    0c72ae7b7cb2
comparison  5002:c5b9486688ce vs 5003:3a95a2b93eb3

/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2010 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    Sam Lantinga
    slouken@libsdl.org
*/
#include "SDL_stdinc.h"

#include "SDL_atomic.h"
/*
  If any of the operations are not provided then we must emulate some
  of them.  That means we need a nice implementation of spin locks
  that avoids the "one big lock" problem.  We use a vector of spin
  locks and pick which one to use based on the address of the operand
  of the function.

  To generate the index of the lock we first shift by 3 bits to get
  rid of the zero bits that result from the 32 and 64 bit alignment
  of the data.  We then mask off all but 5 bits and use those 5 bits
  as an index into the table.

  Picking the lock this way ensures that accesses to the same data at
  the same time will go to the same lock.  OTOH, accesses to different
  data have only a 1/32 chance of hitting the same lock.  That should
  pretty much eliminate the chance of several atomic operations on
  different data waiting on the same "big lock".  If it doesn't, the
  table of locks can be expanded to a new size, so long as the new
  size is a power of two.

  Contributed by Bob Pendleton, bob@pendleton.com
*/
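
/*
  A worked example of the index computation (illustrative note, not
  part of the original source): for an operand at address 0x100F7C30,
  (0x100F7C30 >> 3) == 0x0201EF86 and (0x0201EF86 & 0x1f) == 6, so
  that operand is guarded by locks[6].  Repeated operations on the
  same address always hash to the same slot, while unrelated
  addresses spread across all 32 slots.
*/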

static SDL_SpinLock locks[32];

static __inline__ void
enterLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicLock(&locks[index]);
}

static __inline__ void
leaveLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicUnlock(&locks[index]);
}

#ifndef SDL_AtomicSet
int
SDL_AtomicSet(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value = value;
    leaveLock(a);

    return oldvalue;
}
#endif

#ifndef SDL_AtomicGet
int
SDL_AtomicGet(SDL_atomic_t *a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the variable have the necessary memory barriers.
    */
    return a->value;
}
#endif

#ifndef SDL_AtomicAdd
int
SDL_AtomicAdd(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value += value;
    leaveLock(a);

    return oldvalue;
}
#endif

#ifndef SDL_AtomicIncRef
void
SDL_AtomicIncRef(SDL_atomic_t *a)
{
    SDL_AtomicAdd(a, 1);
}
#endif

#ifndef SDL_AtomicDecRef
SDL_bool
SDL_AtomicDecRef(SDL_atomic_t *a)
{
    /* True when the old value was 1, i.e. this drop took the count to zero */
    return SDL_AtomicAdd(a, -1) == 1;
}
#endif
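
/*
  Usage sketch (illustrative, not part of the original source): a
  reference-counted object built on the two operations above.  The
  struct and destructor names are hypothetical.
*/
typedef struct {
    SDL_atomic_t refcount;
    /* ... object data ... */
} RefCounted;

static void
RefCounted_Release(RefCounted *obj)
{
    /* SDL_AtomicDecRef is true once the last reference is dropped */
    if (SDL_AtomicDecRef(&obj->refcount)) {
        SDL_free(obj);
    }
}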

#ifndef SDL_AtomicCAS
int
SDL_AtomicCAS(SDL_atomic_t *a, int oldval, int newval)
{
    int prevval;

    enterLock(a);
    prevval = a->value;
    if (prevval == oldval) {
        a->value = newval;
    }
    leaveLock(a);

    return prevval;
}
#endif
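
/*
  Sketch (illustrative, not part of the original source): SDL_AtomicCAS
  is the building block for lock-free variants of the other operations.
  This hypothetical helper mirrors the retry loop that SDL_AtomicSetPtr
  uses below.
*/
static int
AtomicAddViaCAS(SDL_atomic_t *a, int value)
{
    int oldvalue;
    do {
        oldvalue = SDL_AtomicGet(a);
        /* Retry if another thread changed the value between the read
           and the compare-and-swap. */
    } while (SDL_AtomicCAS(a, oldvalue, oldvalue + value) != oldvalue);
    return oldvalue;
}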

#ifndef SDL_AtomicSetPtr
void
SDL_AtomicSetPtr(void** a, void* value)
{
    void *prevval;
    do {
        prevval = *a;
        /* If the CAS returns something other than prevval, another
           thread raced us; reread and try again. */
    } while (SDL_AtomicCASPtr(a, prevval, value) != prevval);
}
#endif

#ifndef SDL_AtomicGetPtr
void*
SDL_AtomicGetPtr(void** a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the pointer have the necessary memory barriers.
    */
    return *a;
}
#endif

#ifndef SDL_AtomicCASPtr
void* SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
{
    void *prevval;

    enterLock(a);
    prevval = *a;
    if (prevval == oldval) {
        *a = newval;
    }
    leaveLock(a);

    return prevval;
}
#endif

/* vi: set ts=4 sw=4 expandtab: */