comparison src/video/SDL_blit_N.c @ 1662:782fd950bd46 SDL-1.3
Revamp of the video system in progress - adding support for multiple displays, multiple windows, and a full video mode selection API.
WARNING: None of the video drivers have been updated for the new API yet! The API is still under design and very fluid.
The code is now run through a consistent indent format:
indent -i4 -nut -nsc -br -ce
The headers are being converted to automatically generate doxygen documentation.
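For reference, the GNU indent flags above mean: -i4 (4-space indentation), -nut (spaces, never tabs), -nsc (no column of `*` down the left edge of block comments), -br (opening brace on the same line as the condition), and -ce ("cuddled" else: `} else {`). The other changes visible throughout this diff, such as return types on their own line and a space before parameter lists (`GetL3CacheSize (void)`), come from indent's GNU-style defaults. A small before/after sketch, excerpted from the first hunk below:

```c
/* before */
static size_t GetL3CacheSize( void )
{
    if( 0 != err ) return 0;
    return result;
}

/* after: indent -i4 -nut -nsc -br -ce */
static size_t
GetL3CacheSize (void)
{
    if (0 != err)
        return 0;
    return result;
}
```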
author | Sam Lantinga <slouken@libsdl.org> |
---|---|
date | Sun, 28 May 2006 13:04:16 +0000 |
parents | 14717b52abc0 |
children | 4da1ee79c9af |
comparison
1661:281d3f4870e5 | 1662:782fd950bd46 |
---|---|
36 #include <altivec.h> | 36 #include <altivec.h> |
37 #endif | 37 #endif |
38 #define assert(X) | 38 #define assert(X) |
39 #ifdef __MACOSX__ | 39 #ifdef __MACOSX__ |
40 #include <sys/sysctl.h> | 40 #include <sys/sysctl.h> |
41 static size_t GetL3CacheSize( void ) | 41 static size_t |
42 GetL3CacheSize (void) | |
42 { | 43 { |
43 const char key[] = "hw.l3cachesize"; | 44 const char key[] = "hw.l3cachesize"; |
44 u_int64_t result = 0; | 45 u_int64_t result = 0; |
45 size_t typeSize = sizeof( result ); | 46 size_t typeSize = sizeof (result); |
46 | 47 |
47 | 48 |
48 int err = sysctlbyname( key, &result, &typeSize, NULL, 0 ); | 49 int err = sysctlbyname (key, &result, &typeSize, NULL, 0); |
49 if( 0 != err ) return 0; | 50 if (0 != err) |
51 return 0; | |
50 | 52 |
51 return result; | 53 return result; |
52 } | 54 } |
53 #else | 55 #else |
54 static size_t GetL3CacheSize( void ) | 56 static size_t |
57 GetL3CacheSize (void) | |
55 { | 58 { |
56 /* XXX: Just guess G4 */ | 59 /* XXX: Just guess G4 */ |
57 return 2097152; | 60 return 2097152; |
58 } | 61 } |
59 #endif /* __MACOSX__ */ | 62 #endif /* __MACOSX__ */ |
60 | 63 |
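The Mac OS X branch above asks the kernel for the L3 cache size; here is a self-contained sketch of the same sysctl query (hw.l3cachesize reads as 0, or the call fails outright, on CPUs without an L3 cache, which GetBlitFeatures further down uses as a rough G5 heuristic):

```c
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main (void)
{
    u_int64_t l3 = 0;
    size_t len = sizeof (l3);
    if (sysctlbyname ("hw.l3cachesize", &l3, &len, NULL, 0) != 0)
        l3 = 0;                 /* treat failure as "no L3 cache" */
    printf ("L3 cache: %llu bytes\n", (unsigned long long) l3);
    return 0;
}
```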
61 #if (defined(__MACOSX__) && (__GNUC__ < 4)) | 64 #if (defined(__MACOSX__) && (__GNUC__ < 4)) |
62 #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \ | 65 #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \ |
63 (vector unsigned char) ( a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p ) | 66 (vector unsigned char) ( a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p ) |
64 #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \ | 67 #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \ |
65 (vector unsigned short) ( a,b,c,d,e,f,g,h ) | 68 (vector unsigned short) ( a,b,c,d,e,f,g,h ) |
66 #else | 69 #else |
67 #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \ | 70 #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \ |
68 (vector unsigned char) { a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p } | 71 (vector unsigned char) { a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p } |
69 #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \ | 72 #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \ |
70 (vector unsigned short) { a,b,c,d,e,f,g,h } | 73 (vector unsigned short) { a,b,c,d,e,f,g,h } |
71 #endif | 74 #endif |
72 | 75 |
73 #define UNALIGNED_PTR(x) (((size_t) x) & 0x0000000F) | 76 #define UNALIGNED_PTR(x) (((size_t) x) & 0x0000000F) |
74 #define VSWIZZLE32(a,b,c,d) (vector unsigned char) \ | 77 #define VSWIZZLE32(a,b,c,d) (vector unsigned char) \ |
99 #define VEC_ALIGNER(src) ((UNALIGNED_PTR(src)) \ | 102 #define VEC_ALIGNER(src) ((UNALIGNED_PTR(src)) \ |
100 ? vec_lvsl(0, src) \ | 103 ? vec_lvsl(0, src) \ |
101 : vec_add(vec_lvsl(8, src), vec_splat_u8(8))) | 104 : vec_add(vec_lvsl(8, src), vec_splat_u8(8))) |
102 | 105 |
103 /* Calculate the permute vector used for 32->32 swizzling */ | 106 /* Calculate the permute vector used for 32->32 swizzling */ |
104 static vector unsigned char calc_swizzle32(const SDL_PixelFormat *srcfmt, | 107 static vector unsigned char |
105 const SDL_PixelFormat *dstfmt) | 108 calc_swizzle32 (const SDL_PixelFormat * srcfmt, |
109 const SDL_PixelFormat * dstfmt) | |
106 { | 110 { |
107 /* | 111 /* |
108 * We have to assume that the bits that aren't used by other | 112 * We have to assume that the bits that aren't used by other |
109 * colors is alpha, and it's one complete byte, since some formats | 113 * colors is alpha, and it's one complete byte, since some formats |
110 * leave alpha with a zero mask, but we should still swizzle the bits. | 114 * leave alpha with a zero mask, but we should still swizzle the bits. |
111 */ | 115 */ |
112 /* ARGB */ | 116 /* ARGB */ |
113 const static struct SDL_PixelFormat default_pixel_format = { | 117 const static struct SDL_PixelFormat default_pixel_format = { |
114 NULL, 0, 0, | 118 NULL, 0, 0, |
115 0, 0, 0, 0, | 119 0, 0, 0, 0, |
116 16, 8, 0, 24, | 120 16, 8, 0, 24, |
117 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000, | 121 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000, |
118 0, 0}; | 122 0, 0 |
123 }; | |
119 if (!srcfmt) { | 124 if (!srcfmt) { |
120 srcfmt = &default_pixel_format; | 125 srcfmt = &default_pixel_format; |
121 } | 126 } |
122 if (!dstfmt) { | 127 if (!dstfmt) { |
123 dstfmt = &default_pixel_format; | 128 dstfmt = &default_pixel_format; |
124 } | 129 } |
125 const vector unsigned char plus = VECUINT8_LITERAL( | 130 const vector unsigned char plus = |
126 0x00, 0x00, 0x00, 0x00, | 131 VECUINT8_LITERAL (0x00, 0x00, 0x00, 0x00, |
127 0x04, 0x04, 0x04, 0x04, | 132 0x04, 0x04, 0x04, 0x04, |
128 0x08, 0x08, 0x08, 0x08, | 133 0x08, 0x08, 0x08, 0x08, |
129 0x0C, 0x0C, 0x0C, 0x0C ); | 134 0x0C, 0x0C, 0x0C, 0x0C); |
130 vector unsigned char vswiz; | 135 vector unsigned char vswiz; |
131 vector unsigned int srcvec; | 136 vector unsigned int srcvec; |
132 #define RESHIFT(X) (3 - ((X) >> 3)) | 137 #define RESHIFT(X) (3 - ((X) >> 3)) |
133 Uint32 rmask = RESHIFT(srcfmt->Rshift) << (dstfmt->Rshift); | 138 Uint32 rmask = RESHIFT (srcfmt->Rshift) << (dstfmt->Rshift); |
134 Uint32 gmask = RESHIFT(srcfmt->Gshift) << (dstfmt->Gshift); | 139 Uint32 gmask = RESHIFT (srcfmt->Gshift) << (dstfmt->Gshift); |
135 Uint32 bmask = RESHIFT(srcfmt->Bshift) << (dstfmt->Bshift); | 140 Uint32 bmask = RESHIFT (srcfmt->Bshift) << (dstfmt->Bshift); |
136 Uint32 amask; | 141 Uint32 amask; |
137 /* Use zero for alpha if either surface doesn't have alpha */ | 142 /* Use zero for alpha if either surface doesn't have alpha */ |
138 if (dstfmt->Amask) { | 143 if (dstfmt->Amask) { |
139 amask = ((srcfmt->Amask) ? RESHIFT(srcfmt->Ashift) : 0x10) << (dstfmt->Ashift); | 144 amask = |
140 } else { | 145 ((srcfmt->Amask) ? RESHIFT (srcfmt->Ashift) : 0x10) << (dstfmt-> |
141 amask = 0x10101010 & ((dstfmt->Rmask | dstfmt->Gmask | dstfmt->Bmask) ^ 0xFFFFFFFF); | 146 Ashift); |
142 } | 147 } else { |
143 #undef RESHIFT | 148 amask = |
144 ((unsigned int *)(char*)&srcvec)[0] = (rmask | gmask | bmask | amask); | 149 0x10101010 & ((dstfmt->Rmask | dstfmt->Gmask | dstfmt->Bmask) ^ |
145 vswiz = vec_add(plus, (vector unsigned char)vec_splat(srcvec, 0)); | 150 0xFFFFFFFF); |
146 return(vswiz); | 151 } |
147 } | 152 #undef RESHIFT |
148 | 153 ((unsigned int *) (char *) &srcvec)[0] = (rmask | gmask | bmask | amask); |
149 static void Blit_RGB888_RGB565(SDL_BlitInfo *info); | 154 vswiz = vec_add (plus, (vector unsigned char) vec_splat (srcvec, 0)); |
150 static void Blit_RGB888_RGB565Altivec(SDL_BlitInfo *info) { | 155 return (vswiz); |
156 } | |
157 | |
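A worked example of what calc_swizzle32 computes. RESHIFT maps a channel's bit shift (0/8/16/24) to its big-endian byte index (3/2/1/0) inside a 32-bit pixel; OR-ing each source byte index into its destination byte position yields a 4-byte map, which vec_splat repeats for all four pixels and the `plus` vector (0,0,0,0, 4,4,4,4, ...) offsets per pixel. A scalar illustration, with a hypothetical helper name (when the source has no alpha, the real code substitutes index 0x10, which selects from the second vec_perm operand, valpha):

```c
/* Byte map for one pixel: for each destination channel, the index of
   the source byte that supplies it (big-endian byte order). */
#define RESHIFT(X) (3 - ((X) >> 3))

static unsigned int
swizzle_map (int sR, int sG, int sB, int sA,    /* source shifts */
             int dR, int dG, int dB, int dA)    /* destination shifts */
{
    return (RESHIFT (sR) << dR) | (RESHIFT (sG) << dG) |
           (RESHIFT (sB) << dB) | (RESHIFT (sA) << dA);
}

/* ARGB -> ABGR: swizzle_map (16, 8, 0, 24,  0, 8, 16, 24) == 0x00030201,
   i.e. destination bytes are taken from source bytes 0,3,2,1.  Adding
   'plus' extends that to 0,3,2,1, 4,7,6,5, 8,11,10,9, 12,15,14,13 --
   a complete vec_perm control vector for four pixels. */
```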
158 static void Blit_RGB888_RGB565 (SDL_BlitInfo * info); | |
159 static void | |
160 Blit_RGB888_RGB565Altivec (SDL_BlitInfo * info) | |
161 { | |
151 int height = info->d_height; | 162 int height = info->d_height; |
152 Uint8 *src = (Uint8 *) info->s_pixels; | 163 Uint8 *src = (Uint8 *) info->s_pixels; |
153 int srcskip = info->s_skip; | 164 int srcskip = info->s_skip; |
154 Uint8 *dst = (Uint8 *) info->d_pixels; | 165 Uint8 *dst = (Uint8 *) info->d_pixels; |
155 int dstskip = info->d_skip; | 166 int dstskip = info->d_skip; |
156 SDL_PixelFormat *srcfmt = info->src; | 167 SDL_PixelFormat *srcfmt = info->src; |
157 vector unsigned char valpha = vec_splat_u8(0); | 168 vector unsigned char valpha = vec_splat_u8 (0); |
158 vector unsigned char vpermute = calc_swizzle32(srcfmt, NULL); | 169 vector unsigned char vpermute = calc_swizzle32 (srcfmt, NULL); |
159 vector unsigned char vgmerge = VECUINT8_LITERAL( | 170 vector unsigned char vgmerge = VECUINT8_LITERAL (0x00, 0x02, 0x00, 0x06, |
160 0x00, 0x02, 0x00, 0x06, | 171 0x00, 0x0a, 0x00, 0x0e, |
161 0x00, 0x0a, 0x00, 0x0e, | 172 0x00, 0x12, 0x00, 0x16, |
162 0x00, 0x12, 0x00, 0x16, | 173 0x00, 0x1a, 0x00, 0x1e); |
163 0x00, 0x1a, 0x00, 0x1e); | 174 vector unsigned short v1 = vec_splat_u16 (1); |
164 vector unsigned short v1 = vec_splat_u16(1); | 175 vector unsigned short v3 = vec_splat_u16 (3); |
165 vector unsigned short v3 = vec_splat_u16(3); | 176 vector unsigned short v3f = |
166 vector unsigned short v3f = VECUINT16_LITERAL( | 177 VECUINT16_LITERAL (0x003f, 0x003f, 0x003f, 0x003f, |
167 0x003f, 0x003f, 0x003f, 0x003f, | 178 0x003f, 0x003f, 0x003f, 0x003f); |
168 0x003f, 0x003f, 0x003f, 0x003f); | 179 vector unsigned short vfc = |
169 vector unsigned short vfc = VECUINT16_LITERAL( | 180 VECUINT16_LITERAL (0x00fc, 0x00fc, 0x00fc, 0x00fc, |
170 0x00fc, 0x00fc, 0x00fc, 0x00fc, | 181 0x00fc, 0x00fc, 0x00fc, 0x00fc); |
171 0x00fc, 0x00fc, 0x00fc, 0x00fc); | 182 vector unsigned short vf800 = (vector unsigned short) vec_splat_u8 (-7); |
172 vector unsigned short vf800 = (vector unsigned short)vec_splat_u8(-7); | 183 vf800 = vec_sl (vf800, vec_splat_u16 (8)); |
173 vf800 = vec_sl(vf800, vec_splat_u16(8)); | |
174 | 184 |
175 while (height--) { | 185 while (height--) { |
176 vector unsigned char valigner; | 186 vector unsigned char valigner; |
177 vector unsigned char voverflow; | 187 vector unsigned char voverflow; |
178 vector unsigned char vsrc; | 188 vector unsigned char vsrc; |
193 dst += 2; \ | 203 dst += 2; \ |
194 src += 4; \ | 204 src += 4; \ |
195 widthvar--; \ | 205 widthvar--; \ |
196 } | 206 } |
197 | 207 |
198 ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width); | 208 ONE_PIXEL_BLEND (((UNALIGNED_PTR (dst)) && (width)), width); |
199 | 209 |
200 /* After all that work, here's the vector part! */ | 210 /* After all that work, here's the vector part! */ |
201 extrawidth = (width % 8); /* trailing unaligned stores */ | 211 extrawidth = (width % 8); /* trailing unaligned stores */ |
202 width -= extrawidth; | 212 width -= extrawidth; |
203 vsrc = vec_ld(0, src); | 213 vsrc = vec_ld (0, src); |
204 valigner = VEC_ALIGNER(src); | 214 valigner = VEC_ALIGNER (src); |
205 | 215 |
206 while (width) { | 216 while (width) { |
207 vector unsigned short vpixel, vrpixel, vgpixel, vbpixel; | 217 vector unsigned short vpixel, vrpixel, vgpixel, vbpixel; |
208 vector unsigned int vsrc1, vsrc2; | 218 vector unsigned int vsrc1, vsrc2; |
209 vector unsigned char vdst; | 219 vector unsigned char vdst; |
210 | 220 |
211 voverflow = vec_ld(15, src); | 221 voverflow = vec_ld (15, src); |
212 vsrc = vec_perm(vsrc, voverflow, valigner); | 222 vsrc = vec_perm (vsrc, voverflow, valigner); |
213 vsrc1 = (vector unsigned int)vec_perm(vsrc, valpha, vpermute); | 223 vsrc1 = (vector unsigned int) vec_perm (vsrc, valpha, vpermute); |
214 src += 16; | 224 src += 16; |
215 vsrc = voverflow; | 225 vsrc = voverflow; |
216 voverflow = vec_ld(15, src); | 226 voverflow = vec_ld (15, src); |
217 vsrc = vec_perm(vsrc, voverflow, valigner); | 227 vsrc = vec_perm (vsrc, voverflow, valigner); |
218 vsrc2 = (vector unsigned int)vec_perm(vsrc, valpha, vpermute); | 228 vsrc2 = (vector unsigned int) vec_perm (vsrc, valpha, vpermute); |
219 /* 1555 */ | 229 /* 1555 */ |
220 vpixel = (vector unsigned short)vec_packpx(vsrc1, vsrc2); | 230 vpixel = (vector unsigned short) vec_packpx (vsrc1, vsrc2); |
221 vgpixel = (vector unsigned short)vec_perm(vsrc1, vsrc2, vgmerge); | 231 vgpixel = |
222 vgpixel = vec_and(vgpixel, vfc); | 232 (vector unsigned short) vec_perm (vsrc1, vsrc2, vgmerge); |
223 vgpixel = vec_sl(vgpixel, v3); | 233 vgpixel = vec_and (vgpixel, vfc); |
224 vrpixel = vec_sl(vpixel, v1); | 234 vgpixel = vec_sl (vgpixel, v3); |
225 vrpixel = vec_and(vrpixel, vf800); | 235 vrpixel = vec_sl (vpixel, v1); |
226 vbpixel = vec_and(vpixel, v3f); | 236 vrpixel = vec_and (vrpixel, vf800); |
227 vdst = vec_or((vector unsigned char)vrpixel, (vector unsigned char)vgpixel); | 237 vbpixel = vec_and (vpixel, v3f); |
238 vdst = | |
239 vec_or ((vector unsigned char) vrpixel, | |
240 (vector unsigned char) vgpixel); | |
228 /* 565 */ | 241 /* 565 */ |
229 vdst = vec_or(vdst, (vector unsigned char)vbpixel); | 242 vdst = vec_or (vdst, (vector unsigned char) vbpixel); |
230 vec_st(vdst, 0, dst); | 243 vec_st (vdst, 0, dst); |
231 | 244 |
232 width -= 8; | 245 width -= 8; |
233 src += 16; | 246 src += 16; |
234 dst += 16; | 247 dst += 16; |
235 vsrc = voverflow; | 248 vsrc = voverflow; |
236 } | 249 } |
237 | 250 |
238 assert(width == 0); | 251 assert (width == 0); |
239 | 252 |
240 /* do scalar until we can align... */ | 253 /* do scalar until we can align... */ |
241 ONE_PIXEL_BLEND((extrawidth), extrawidth); | 254 ONE_PIXEL_BLEND ((extrawidth), extrawidth); |
242 #undef ONE_PIXEL_BLEND | 255 #undef ONE_PIXEL_BLEND |
243 | 256 |
244 src += srcskip; /* move to next row, accounting for pitch. */ | 257 src += srcskip; /* move to next row, accounting for pitch. */ |
245 dst += dstskip; | 258 dst += dstskip; |
246 } | 259 } |
247 | 260 |
248 | 261 |
249 } | 262 } |
250 | 263 |
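The vector loop above packs eight pixels per iteration; vec_packpx natively produces 1-5-5-5 pixels (hence the `/* 1555 */` comment), so the 6-bit green channel is rebuilt separately through the vgmerge permute and the vfc mask, and red is shifted up one bit into 5-6-5 position. Per pixel, the packing is the same as the scalar RGB888_RGB565 macro defined later in this file:

```c
/* Pack one XRGB8888 pixel into RGB565: keep the top 5/6/5 bits of each
   channel and shift them into place. */
static Uint16
pack_565 (Uint32 p)
{
    return (Uint16) (((p & 0x00F80000) >> 8) |  /* R bits 23-19 -> 15-11 */
                     ((p & 0x0000FC00) >> 5) |  /* G bits 15-10 -> 10-5  */
                     ((p & 0x000000F8) >> 3));  /* B bits  7-3  ->  4-0  */
}
```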
251 static void Blit_RGB565_32Altivec(SDL_BlitInfo *info) { | 264 static void |
265 Blit_RGB565_32Altivec (SDL_BlitInfo * info) | |
266 { | |
252 int height = info->d_height; | 267 int height = info->d_height; |
253 Uint8 *src = (Uint8 *) info->s_pixels; | 268 Uint8 *src = (Uint8 *) info->s_pixels; |
254 int srcskip = info->s_skip; | 269 int srcskip = info->s_skip; |
255 Uint8 *dst = (Uint8 *) info->d_pixels; | 270 Uint8 *dst = (Uint8 *) info->d_pixels; |
256 int dstskip = info->d_skip; | 271 int dstskip = info->d_skip; |
258 SDL_PixelFormat *dstfmt = info->dst; | 273 SDL_PixelFormat *dstfmt = info->dst; |
259 unsigned alpha; | 274 unsigned alpha; |
260 vector unsigned char valpha; | 275 vector unsigned char valpha; |
261 vector unsigned char vpermute; | 276 vector unsigned char vpermute; |
262 vector unsigned short vf800; | 277 vector unsigned short vf800; |
263 vector unsigned int v8 = vec_splat_u32(8); | 278 vector unsigned int v8 = vec_splat_u32 (8); |
264 vector unsigned int v16 = vec_add(v8, v8); | 279 vector unsigned int v16 = vec_add (v8, v8); |
265 vector unsigned short v2 = vec_splat_u16(2); | 280 vector unsigned short v2 = vec_splat_u16 (2); |
266 vector unsigned short v3 = vec_splat_u16(3); | 281 vector unsigned short v3 = vec_splat_u16 (3); |
267 /* | 282 /* |
268 0x10 - 0x1f is the alpha | 283 0x10 - 0x1f is the alpha |
269 0x00 - 0x0e evens are the red | 284 0x00 - 0x0e evens are the red |
270 0x01 - 0x0f odds are zero | 285 0x01 - 0x0f odds are zero |
271 */ | 286 */ |
272 vector unsigned char vredalpha1 = VECUINT8_LITERAL( | 287 vector unsigned char vredalpha1 = |
273 0x10, 0x00, 0x01, 0x01, | 288 VECUINT8_LITERAL (0x10, 0x00, 0x01, 0x01, |
274 0x10, 0x02, 0x01, 0x01, | 289 0x10, 0x02, 0x01, 0x01, |
275 0x10, 0x04, 0x01, 0x01, | 290 0x10, 0x04, 0x01, 0x01, |
276 0x10, 0x06, 0x01, 0x01 | 291 0x10, 0x06, 0x01, 0x01); |
277 ); | 292 vector unsigned char vredalpha2 = |
278 vector unsigned char vredalpha2 = (vector unsigned char) ( | 293 (vector unsigned |
279 vec_add((vector unsigned int)vredalpha1, vec_sl(v8, v16)) | 294 char) (vec_add ((vector unsigned int) vredalpha1, vec_sl (v8, v16)) |
280 ); | 295 ); |
281 /* | 296 /* |
282 0x00 - 0x0f is ARxx ARxx ARxx ARxx | 297 0x00 - 0x0f is ARxx ARxx ARxx ARxx |
283 0x11 - 0x0f odds are blue | 298 0x11 - 0x0f odds are blue |
284 */ | 299 */ |
285 vector unsigned char vblue1 = VECUINT8_LITERAL( | 300 vector unsigned char vblue1 = VECUINT8_LITERAL (0x00, 0x01, 0x02, 0x11, |
286 0x00, 0x01, 0x02, 0x11, | 301 0x04, 0x05, 0x06, 0x13, |
287 0x04, 0x05, 0x06, 0x13, | 302 0x08, 0x09, 0x0a, 0x15, |
288 0x08, 0x09, 0x0a, 0x15, | 303 0x0c, 0x0d, 0x0e, 0x17); |
289 0x0c, 0x0d, 0x0e, 0x17 | 304 vector unsigned char vblue2 = |
290 ); | 305 (vector unsigned char) (vec_add ((vector unsigned int) vblue1, v8) |
291 vector unsigned char vblue2 = (vector unsigned char)( | 306 ); |
292 vec_add((vector unsigned int)vblue1, v8) | |
293 ); | |
294 /* | 307 /* |
295 0x00 - 0x0f is ARxB ARxB ARxB ARxB | 308 0x00 - 0x0f is ARxB ARxB ARxB ARxB |
296 0x10 - 0x0e evens are green | 309 0x10 - 0x0e evens are green |
297 */ | 310 */ |
298 vector unsigned char vgreen1 = VECUINT8_LITERAL( | 311 vector unsigned char vgreen1 = VECUINT8_LITERAL (0x00, 0x01, 0x10, 0x03, |
299 0x00, 0x01, 0x10, 0x03, | 312 0x04, 0x05, 0x12, 0x07, |
300 0x04, 0x05, 0x12, 0x07, | 313 0x08, 0x09, 0x14, 0x0b, |
301 0x08, 0x09, 0x14, 0x0b, | 314 0x0c, 0x0d, 0x16, 0x0f); |
302 0x0c, 0x0d, 0x16, 0x0f | 315 vector unsigned char vgreen2 = |
303 ); | 316 (vector unsigned |
304 vector unsigned char vgreen2 = (vector unsigned char)( | 317 char) (vec_add ((vector unsigned int) vgreen1, vec_sl (v8, v8)) |
305 vec_add((vector unsigned int)vgreen1, vec_sl(v8, v8)) | 318 ); |
306 ); | 319 |
307 | 320 |
308 | 321 assert (srcfmt->BytesPerPixel == 2); |
309 assert(srcfmt->BytesPerPixel == 2); | 322 assert (dstfmt->BytesPerPixel == 4); |
310 assert(dstfmt->BytesPerPixel == 4); | 323 |
311 | 324 vf800 = (vector unsigned short) vec_splat_u8 (-7); |
312 vf800 = (vector unsigned short)vec_splat_u8(-7); | 325 vf800 = vec_sl (vf800, vec_splat_u16 (8)); |
313 vf800 = vec_sl(vf800, vec_splat_u16(8)); | |
314 | 326 |
315 if (dstfmt->Amask && srcfmt->alpha) { | 327 if (dstfmt->Amask && srcfmt->alpha) { |
316 ((unsigned char *)&valpha)[0] = alpha = srcfmt->alpha; | 328 ((unsigned char *) &valpha)[0] = alpha = srcfmt->alpha; |
317 valpha = vec_splat(valpha, 0); | 329 valpha = vec_splat (valpha, 0); |
318 } else { | 330 } else { |
319 alpha = 0; | 331 alpha = 0; |
320 valpha = vec_splat_u8(0); | 332 valpha = vec_splat_u8 (0); |
321 } | 333 } |
322 | 334 |
323 vpermute = calc_swizzle32(NULL, dstfmt); | 335 vpermute = calc_swizzle32 (NULL, dstfmt); |
324 while (height--) { | 336 while (height--) { |
325 vector unsigned char valigner; | 337 vector unsigned char valigner; |
326 vector unsigned char voverflow; | 338 vector unsigned char voverflow; |
327 vector unsigned char vsrc; | 339 vector unsigned char vsrc; |
328 | 340 |
340 ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \ | 352 ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \ |
341 src += 2; \ | 353 src += 2; \ |
342 dst += 4; \ | 354 dst += 4; \ |
343 widthvar--; \ | 355 widthvar--; \ |
344 } | 356 } |
345 ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width); | 357 ONE_PIXEL_BLEND (((UNALIGNED_PTR (dst)) && (width)), width); |
346 | 358 |
347 /* After all that work, here's the vector part! */ | 359 /* After all that work, here's the vector part! */ |
348 extrawidth = (width % 8); /* trailing unaligned stores */ | 360 extrawidth = (width % 8); /* trailing unaligned stores */ |
349 width -= extrawidth; | 361 width -= extrawidth; |
350 vsrc = vec_ld(0, src); | 362 vsrc = vec_ld (0, src); |
351 valigner = VEC_ALIGNER(src); | 363 valigner = VEC_ALIGNER (src); |
352 | 364 |
353 while (width) { | 365 while (width) { |
354 vector unsigned short vR, vG, vB; | 366 vector unsigned short vR, vG, vB; |
355 vector unsigned char vdst1, vdst2; | 367 vector unsigned char vdst1, vdst2; |
356 | 368 |
357 voverflow = vec_ld(15, src); | 369 voverflow = vec_ld (15, src); |
358 vsrc = vec_perm(vsrc, voverflow, valigner); | 370 vsrc = vec_perm (vsrc, voverflow, valigner); |
359 | 371 |
360 vR = vec_and((vector unsigned short)vsrc, vf800); | 372 vR = vec_and ((vector unsigned short) vsrc, vf800); |
361 vB = vec_sl((vector unsigned short)vsrc, v3); | 373 vB = vec_sl ((vector unsigned short) vsrc, v3); |
362 vG = vec_sl(vB, v2); | 374 vG = vec_sl (vB, v2); |
363 | 375 |
364 vdst1 = (vector unsigned char)vec_perm((vector unsigned char)vR, valpha, vredalpha1); | 376 vdst1 = |
365 vdst1 = vec_perm(vdst1, (vector unsigned char)vB, vblue1); | 377 (vector unsigned char) vec_perm ((vector unsigned char) vR, |
366 vdst1 = vec_perm(vdst1, (vector unsigned char)vG, vgreen1); | 378 valpha, vredalpha1); |
367 vdst1 = vec_perm(vdst1, valpha, vpermute); | 379 vdst1 = vec_perm (vdst1, (vector unsigned char) vB, vblue1); |
368 vec_st(vdst1, 0, dst); | 380 vdst1 = vec_perm (vdst1, (vector unsigned char) vG, vgreen1); |
369 | 381 vdst1 = vec_perm (vdst1, valpha, vpermute); |
370 vdst2 = (vector unsigned char)vec_perm((vector unsigned char)vR, valpha, vredalpha2); | 382 vec_st (vdst1, 0, dst); |
371 vdst2 = vec_perm(vdst2, (vector unsigned char)vB, vblue2); | 383 |
372 vdst2 = vec_perm(vdst2, (vector unsigned char)vG, vgreen2); | 384 vdst2 = |
373 vdst2 = vec_perm(vdst2, valpha, vpermute); | 385 (vector unsigned char) vec_perm ((vector unsigned char) vR, |
374 vec_st(vdst2, 16, dst); | 386 valpha, vredalpha2); |
375 | 387 vdst2 = vec_perm (vdst2, (vector unsigned char) vB, vblue2); |
388 vdst2 = vec_perm (vdst2, (vector unsigned char) vG, vgreen2); | |
389 vdst2 = vec_perm (vdst2, valpha, vpermute); | |
390 vec_st (vdst2, 16, dst); | |
391 | |
376 width -= 8; | 392 width -= 8; |
377 dst += 32; | 393 dst += 32; |
378 src += 16; | 394 src += 16; |
379 vsrc = voverflow; | 395 vsrc = voverflow; |
380 } | 396 } |
381 | 397 |
382 assert(width == 0); | 398 assert (width == 0); |
383 | 399 |
384 | 400 |
385 /* do scalar until we can align... */ | 401 /* do scalar until we can align... */ |
386 ONE_PIXEL_BLEND((extrawidth), extrawidth); | 402 ONE_PIXEL_BLEND ((extrawidth), extrawidth); |
387 #undef ONE_PIXEL_BLEND | 403 #undef ONE_PIXEL_BLEND |
388 | 404 |
389 src += srcskip; /* move to next row, accounting for pitch. */ | 405 src += srcskip; /* move to next row, accounting for pitch. */ |
390 dst += dstskip; | 406 dst += dstskip; |
391 } | 407 } |
392 | 408 |
393 } | 409 } |
394 | 410 |
395 | 411 |
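The three permute pairs above (vredalpha, vblue, vgreen) place each channel's byte for two blocks of four pixels; per pixel the expansion is close to this scalar sketch, assuming an ARGB destination (the final vec_perm through vpermute handles arbitrary dstfmt layouts). Note this sketch zeroes the low bits of each widened channel; the vector path's shifts simply leave whatever bits land there:

```c
static Uint32
expand_565 (Uint16 p, Uint8 alpha)
{
    Uint8 r = (p & 0xF800) >> 8;        /* 5 bits -> bits 7-3 */
    Uint8 g = (p & 0x07E0) >> 3;        /* 6 bits -> bits 7-2 */
    Uint8 b = (p & 0x001F) << 3;        /* 5 bits -> bits 7-3 */
    return ((Uint32) alpha << 24) | ((Uint32) r << 16) |
           ((Uint32) g << 8) | (Uint32) b;
}
```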
396 static void Blit_RGB555_32Altivec(SDL_BlitInfo *info) { | 412 static void |
413 Blit_RGB555_32Altivec (SDL_BlitInfo * info) | |
414 { | |
397 int height = info->d_height; | 415 int height = info->d_height; |
398 Uint8 *src = (Uint8 *) info->s_pixels; | 416 Uint8 *src = (Uint8 *) info->s_pixels; |
399 int srcskip = info->s_skip; | 417 int srcskip = info->s_skip; |
400 Uint8 *dst = (Uint8 *) info->d_pixels; | 418 Uint8 *dst = (Uint8 *) info->d_pixels; |
401 int dstskip = info->d_skip; | 419 int dstskip = info->d_skip; |
403 SDL_PixelFormat *dstfmt = info->dst; | 421 SDL_PixelFormat *dstfmt = info->dst; |
404 unsigned alpha; | 422 unsigned alpha; |
405 vector unsigned char valpha; | 423 vector unsigned char valpha; |
406 vector unsigned char vpermute; | 424 vector unsigned char vpermute; |
407 vector unsigned short vf800; | 425 vector unsigned short vf800; |
408 vector unsigned int v8 = vec_splat_u32(8); | 426 vector unsigned int v8 = vec_splat_u32 (8); |
409 vector unsigned int v16 = vec_add(v8, v8); | 427 vector unsigned int v16 = vec_add (v8, v8); |
410 vector unsigned short v1 = vec_splat_u16(1); | 428 vector unsigned short v1 = vec_splat_u16 (1); |
411 vector unsigned short v3 = vec_splat_u16(3); | 429 vector unsigned short v3 = vec_splat_u16 (3); |
412 /* | 430 /* |
413 0x10 - 0x1f is the alpha | 431 0x10 - 0x1f is the alpha |
414 0x00 - 0x0e evens are the red | 432 0x00 - 0x0e evens are the red |
415 0x01 - 0x0f odds are zero | 433 0x01 - 0x0f odds are zero |
416 */ | 434 */ |
417 vector unsigned char vredalpha1 = VECUINT8_LITERAL( | 435 vector unsigned char vredalpha1 = |
418 0x10, 0x00, 0x01, 0x01, | 436 VECUINT8_LITERAL (0x10, 0x00, 0x01, 0x01, |
419 0x10, 0x02, 0x01, 0x01, | 437 0x10, 0x02, 0x01, 0x01, |
420 0x10, 0x04, 0x01, 0x01, | 438 0x10, 0x04, 0x01, 0x01, |
421 0x10, 0x06, 0x01, 0x01 | 439 0x10, 0x06, 0x01, 0x01); |
422 ); | 440 vector unsigned char vredalpha2 = |
423 vector unsigned char vredalpha2 = (vector unsigned char)( | 441 (vector unsigned |
424 vec_add((vector unsigned int)vredalpha1, vec_sl(v8, v16)) | 442 char) (vec_add ((vector unsigned int) vredalpha1, vec_sl (v8, v16)) |
425 ); | 443 ); |
426 /* | 444 /* |
427 0x00 - 0x0f is ARxx ARxx ARxx ARxx | 445 0x00 - 0x0f is ARxx ARxx ARxx ARxx |
428 0x11 - 0x0f odds are blue | 446 0x11 - 0x0f odds are blue |
429 */ | 447 */ |
430 vector unsigned char vblue1 = VECUINT8_LITERAL( | 448 vector unsigned char vblue1 = VECUINT8_LITERAL (0x00, 0x01, 0x02, 0x11, |
431 0x00, 0x01, 0x02, 0x11, | 449 0x04, 0x05, 0x06, 0x13, |
432 0x04, 0x05, 0x06, 0x13, | 450 0x08, 0x09, 0x0a, 0x15, |
433 0x08, 0x09, 0x0a, 0x15, | 451 0x0c, 0x0d, 0x0e, 0x17); |
434 0x0c, 0x0d, 0x0e, 0x17 | 452 vector unsigned char vblue2 = |
435 ); | 453 (vector unsigned char) (vec_add ((vector unsigned int) vblue1, v8) |
436 vector unsigned char vblue2 = (vector unsigned char)( | 454 ); |
437 vec_add((vector unsigned int)vblue1, v8) | |
438 ); | |
439 /* | 455 /* |
440 0x00 - 0x0f is ARxB ARxB ARxB ARxB | 456 0x00 - 0x0f is ARxB ARxB ARxB ARxB |
441 0x10 - 0x0e evens are green | 457 0x10 - 0x0e evens are green |
442 */ | 458 */ |
443 vector unsigned char vgreen1 = VECUINT8_LITERAL( | 459 vector unsigned char vgreen1 = VECUINT8_LITERAL (0x00, 0x01, 0x10, 0x03, |
444 0x00, 0x01, 0x10, 0x03, | 460 0x04, 0x05, 0x12, 0x07, |
445 0x04, 0x05, 0x12, 0x07, | 461 0x08, 0x09, 0x14, 0x0b, |
446 0x08, 0x09, 0x14, 0x0b, | 462 0x0c, 0x0d, 0x16, 0x0f); |
447 0x0c, 0x0d, 0x16, 0x0f | 463 vector unsigned char vgreen2 = |
448 ); | 464 (vector unsigned |
449 vector unsigned char vgreen2 = (vector unsigned char)( | 465 char) (vec_add ((vector unsigned int) vgreen1, vec_sl (v8, v8)) |
450 vec_add((vector unsigned int)vgreen1, vec_sl(v8, v8)) | 466 ); |
451 ); | 467 |
452 | 468 |
453 | 469 assert (srcfmt->BytesPerPixel == 2); |
454 assert(srcfmt->BytesPerPixel == 2); | 470 assert (dstfmt->BytesPerPixel == 4); |
455 assert(dstfmt->BytesPerPixel == 4); | 471 |
456 | 472 vf800 = (vector unsigned short) vec_splat_u8 (-7); |
457 vf800 = (vector unsigned short)vec_splat_u8(-7); | 473 vf800 = vec_sl (vf800, vec_splat_u16 (8)); |
458 vf800 = vec_sl(vf800, vec_splat_u16(8)); | |
459 | 474 |
460 if (dstfmt->Amask && srcfmt->alpha) { | 475 if (dstfmt->Amask && srcfmt->alpha) { |
461 ((unsigned char *)&valpha)[0] = alpha = srcfmt->alpha; | 476 ((unsigned char *) &valpha)[0] = alpha = srcfmt->alpha; |
462 valpha = vec_splat(valpha, 0); | 477 valpha = vec_splat (valpha, 0); |
463 } else { | 478 } else { |
464 alpha = 0; | 479 alpha = 0; |
465 valpha = vec_splat_u8(0); | 480 valpha = vec_splat_u8 (0); |
466 } | 481 } |
467 | 482 |
468 vpermute = calc_swizzle32(NULL, dstfmt); | 483 vpermute = calc_swizzle32 (NULL, dstfmt); |
469 while (height--) { | 484 while (height--) { |
470 vector unsigned char valigner; | 485 vector unsigned char valigner; |
471 vector unsigned char voverflow; | 486 vector unsigned char voverflow; |
472 vector unsigned char vsrc; | 487 vector unsigned char vsrc; |
473 | 488 |
485 ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \ | 500 ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \ |
486 src += 2; \ | 501 src += 2; \ |
487 dst += 4; \ | 502 dst += 4; \ |
488 widthvar--; \ | 503 widthvar--; \ |
489 } | 504 } |
490 ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width); | 505 ONE_PIXEL_BLEND (((UNALIGNED_PTR (dst)) && (width)), width); |
491 | 506 |
492 /* After all that work, here's the vector part! */ | 507 /* After all that work, here's the vector part! */ |
493 extrawidth = (width % 8); /* trailing unaligned stores */ | 508 extrawidth = (width % 8); /* trailing unaligned stores */ |
494 width -= extrawidth; | 509 width -= extrawidth; |
495 vsrc = vec_ld(0, src); | 510 vsrc = vec_ld (0, src); |
496 valigner = VEC_ALIGNER(src); | 511 valigner = VEC_ALIGNER (src); |
497 | 512 |
498 while (width) { | 513 while (width) { |
499 vector unsigned short vR, vG, vB; | 514 vector unsigned short vR, vG, vB; |
500 vector unsigned char vdst1, vdst2; | 515 vector unsigned char vdst1, vdst2; |
501 | 516 |
502 voverflow = vec_ld(15, src); | 517 voverflow = vec_ld (15, src); |
503 vsrc = vec_perm(vsrc, voverflow, valigner); | 518 vsrc = vec_perm (vsrc, voverflow, valigner); |
504 | 519 |
505 vR = vec_and(vec_sl((vector unsigned short)vsrc,v1), vf800); | 520 vR = vec_and (vec_sl ((vector unsigned short) vsrc, v1), vf800); |
506 vB = vec_sl((vector unsigned short)vsrc, v3); | 521 vB = vec_sl ((vector unsigned short) vsrc, v3); |
507 vG = vec_sl(vB, v3); | 522 vG = vec_sl (vB, v3); |
508 | 523 |
509 vdst1 = (vector unsigned char)vec_perm((vector unsigned char)vR, valpha, vredalpha1); | 524 vdst1 = |
510 vdst1 = vec_perm(vdst1, (vector unsigned char)vB, vblue1); | 525 (vector unsigned char) vec_perm ((vector unsigned char) vR, |
511 vdst1 = vec_perm(vdst1, (vector unsigned char)vG, vgreen1); | 526 valpha, vredalpha1); |
512 vdst1 = vec_perm(vdst1, valpha, vpermute); | 527 vdst1 = vec_perm (vdst1, (vector unsigned char) vB, vblue1); |
513 vec_st(vdst1, 0, dst); | 528 vdst1 = vec_perm (vdst1, (vector unsigned char) vG, vgreen1); |
514 | 529 vdst1 = vec_perm (vdst1, valpha, vpermute); |
515 vdst2 = (vector unsigned char)vec_perm((vector unsigned char)vR, valpha, vredalpha2); | 530 vec_st (vdst1, 0, dst); |
516 vdst2 = vec_perm(vdst2, (vector unsigned char)vB, vblue2); | 531 |
517 vdst2 = vec_perm(vdst2, (vector unsigned char)vG, vgreen2); | 532 vdst2 = |
518 vdst2 = vec_perm(vdst2, valpha, vpermute); | 533 (vector unsigned char) vec_perm ((vector unsigned char) vR, |
519 vec_st(vdst2, 16, dst); | 534 valpha, vredalpha2); |
520 | 535 vdst2 = vec_perm (vdst2, (vector unsigned char) vB, vblue2); |
536 vdst2 = vec_perm (vdst2, (vector unsigned char) vG, vgreen2); | |
537 vdst2 = vec_perm (vdst2, valpha, vpermute); | |
538 vec_st (vdst2, 16, dst); | |
539 | |
521 width -= 8; | 540 width -= 8; |
522 dst += 32; | 541 dst += 32; |
523 src += 16; | 542 src += 16; |
524 vsrc = voverflow; | 543 vsrc = voverflow; |
525 } | 544 } |
526 | 545 |
527 assert(width == 0); | 546 assert (width == 0); |
528 | 547 |
529 | 548 |
530 /* do scalar until we can align... */ | 549 /* do scalar until we can align... */ |
531 ONE_PIXEL_BLEND((extrawidth), extrawidth); | 550 ONE_PIXEL_BLEND ((extrawidth), extrawidth); |
532 #undef ONE_PIXEL_BLEND | 551 #undef ONE_PIXEL_BLEND |
533 | 552 |
534 src += srcskip; /* move to next row, accounting for pitch. */ | 553 src += srcskip; /* move to next row, accounting for pitch. */ |
535 dst += dstskip; | 554 dst += dstskip; |
536 } | 555 } |
537 | 556 |
538 } | 557 } |
539 | 558 |
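Blit_RGB555_32Altivec differs from the 565 version only in the shifts: RGB555 keeps red in bits 14-10, so the pixel is shifted left one (v1) before masking with vf800, and green is 5 bits wide (vG = vB << 3 instead of << 2). The scalar equivalent, under the same assumed ARGB destination and with low bits zeroed:

```c
static Uint32
expand_555 (Uint16 p, Uint8 alpha)
{
    Uint8 r = (p & 0x7C00) >> 7;        /* 5 bits -> bits 7-3 */
    Uint8 g = (p & 0x03E0) >> 2;        /* 5 bits -> bits 7-3 */
    Uint8 b = (p & 0x001F) << 3;        /* 5 bits -> bits 7-3 */
    return ((Uint32) alpha << 24) | ((Uint32) r << 16) |
           ((Uint32) g << 8) | (Uint32) b;
}
```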
540 static void BlitNtoNKey(SDL_BlitInfo *info); | 559 static void BlitNtoNKey (SDL_BlitInfo * info); |
541 static void BlitNtoNKeyCopyAlpha(SDL_BlitInfo *info); | 560 static void BlitNtoNKeyCopyAlpha (SDL_BlitInfo * info); |
542 static void Blit32to32KeyAltivec(SDL_BlitInfo *info) | 561 static void |
562 Blit32to32KeyAltivec (SDL_BlitInfo * info) | |
543 { | 563 { |
544 int height = info->d_height; | 564 int height = info->d_height; |
545 Uint32 *srcp = (Uint32 *) info->s_pixels; | 565 Uint32 *srcp = (Uint32 *) info->s_pixels; |
546 int srcskip = info->s_skip; | 566 int srcskip = info->s_skip; |
547 Uint32 *dstp = (Uint32 *) info->d_pixels; | 567 Uint32 *dstp = (Uint32 *) info->d_pixels; |
549 SDL_PixelFormat *srcfmt = info->src; | 569 SDL_PixelFormat *srcfmt = info->src; |
550 int srcbpp = srcfmt->BytesPerPixel; | 570 int srcbpp = srcfmt->BytesPerPixel; |
551 SDL_PixelFormat *dstfmt = info->dst; | 571 SDL_PixelFormat *dstfmt = info->dst; |
552 int dstbpp = dstfmt->BytesPerPixel; | 572 int dstbpp = dstfmt->BytesPerPixel; |
553 int copy_alpha = (srcfmt->Amask && dstfmt->Amask); | 573 int copy_alpha = (srcfmt->Amask && dstfmt->Amask); |
554 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; | 574 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; |
555 Uint32 rgbmask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask; | 575 Uint32 rgbmask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask; |
556 Uint32 ckey = info->src->colorkey; | 576 Uint32 ckey = info->src->colorkey; |
557 vector unsigned int valpha; | 577 vector unsigned int valpha; |
558 vector unsigned char vpermute; | 578 vector unsigned char vpermute; |
559 vector unsigned char vzero; | 579 vector unsigned char vzero; |
560 vector unsigned int vckey; | 580 vector unsigned int vckey; |
561 vector unsigned int vrgbmask; | 581 vector unsigned int vrgbmask; |
562 vpermute = calc_swizzle32(srcfmt, dstfmt); | 582 vpermute = calc_swizzle32 (srcfmt, dstfmt); |
563 if (info->d_width < 16) { | 583 if (info->d_width < 16) { |
564 if(copy_alpha) { | 584 if (copy_alpha) { |
565 BlitNtoNKeyCopyAlpha(info); | 585 BlitNtoNKeyCopyAlpha (info); |
566 } else { | 586 } else { |
567 BlitNtoNKey(info); | 587 BlitNtoNKey (info); |
568 } | 588 } |
569 return; | 589 return; |
570 } | 590 } |
571 vzero = vec_splat_u8(0); | 591 vzero = vec_splat_u8 (0); |
572 if (alpha) { | 592 if (alpha) { |
573 ((unsigned char *)&valpha)[0] = (unsigned char)alpha; | 593 ((unsigned char *) &valpha)[0] = (unsigned char) alpha; |
574 valpha = (vector unsigned int)vec_splat((vector unsigned char)valpha, 0); | 594 valpha = |
595 (vector unsigned int) vec_splat ((vector unsigned char) valpha, | |
596 0); | |
575 } else { | 597 } else { |
576 valpha = (vector unsigned int)vzero; | 598 valpha = (vector unsigned int) vzero; |
577 } | 599 } |
578 ckey &= rgbmask; | 600 ckey &= rgbmask; |
579 ((unsigned int *)(char*)&vckey)[0] = ckey; | 601 ((unsigned int *) (char *) &vckey)[0] = ckey; |
580 vckey = vec_splat(vckey, 0); | 602 vckey = vec_splat (vckey, 0); |
581 ((unsigned int *)(char*)&vrgbmask)[0] = rgbmask; | 603 ((unsigned int *) (char *) &vrgbmask)[0] = rgbmask; |
582 vrgbmask = vec_splat(vrgbmask, 0); | 604 vrgbmask = vec_splat (vrgbmask, 0); |
583 | 605 |
584 while (height--) { | 606 while (height--) { |
585 #define ONE_PIXEL_BLEND(condition, widthvar) \ | 607 #define ONE_PIXEL_BLEND(condition, widthvar) \ |
586 if (copy_alpha) { \ | 608 if (copy_alpha) { \ |
587 while (condition) { \ | 609 while (condition) { \ |
611 srcp = (Uint32 *) (((Uint8 *)srcp) + srcbpp); \ | 633 srcp = (Uint32 *) (((Uint8 *)srcp) + srcbpp); \ |
612 widthvar--; \ | 634 widthvar--; \ |
613 } \ | 635 } \ |
614 } | 636 } |
615 int width = info->d_width; | 637 int width = info->d_width; |
616 ONE_PIXEL_BLEND((UNALIGNED_PTR(dstp)) && (width), width); | 638 ONE_PIXEL_BLEND ((UNALIGNED_PTR (dstp)) && (width), width); |
617 assert(width > 0); | 639 assert (width > 0); |
618 if (width > 0) { | 640 if (width > 0) { |
619 int extrawidth = (width % 4); | 641 int extrawidth = (width % 4); |
620 vector unsigned char valigner = VEC_ALIGNER(srcp); | 642 vector unsigned char valigner = VEC_ALIGNER (srcp); |
621 vector unsigned int vs = vec_ld(0, srcp); | 643 vector unsigned int vs = vec_ld (0, srcp); |
622 width -= extrawidth; | 644 width -= extrawidth; |
623 assert(width >= 4); | 645 assert (width >= 4); |
624 while (width) { | 646 while (width) { |
625 vector unsigned char vsel; | 647 vector unsigned char vsel; |
626 vector unsigned int vd; | 648 vector unsigned int vd; |
627 vector unsigned int voverflow = vec_ld(15, srcp); | 649 vector unsigned int voverflow = vec_ld (15, srcp); |
628 /* load the source vec */ | 650 /* load the source vec */ |
629 vs = vec_perm(vs, voverflow, valigner); | 651 vs = vec_perm (vs, voverflow, valigner); |
630 /* vsel is set for items that match the key */ | 652 /* vsel is set for items that match the key */ |
631 vsel = (vector unsigned char)vec_and(vs, vrgbmask); | 653 vsel = (vector unsigned char) vec_and (vs, vrgbmask); |
632 vsel = (vector unsigned char)vec_cmpeq(vs, vckey); | 654 vsel = (vector unsigned char) vec_cmpeq (vs, vckey); |
633 /* permute the src vec to the dest format */ | 655 /* permute the src vec to the dest format */ |
634 vs = vec_perm(vs, valpha, vpermute); | 656 vs = vec_perm (vs, valpha, vpermute); |
635 /* load the destination vec */ | 657 /* load the destination vec */ |
636 vd = vec_ld(0, dstp); | 658 vd = vec_ld (0, dstp); |
637 /* select the source and dest into vs */ | 659 /* select the source and dest into vs */ |
638 vd = (vector unsigned int)vec_sel((vector unsigned char)vs, (vector unsigned char)vd, vsel); | 660 vd = (vector unsigned int) vec_sel ((vector unsigned char) vs, |
639 | 661 (vector unsigned char) vd, |
640 vec_st(vd, 0, dstp); | 662 vsel); |
663 | |
664 vec_st (vd, 0, dstp); | |
641 srcp += 4; | 665 srcp += 4; |
642 width -= 4; | 666 width -= 4; |
643 dstp += 4; | 667 dstp += 4; |
644 vs = voverflow; | 668 vs = voverflow; |
645 } | 669 } |
646 ONE_PIXEL_BLEND((extrawidth), extrawidth); | 670 ONE_PIXEL_BLEND ((extrawidth), extrawidth); |
647 #undef ONE_PIXEL_BLEND | 671 #undef ONE_PIXEL_BLEND |
648 srcp += srcskip >> 2; | 672 srcp += srcskip >> 2; |
649 dstp += dstskip >> 2; | 673 dstp += dstskip >> 2; |
650 } | 674 } |
651 } | 675 } |
652 } | 676 } |
653 | 677 |
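Per pixel, the vec_cmpeq/vec_sel pair above implements the test sketched below; convert_pixel is a hypothetical stand-in for the swizzle-plus-alpha step done by vec_perm with vpermute and valpha. One thing worth noticing in the vector loop: vsel is assigned from vec_and(vs, vrgbmask) and then immediately overwritten by vec_cmpeq(vs, vckey), so the comparison there runs on the unmasked source.

```c
/* Colorkey copy for one row: keep the old destination pixel wherever
   the masked source equals the (pre-masked) key. */
static void
key_blit_row (const Uint32 * srcp, Uint32 * dstp, int width,
              Uint32 rgbmask, Uint32 ckey)
{
    while (width--) {
        Uint32 s = *srcp++;
        if ((s & rgbmask) != ckey)
            *dstp = convert_pixel (s);  /* hypothetical swizzle helper */
        ++dstp;
    }
}
```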
654 /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */ | 678 /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */ |
655 /* Use this on a G5 */ | 679 /* Use this on a G5 */ |
656 static void ConvertAltivec32to32_noprefetch(SDL_BlitInfo *info) | 680 static void |
681 ConvertAltivec32to32_noprefetch (SDL_BlitInfo * info) | |
657 { | 682 { |
658 int height = info->d_height; | 683 int height = info->d_height; |
659 Uint32 *src = (Uint32 *) info->s_pixels; | 684 Uint32 *src = (Uint32 *) info->s_pixels; |
660 int srcskip = info->s_skip; | 685 int srcskip = info->s_skip; |
661 Uint32 *dst = (Uint32 *) info->d_pixels; | 686 Uint32 *dst = (Uint32 *) info->d_pixels; |
662 int dstskip = info->d_skip; | 687 int dstskip = info->d_skip; |
663 SDL_PixelFormat *srcfmt = info->src; | 688 SDL_PixelFormat *srcfmt = info->src; |
664 SDL_PixelFormat *dstfmt = info->dst; | 689 SDL_PixelFormat *dstfmt = info->dst; |
665 vector unsigned int vzero = vec_splat_u32(0); | 690 vector unsigned int vzero = vec_splat_u32 (0); |
666 vector unsigned char vpermute = calc_swizzle32(srcfmt, dstfmt); | 691 vector unsigned char vpermute = calc_swizzle32 (srcfmt, dstfmt); |
667 if (dstfmt->Amask && !srcfmt->Amask) { | 692 if (dstfmt->Amask && !srcfmt->Amask) { |
668 if (srcfmt->alpha) { | 693 if (srcfmt->alpha) { |
669 vector unsigned char valpha; | 694 vector unsigned char valpha; |
670 ((unsigned char *)&valpha)[0] = srcfmt->alpha; | 695 ((unsigned char *) &valpha)[0] = srcfmt->alpha; |
671 vzero = (vector unsigned int)vec_splat(valpha, 0); | 696 vzero = (vector unsigned int) vec_splat (valpha, 0); |
672 } | 697 } |
673 } | 698 } |
674 | 699 |
675 assert(srcfmt->BytesPerPixel == 4); | 700 assert (srcfmt->BytesPerPixel == 4); |
676 assert(dstfmt->BytesPerPixel == 4); | 701 assert (dstfmt->BytesPerPixel == 4); |
677 | 702 |
678 while (height--) { | 703 while (height--) { |
679 vector unsigned char valigner; | 704 vector unsigned char valigner; |
680 vector unsigned int vbits; | 705 vector unsigned int vbits; |
681 vector unsigned int voverflow; | 706 vector unsigned int voverflow; |
684 | 709 |
685 int width = info->d_width; | 710 int width = info->d_width; |
686 int extrawidth; | 711 int extrawidth; |
687 | 712 |
688 /* do scalar until we can align... */ | 713 /* do scalar until we can align... */ |
689 while ((UNALIGNED_PTR(dst)) && (width)) { | 714 while ((UNALIGNED_PTR (dst)) && (width)) { |
690 bits = *(src++); | 715 bits = *(src++); |
691 RGBA_FROM_8888(bits, srcfmt, r, g, b, a); | 716 RGBA_FROM_8888 (bits, srcfmt, r, g, b, a); |
692 *(dst++) = MAKE8888(dstfmt, r, g, b, a); | 717 *(dst++) = MAKE8888 (dstfmt, r, g, b, a); |
693 width--; | 718 width--; |
694 } | 719 } |
695 | 720 |
696 /* After all that work, here's the vector part! */ | 721 /* After all that work, here's the vector part! */ |
697 extrawidth = (width % 4); | 722 extrawidth = (width % 4); |
698 width -= extrawidth; | 723 width -= extrawidth; |
699 valigner = VEC_ALIGNER(src); | 724 valigner = VEC_ALIGNER (src); |
700 vbits = vec_ld(0, src); | 725 vbits = vec_ld (0, src); |
701 | 726 |
702 while (width) { | 727 while (width) { |
703 voverflow = vec_ld(15, src); | 728 voverflow = vec_ld (15, src); |
704 src += 4; | 729 src += 4; |
705 width -= 4; | 730 width -= 4; |
706 vbits = vec_perm(vbits, voverflow, valigner); /* src is ready. */ | 731 vbits = vec_perm (vbits, voverflow, valigner); /* src is ready. */ |
707 vbits = vec_perm(vbits, vzero, vpermute); /* swizzle it. */ | 732 vbits = vec_perm (vbits, vzero, vpermute); /* swizzle it. */ |
708 vec_st(vbits, 0, dst); /* store it back out. */ | 733 vec_st (vbits, 0, dst); /* store it back out. */ |
709 dst += 4; | 734 dst += 4; |
710 vbits = voverflow; | 735 vbits = voverflow; |
711 } | 736 } |
712 | 737 |
713 assert(width == 0); | 738 assert (width == 0); |
714 | 739 |
715 /* cover pixels at the end of the row that didn't fit in 16 bytes. */ | 740 /* cover pixels at the end of the row that didn't fit in 16 bytes. */ |
716 while (extrawidth) { | 741 while (extrawidth) { |
717 bits = *(src++); /* max 7 pixels, don't bother with prefetch. */ | 742 bits = *(src++); /* max 7 pixels, don't bother with prefetch. */ |
718 RGBA_FROM_8888(bits, srcfmt, r, g, b, a); | 743 RGBA_FROM_8888 (bits, srcfmt, r, g, b, a); |
719 *(dst++) = MAKE8888(dstfmt, r, g, b, a); | 744 *(dst++) = MAKE8888 (dstfmt, r, g, b, a); |
720 extrawidth--; | 745 extrawidth--; |
721 } | 746 } |
722 | 747 |
723 src += srcskip >> 2; /* move to next row, accounting for pitch. */ | 748 src += srcskip >> 2; /* move to next row, accounting for pitch. */ |
724 dst += dstskip >> 2; | 749 dst += dstskip >> 2; |
725 } | 750 } |
726 | 751 |
727 } | 752 } |
728 | 753 |
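All of these loops share the same misaligned-load idiom: vec_ld silently truncates the address to a 16-byte boundary, so the code loads the two aligned quadwords straddling src and splices them with a vec_lvsl-derived permute (the VEC_ALIGNER macro). Reusing voverflow as the next iteration's vsrc keeps it at one load per 16 bytes. A minimal sketch:

```c
/* Read 16 bytes starting at a possibly misaligned src pointer. */
vector unsigned char lo = vec_ld (0, src);      /* aligned block at or below src */
vector unsigned char hi = vec_ld (15, src);     /* the next aligned block */
vector unsigned char perm = vec_lvsl (0, src);  /* permute built from src & 15 */
vector unsigned char v = vec_perm (lo, hi, perm);
```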
729 /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */ | 754 /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */ |
730 /* Use this on a G4 */ | 755 /* Use this on a G4 */ |
731 static void ConvertAltivec32to32_prefetch(SDL_BlitInfo *info) | 756 static void |
757 ConvertAltivec32to32_prefetch (SDL_BlitInfo * info) | |
732 { | 758 { |
733 const int scalar_dst_lead = sizeof (Uint32) * 4; | 759 const int scalar_dst_lead = sizeof (Uint32) * 4; |
734 const int vector_dst_lead = sizeof (Uint32) * 16; | 760 const int vector_dst_lead = sizeof (Uint32) * 16; |
735 | 761 |
736 int height = info->d_height; | 762 int height = info->d_height; |
738 int srcskip = info->s_skip; | 764 int srcskip = info->s_skip; |
739 Uint32 *dst = (Uint32 *) info->d_pixels; | 765 Uint32 *dst = (Uint32 *) info->d_pixels; |
740 int dstskip = info->d_skip; | 766 int dstskip = info->d_skip; |
741 SDL_PixelFormat *srcfmt = info->src; | 767 SDL_PixelFormat *srcfmt = info->src; |
742 SDL_PixelFormat *dstfmt = info->dst; | 768 SDL_PixelFormat *dstfmt = info->dst; |
743 vector unsigned int vzero = vec_splat_u32(0); | 769 vector unsigned int vzero = vec_splat_u32 (0); |
744 vector unsigned char vpermute = calc_swizzle32(srcfmt, dstfmt); | 770 vector unsigned char vpermute = calc_swizzle32 (srcfmt, dstfmt); |
745 if (dstfmt->Amask && !srcfmt->Amask) { | 771 if (dstfmt->Amask && !srcfmt->Amask) { |
746 if (srcfmt->alpha) { | 772 if (srcfmt->alpha) { |
747 vector unsigned char valpha; | 773 vector unsigned char valpha; |
748 ((unsigned char *)&valpha)[0] = srcfmt->alpha; | 774 ((unsigned char *) &valpha)[0] = srcfmt->alpha; |
749 vzero = (vector unsigned int)vec_splat(valpha, 0); | 775 vzero = (vector unsigned int) vec_splat (valpha, 0); |
750 } | 776 } |
751 } | 777 } |
752 | 778 |
753 assert(srcfmt->BytesPerPixel == 4); | 779 assert (srcfmt->BytesPerPixel == 4); |
754 assert(dstfmt->BytesPerPixel == 4); | 780 assert (dstfmt->BytesPerPixel == 4); |
755 | 781 |
756 while (height--) { | 782 while (height--) { |
757 vector unsigned char valigner; | 783 vector unsigned char valigner; |
758 vector unsigned int vbits; | 784 vector unsigned int vbits; |
759 vector unsigned int voverflow; | 785 vector unsigned int voverflow; |
762 | 788 |
763 int width = info->d_width; | 789 int width = info->d_width; |
764 int extrawidth; | 790 int extrawidth; |
765 | 791 |
766 /* do scalar until we can align... */ | 792 /* do scalar until we can align... */ |
767 while ((UNALIGNED_PTR(dst)) && (width)) { | 793 while ((UNALIGNED_PTR (dst)) && (width)) { |
768 vec_dstt(src+scalar_dst_lead, DST_CTRL(2,32,1024), DST_CHAN_SRC); | 794 vec_dstt (src + scalar_dst_lead, DST_CTRL (2, 32, 1024), |
769 vec_dstst(dst+scalar_dst_lead, DST_CTRL(2,32,1024), DST_CHAN_DEST); | 795 DST_CHAN_SRC); |
796 vec_dstst (dst + scalar_dst_lead, DST_CTRL (2, 32, 1024), | |
797 DST_CHAN_DEST); | |
770 bits = *(src++); | 798 bits = *(src++); |
771 RGBA_FROM_8888(bits, srcfmt, r, g, b, a); | 799 RGBA_FROM_8888 (bits, srcfmt, r, g, b, a); |
772 *(dst++) = MAKE8888(dstfmt, r, g, b, a); | 800 *(dst++) = MAKE8888 (dstfmt, r, g, b, a); |
773 width--; | 801 width--; |
774 } | 802 } |
775 | 803 |
776 /* After all that work, here's the vector part! */ | 804 /* After all that work, here's the vector part! */ |
777 extrawidth = (width % 4); | 805 extrawidth = (width % 4); |
778 width -= extrawidth; | 806 width -= extrawidth; |
779 valigner = VEC_ALIGNER(src); | 807 valigner = VEC_ALIGNER (src); |
780 vbits = vec_ld(0, src); | 808 vbits = vec_ld (0, src); |
781 | 809 |
782 while (width) { | 810 while (width) { |
783 vec_dstt(src+vector_dst_lead, DST_CTRL(2,32,1024), DST_CHAN_SRC); | 811 vec_dstt (src + vector_dst_lead, DST_CTRL (2, 32, 1024), |
784 vec_dstst(dst+vector_dst_lead, DST_CTRL(2,32,1024), DST_CHAN_DEST); | 812 DST_CHAN_SRC); |
785 voverflow = vec_ld(15, src); | 813 vec_dstst (dst + vector_dst_lead, DST_CTRL (2, 32, 1024), |
814 DST_CHAN_DEST); | |
815 voverflow = vec_ld (15, src); | |
786 src += 4; | 816 src += 4; |
787 width -= 4; | 817 width -= 4; |
788 vbits = vec_perm(vbits, voverflow, valigner); /* src is ready. */ | 818 vbits = vec_perm (vbits, voverflow, valigner); /* src is ready. */ |
789 vbits = vec_perm(vbits, vzero, vpermute); /* swizzle it. */ | 819 vbits = vec_perm (vbits, vzero, vpermute); /* swizzle it. */ |
790 vec_st(vbits, 0, dst); /* store it back out. */ | 820 vec_st (vbits, 0, dst); /* store it back out. */ |
791 dst += 4; | 821 dst += 4; |
792 vbits = voverflow; | 822 vbits = voverflow; |
793 } | 823 } |
794 | 824 |
795 assert(width == 0); | 825 assert (width == 0); |
796 | 826 |
797 /* cover pixels at the end of the row that didn't fit in 16 bytes. */ | 827 /* cover pixels at the end of the row that didn't fit in 16 bytes. */ |
798 while (extrawidth) { | 828 while (extrawidth) { |
799 bits = *(src++); /* max 7 pixels, don't bother with prefetch. */ | 829 bits = *(src++); /* max 7 pixels, don't bother with prefetch. */ |
800 RGBA_FROM_8888(bits, srcfmt, r, g, b, a); | 830 RGBA_FROM_8888 (bits, srcfmt, r, g, b, a); |
801 *(dst++) = MAKE8888(dstfmt, r, g, b, a); | 831 *(dst++) = MAKE8888 (dstfmt, r, g, b, a); |
802 extrawidth--; | 832 extrawidth--; |
803 } | 833 } |
804 | 834 |
805 src += srcskip >> 2; /* move to next row, accounting for pitch. */ | 835 src += srcskip >> 2; /* move to next row, accounting for pitch. */ |
806 dst += dstskip >> 2; | 836 dst += dstskip >> 2; |
807 } | 837 } |
808 | 838 |
809 vec_dss(DST_CHAN_SRC); | 839 vec_dss (DST_CHAN_SRC); |
810 vec_dss(DST_CHAN_DEST); | 840 vec_dss (DST_CHAN_DEST); |
811 } | 841 } |
812 | 842 |
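The G4 variant interleaves vec_dstt/vec_dstst data-stream prefetch hints ahead of its loads and stores, then shuts the streams down with vec_dss in the epilogue. DST_CTRL is defined earlier in this file; assuming the standard AltiVec stream-descriptor encoding, it packs a block size (in 16-byte units), a block count, and a byte stride:

```c
/* Assumed encoding: DST_CTRL(2, 32, 1024) would describe 32 blocks of
   2 vectors (32 bytes) each, spaced 1024 bytes apart. */
#define DST_CTRL(size, count, stride) \
    (((size) << 24) | ((count) << 16) | (stride))
```

Per the comments, the noprefetch variant is meant for the G5 and this one for the G4; GetBlitFeatures below picks between them using the L3-cache heuristic (with a FIXME noting it should really check for G5 or later).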
813 static Uint32 GetBlitFeatures( void ) | 843 static Uint32 |
844 GetBlitFeatures (void) | |
814 { | 845 { |
815 static Uint32 features = 0xffffffff; | 846 static Uint32 features = 0xffffffff; |
816 if (features == 0xffffffff) { | 847 if (features == 0xffffffff) { |
817 /* Provide an override for testing .. */ | 848 /* Provide an override for testing .. */ |
818 char *override = SDL_getenv("SDL_ALTIVEC_BLIT_FEATURES"); | 849 char *override = SDL_getenv ("SDL_ALTIVEC_BLIT_FEATURES"); |
819 if (override) { | 850 if (override) { |
820 features = 0; | 851 features = 0; |
821 SDL_sscanf(override, "%u", &features); | 852 SDL_sscanf (override, "%u", &features); |
822 } else { | 853 } else { |
823 features = ( 0 | 854 features = (0 |
824 /* Feature 1 is has-MMX */ | 855 /* Feature 1 is has-MMX */ |
825 | ((SDL_HasMMX()) ? 1 : 0) | 856 | ((SDL_HasMMX ())? 1 : 0) |
826 /* Feature 2 is has-AltiVec */ | 857 /* Feature 2 is has-AltiVec */ |
827 | ((SDL_HasAltiVec()) ? 2 : 0) | 858 | ((SDL_HasAltiVec ())? 2 : 0) |
828 /* Feature 4 is dont-use-prefetch */ | 859 /* Feature 4 is dont-use-prefetch */ |
829 /* !!!! FIXME: Check for G5 or later, not the cache size! Always prefetch on a G4. */ | 860 /* !!!! FIXME: Check for G5 or later, not the cache size! Always prefetch on a G4. */ |
830 | ((GetL3CacheSize() == 0) ? 4 : 0) | 861 | ((GetL3CacheSize () == 0) ? 4 : 0) |
831 ); | 862 ); |
832 } | 863 } |
833 } | 864 } |
834 return features; | 865 return features; |
835 } | 866 } |
867 | |
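The feature bits double as a testing hook: SDL_ALTIVEC_BLIT_FEATURES overrides autodetection, with 1 = has-MMX, 2 = has-AltiVec, 4 = don't-use-prefetch (values parsed by SDL_sscanf's %u). A usage sketch with plain POSIX setenv:

```c
#include <stdlib.h>

static void
force_g5_like_blitters (void)
{
    /* AltiVec (2) plus don't-use-prefetch (4); must be in the
       environment before the blit tables are first built. */
    setenv ("SDL_ALTIVEC_BLIT_FEATURES", "6", 1);
}
```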
836 #if __MWERKS__ | 868 #if __MWERKS__ |
837 #pragma altivec_model off | 869 #pragma altivec_model off |
838 #endif | 870 #endif |
839 #else | 871 #else |
840 /* Feature 1 is has-MMX */ | 872 /* Feature 1 is has-MMX */ |
867 #define RGB888_RGB332(dst, src) { \ | 899 #define RGB888_RGB332(dst, src) { \ |
868 dst = (Uint8)((((src)&0x00E00000)>>16)| \ | 900 dst = (Uint8)((((src)&0x00E00000)>>16)| \ |
869 (((src)&0x0000E000)>>11)| \ | 901 (((src)&0x0000E000)>>11)| \ |
870 (((src)&0x000000C0)>>6)); \ | 902 (((src)&0x000000C0)>>6)); \ |
871 } | 903 } |
872 static void Blit_RGB888_index8(SDL_BlitInfo *info) | 904 static void |
905 Blit_RGB888_index8 (SDL_BlitInfo * info) | |
873 { | 906 { |
874 #ifndef USE_DUFFS_LOOP | 907 #ifndef USE_DUFFS_LOOP |
875 int c; | 908 int c; |
876 #endif | 909 #endif |
877 int width, height; | 910 int width, height; |
878 Uint32 *src; | 911 Uint32 *src; |
879 const Uint8 *map; | 912 const Uint8 *map; |
880 Uint8 *dst; | 913 Uint8 *dst; |
881 int srcskip, dstskip; | 914 int srcskip, dstskip; |
882 | 915 |
883 /* Set up some basic variables */ | 916 /* Set up some basic variables */ |
884 width = info->d_width; | 917 width = info->d_width; |
885 height = info->d_height; | 918 height = info->d_height; |
886 src = (Uint32 *)info->s_pixels; | 919 src = (Uint32 *) info->s_pixels; |
887 srcskip = info->s_skip/4; | 920 srcskip = info->s_skip / 4; |
888 dst = info->d_pixels; | 921 dst = info->d_pixels; |
889 dstskip = info->d_skip; | 922 dstskip = info->d_skip; |
890 map = info->table; | 923 map = info->table; |
891 | 924 |
892 if ( map == NULL ) { | 925 if (map == NULL) { |
893 while ( height-- ) { | 926 while (height--) { |
894 #ifdef USE_DUFFS_LOOP | 927 #ifdef USE_DUFFS_LOOP |
928 /* *INDENT-OFF* */ | |
895 DUFFS_LOOP( | 929 DUFFS_LOOP( |
896 RGB888_RGB332(*dst++, *src); | 930 RGB888_RGB332(*dst++, *src); |
897 , width); | 931 , width); |
932 /* *INDENT-ON* */ | |
898 #else | 933 #else |
899 for ( c=width/4; c; --c ) { | 934 for (c = width / 4; c; --c) { |
900 /* Pack RGB into 8bit pixel */ | 935 /* Pack RGB into 8bit pixel */ |
901 ++src; | 936 ++src; |
902 RGB888_RGB332(*dst++, *src); | 937 RGB888_RGB332 (*dst++, *src); |
903 ++src; | 938 ++src; |
904 RGB888_RGB332(*dst++, *src); | 939 RGB888_RGB332 (*dst++, *src); |
905 ++src; | 940 ++src; |
906 RGB888_RGB332(*dst++, *src); | 941 RGB888_RGB332 (*dst++, *src); |
907 ++src; | 942 ++src; |
908 } | 943 } |
909 switch ( width & 3 ) { | 944 switch (width & 3) { |
910 case 3: | 945 case 3: |
911 RGB888_RGB332(*dst++, *src); | 946 RGB888_RGB332 (*dst++, *src); |
912 ++src; | 947 ++src; |
913 case 2: | 948 case 2: |
914 RGB888_RGB332(*dst++, *src); | 949 RGB888_RGB332 (*dst++, *src); |
915 ++src; | 950 ++src; |
916 case 1: | 951 case 1: |
917 RGB888_RGB332(*dst++, *src); | 952 RGB888_RGB332 (*dst++, *src); |
918 ++src; | 953 ++src; |
919 } | 954 } |
920 #endif /* USE_DUFFS_LOOP */ | 955 #endif /* USE_DUFFS_LOOP */ |
921 src += srcskip; | 956 src += srcskip; |
922 dst += dstskip; | 957 dst += dstskip; |
923 } | 958 } |
924 } else { | 959 } else { |
925 int Pixel; | 960 int Pixel; |
926 | 961 |
927 while ( height-- ) { | 962 while (height--) { |
928 #ifdef USE_DUFFS_LOOP | 963 #ifdef USE_DUFFS_LOOP |
964 /* *INDENT-OFF* */ | |
929 DUFFS_LOOP( | 965 DUFFS_LOOP( |
930 RGB888_RGB332(Pixel, *src); | 966 RGB888_RGB332(Pixel, *src); |
931 *dst++ = map[Pixel]; | 967 *dst++ = map[Pixel]; |
932 ++src; | 968 ++src; |
933 , width); | 969 , width); |
970 /* *INDENT-ON* */ | |
934 #else | 971 #else |
935 for ( c=width/4; c; --c ) { | 972 for (c = width / 4; c; --c) { |
936 /* Pack RGB into 8bit pixel */ | 973 /* Pack RGB into 8bit pixel */ |
937 RGB888_RGB332(Pixel, *src); | 974 RGB888_RGB332 (Pixel, *src); |
938 *dst++ = map[Pixel]; | 975 *dst++ = map[Pixel]; |
939 ++src; | 976 ++src; |
940 RGB888_RGB332(Pixel, *src); | 977 RGB888_RGB332 (Pixel, *src); |
941 *dst++ = map[Pixel]; | 978 *dst++ = map[Pixel]; |
942 ++src; | 979 ++src; |
943 RGB888_RGB332(Pixel, *src); | 980 RGB888_RGB332 (Pixel, *src); |
944 *dst++ = map[Pixel]; | 981 *dst++ = map[Pixel]; |
945 ++src; | 982 ++src; |
946 RGB888_RGB332(Pixel, *src); | 983 RGB888_RGB332 (Pixel, *src); |
947 *dst++ = map[Pixel]; | 984 *dst++ = map[Pixel]; |
948 ++src; | 985 ++src; |
949 } | 986 } |
950 switch ( width & 3 ) { | 987 switch (width & 3) { |
951 case 3: | 988 case 3: |
952 RGB888_RGB332(Pixel, *src); | 989 RGB888_RGB332 (Pixel, *src); |
953 *dst++ = map[Pixel]; | 990 *dst++ = map[Pixel]; |
954 ++src; | 991 ++src; |
955 case 2: | 992 case 2: |
956 RGB888_RGB332(Pixel, *src); | 993 RGB888_RGB332 (Pixel, *src); |
957 *dst++ = map[Pixel]; | 994 *dst++ = map[Pixel]; |
958 ++src; | 995 ++src; |
959 case 1: | 996 case 1: |
960 RGB888_RGB332(Pixel, *src); | 997 RGB888_RGB332 (Pixel, *src); |
961 *dst++ = map[Pixel]; | 998 *dst++ = map[Pixel]; |
962 ++src; | 999 ++src; |
963 } | 1000 } |
964 #endif /* USE_DUFFS_LOOP */ | 1001 #endif /* USE_DUFFS_LOOP */ |
965 src += srcskip; | 1002 src += srcskip; |
966 dst += dstskip; | 1003 dst += dstskip; |
967 } | 1004 } |
968 } | 1005 } |
969 } | 1006 } |
1007 | |
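The RGB888_RGB332 macro keeps only the top 3/3/2 bits of each channel; a worked example:

```c
/* 0x00E00000 >> 16 puts red's top 3 bits at 7-5, 0x0000E000 >> 11 puts
   green's at 4-2, and 0x000000C0 >> 6 puts blue's at 1-0. */
static Uint8
pack_332 (Uint32 p)
{
    return (Uint8) (((p & 0x00E00000) >> 16) |
                    ((p & 0x0000E000) >> 11) |
                    ((p & 0x000000C0) >> 6));
}

/* pack_332 (0x00FF8040) == 0xE0 | 0x10 | 0x01 == 0xF1 */
```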
970 /* Special optimized blit for RGB 8-8-8 --> RGB 5-5-5 */ | 1008 /* Special optimized blit for RGB 8-8-8 --> RGB 5-5-5 */ |
971 #define RGB888_RGB555(dst, src) { \ | 1009 #define RGB888_RGB555(dst, src) { \ |
972 *(Uint16 *)(dst) = (Uint16)((((*src)&0x00F80000)>>9)| \ | 1010 *(Uint16 *)(dst) = (Uint16)((((*src)&0x00F80000)>>9)| \ |
973 (((*src)&0x0000F800)>>6)| \ | 1011 (((*src)&0x0000F800)>>6)| \ |
974 (((*src)&0x000000F8)>>3)); \ | 1012 (((*src)&0x000000F8)>>3)); \ |
979 (((src[HI])&0x000000F8)>>3))<<16)| \ | 1017 (((src[HI])&0x000000F8)>>3))<<16)| \ |
980 (((src[LO])&0x00F80000)>>9)| \ | 1018 (((src[LO])&0x00F80000)>>9)| \ |
981 (((src[LO])&0x0000F800)>>6)| \ | 1019 (((src[LO])&0x0000F800)>>6)| \ |
982 (((src[LO])&0x000000F8)>>3); \ | 1020 (((src[LO])&0x000000F8)>>3); \ |
983 } | 1021 } |
static void
Blit_RGB888_RGB555(SDL_BlitInfo * info)
{
#ifndef USE_DUFFS_LOOP
    int c;
#endif
    int width, height;
    Uint32 *src;
    Uint16 *dst;
    int srcskip, dstskip;

    /* Set up some basic variables */
    width = info->d_width;
    height = info->d_height;
    src = (Uint32 *) info->s_pixels;
    srcskip = info->s_skip / 4;
    dst = (Uint16 *) info->d_pixels;
    dstskip = info->d_skip / 2;

#ifdef USE_DUFFS_LOOP
    while (height--) {
        /* *INDENT-OFF* */
        DUFFS_LOOP(
            RGB888_RGB555(dst, src);
            ++src;
            ++dst;
        , width);
        /* *INDENT-ON* */
        src += srcskip;
        dst += dstskip;
    }
#else
    /* Memory align at 4-byte boundary, if necessary */
    if ((long) dst & 0x03) {
        /* Don't do anything if width is 0 */
        if (width == 0) {
            return;
        }
        --width;

        while (height--) {
            /* Perform copy alignment */
            RGB888_RGB555(dst, src);
            ++src;
            ++dst;

            /* Copy in 4 pixel chunks */
            for (c = width / 4; c; --c) {
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
            }
            /* Get any leftovers */
            switch (width & 3) {
            case 3:
                RGB888_RGB555(dst, src);
                ++src;
                ++dst;
            case 2:
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
                break;
            case 1:
                RGB888_RGB555(dst, src);
                ++src;
                ++dst;
                break;
            }
            src += srcskip;
            dst += dstskip;
        }
    } else {
        while (height--) {
            /* Copy in 4 pixel chunks */
            for (c = width / 4; c; --c) {
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
            }
            /* Get any leftovers */
            switch (width & 3) {
            case 3:
                RGB888_RGB555(dst, src);
                ++src;
                ++dst;
            case 2:
                RGB888_RGB555_TWO(dst, src);
                src += 2;
                dst += 2;
                break;
            case 1:
                RGB888_RGB555(dst, src);
                ++src;
                ++dst;
                break;
            }
            src += srcskip;
            dst += dstskip;
        }
    }
#endif /* USE_DUFFS_LOOP */
}

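/*
 * Note on the structure of Blit_RGB888_RGB555 above (and the RGB565
 * variant below): the non-DUFFS_LOOP path writes pixel pairs through
 * Uint32 stores, so when dst starts on a 2-byte boundary one pixel is
 * emitted singly first to reach 4-byte alignment. In the leftover
 * switch, case 3 deliberately falls through to case 2: three remaining
 * pixels are handled as one single write plus one paired write.
 */
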
/* Special optimized blit for RGB 8-8-8 --> RGB 5-6-5 */
#define RGB888_RGB565(dst, src) { \
    *(Uint16 *)(dst) = (Uint16)((((*src)&0x00F80000)>>8)| \
                                (((*src)&0x0000FC00)>>5)| \
                                (((*src)&0x000000F8)>>3)); \
}
#define RGB888_RGB565_TWO(dst, src) { \
    *(Uint32 *)(dst) = (((((src[HI])&0x00F80000)>>8)| \
                        (((src[HI])&0x0000FC00)>>5)| \
                        (((src[HI])&0x000000F8)>>3))<<16)| \
                        (((src[LO])&0x00F80000)>>8)| \
                        (((src[LO])&0x0000FC00)>>5)| \
                        (((src[LO])&0x000000F8)>>3); \
}
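/*
 * Worked example: for the 888 pixel 0x00FF8040 (R=0xFF, G=0x80, B=0x40),
 *   ((0x00F80000)>>8) | ((0x00008000)>>5) | ((0x00000040)>>3)
 *     = 0xF800 | 0x0400 | 0x0008 = 0xFC08,
 * keeping five bits of red, six of green, and five of blue
 * (RRRRRGGGGGGBBBBB).
 */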
static void
Blit_RGB888_RGB565(SDL_BlitInfo * info)
{
#ifndef USE_DUFFS_LOOP
    int c;
#endif
    int width, height;
    Uint32 *src;
    Uint16 *dst;
    int srcskip, dstskip;

    /* Set up some basic variables */
    width = info->d_width;
    height = info->d_height;
    src = (Uint32 *) info->s_pixels;
    srcskip = info->s_skip / 4;
    dst = (Uint16 *) info->d_pixels;
    dstskip = info->d_skip / 2;

#ifdef USE_DUFFS_LOOP
    while (height--) {
        /* *INDENT-OFF* */
        DUFFS_LOOP(
            RGB888_RGB565(dst, src);
            ++src;
            ++dst;
        , width);
        /* *INDENT-ON* */
        src += srcskip;
        dst += dstskip;
    }
#else
    /* Memory align at 4-byte boundary, if necessary */
    if ((long) dst & 0x03) {
        /* Don't do anything if width is 0 */
        if (width == 0) {
            return;
        }
        --width;

        while (height--) {
            /* Perform copy alignment */
            RGB888_RGB565(dst, src);
            ++src;
            ++dst;

            /* Copy in 4 pixel chunks */
            for (c = width / 4; c; --c) {
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
            }
            /* Get any leftovers */
            switch (width & 3) {
            case 3:
                RGB888_RGB565(dst, src);
                ++src;
                ++dst;
            case 2:
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
                break;
            case 1:
                RGB888_RGB565(dst, src);
                ++src;
                ++dst;
                break;
            }
            src += srcskip;
            dst += dstskip;
        }
    } else {
        while (height--) {
            /* Copy in 4 pixel chunks */
            for (c = width / 4; c; --c) {
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
            }
            /* Get any leftovers */
            switch (width & 3) {
            case 3:
                RGB888_RGB565(dst, src);
                ++src;
                ++dst;
            case 2:
                RGB888_RGB565_TWO(dst, src);
                src += 2;
                dst += 2;
                break;
            case 1:
                RGB888_RGB565(dst, src);
                ++src;
                ++dst;
                break;
            }
            src += srcskip;
            dst += dstskip;
        }
    }
#endif /* USE_DUFFS_LOOP */
}

#endif /* SDL_HERMES_BLITTERS */


/* Special optimized blit for RGB 5-6-5 --> 32-bit RGB surfaces */
#define RGB565_32(dst, src, map) (map[src[LO]*2] + map[src[HI]*2+1])
static void
Blit_RGB565_32(SDL_BlitInfo * info, const Uint32 * map)
{
#ifndef USE_DUFFS_LOOP
    int c;
#endif
    int width, height;
    Uint8 *src;
    Uint32 *dst;
    int srcskip, dstskip;

    /* Set up some basic variables */
    width = info->d_width;
    height = info->d_height;
    src = (Uint8 *) info->s_pixels;
    srcskip = info->s_skip;
    dst = (Uint32 *) info->d_pixels;
    dstskip = info->d_skip / 4;

#ifdef USE_DUFFS_LOOP
    while (height--) {
        /* *INDENT-OFF* */
        DUFFS_LOOP(
        {
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
        },
        width);
        /* *INDENT-ON* */
        src += srcskip;
        dst += dstskip;
    }
#else
    while (height--) {
        /* Copy in 4 pixel chunks */
        for (c = width / 4; c; --c) {
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
        }
        /* Get any leftovers */
        switch (width & 3) {
        case 3:
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
        case 2:
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
        case 1:
            *dst++ = RGB565_32(dst, src, map);
            src += 2;
            break;
        }
        src += srcskip;
        dst += dstskip;
    }
#endif /* USE_DUFFS_LOOP */
}

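/*
 * How the 512-entry tables below work: a 16-bit 565 pixel is read as two
 * bytes. The low byte holds the five blue bits plus the low three green
 * bits; the high byte holds the five red bits plus the high three green
 * bits. For each byte value b, entry [b*2] is its 32-bit contribution as
 * the low byte and entry [b*2+1] its contribution as the high byte, so
 * RGB565_32 reconstructs the expanded pixel with one addition. For
 * example, with the ARGB table, pure green 0x07E0 (lo=0xE0, hi=0x07)
 * yields 0x00001c00 + 0xff00e200 = 0xff00fe00.
 */
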
/* Special optimized blit for RGB 5-6-5 --> ARGB 8-8-8-8 */
static const Uint32 RGB565_ARGB8888_LUT[512] = {
    0x00000000, 0xff000000, 0x00000008, 0xff002000,
    0x00000010, 0xff004000, 0x00000018, 0xff006100,
    0x00000020, 0xff008100, 0x00000029, 0xff00a100,
    0x00000031, 0xff00c200, 0x00000039, 0xff00e200,
    0x00000041, 0xff080000, 0x0000004a, 0xff082000,
    0x00000052, 0xff084000, 0x0000005a, 0xff086100,
    0x00000062, 0xff088100, 0x0000006a, 0xff08a100,
    0x00000073, 0xff08c200, 0x0000007b, 0xff08e200,
    0x00000083, 0xff100000, 0x0000008b, 0xff102000,
    0x00000094, 0xff104000, 0x0000009c, 0xff106100,
    0x000000a4, 0xff108100, 0x000000ac, 0xff10a100,
    0x000000b4, 0xff10c200, 0x000000bd, 0xff10e200,
    0x000000c5, 0xff180000, 0x000000cd, 0xff182000,
    0x000000d5, 0xff184000, 0x000000de, 0xff186100,
    0x000000e6, 0xff188100, 0x000000ee, 0xff18a100,
    0x000000f6, 0xff18c200, 0x000000ff, 0xff18e200,
    0x00000400, 0xff200000, 0x00000408, 0xff202000,
    0x00000410, 0xff204000, 0x00000418, 0xff206100,
    0x00000420, 0xff208100, 0x00000429, 0xff20a100,
    0x00000431, 0xff20c200, 0x00000439, 0xff20e200,
    0x00000441, 0xff290000, 0x0000044a, 0xff292000,
    0x00000452, 0xff294000, 0x0000045a, 0xff296100,
    0x00000462, 0xff298100, 0x0000046a, 0xff29a100,
    0x00000473, 0xff29c200, 0x0000047b, 0xff29e200,
    0x00000483, 0xff310000, 0x0000048b, 0xff312000,
    0x00000494, 0xff314000, 0x0000049c, 0xff316100,
    0x000004a4, 0xff318100, 0x000004ac, 0xff31a100,
    0x000004b4, 0xff31c200, 0x000004bd, 0xff31e200,
    0x000004c5, 0xff390000, 0x000004cd, 0xff392000,
    0x000004d5, 0xff394000, 0x000004de, 0xff396100,
    0x000004e6, 0xff398100, 0x000004ee, 0xff39a100,
    0x000004f6, 0xff39c200, 0x000004ff, 0xff39e200,
    0x00000800, 0xff410000, 0x00000808, 0xff412000,
    0x00000810, 0xff414000, 0x00000818, 0xff416100,
    0x00000820, 0xff418100, 0x00000829, 0xff41a100,
    0x00000831, 0xff41c200, 0x00000839, 0xff41e200,
    0x00000841, 0xff4a0000, 0x0000084a, 0xff4a2000,
    0x00000852, 0xff4a4000, 0x0000085a, 0xff4a6100,
    0x00000862, 0xff4a8100, 0x0000086a, 0xff4aa100,
    0x00000873, 0xff4ac200, 0x0000087b, 0xff4ae200,
    0x00000883, 0xff520000, 0x0000088b, 0xff522000,
    0x00000894, 0xff524000, 0x0000089c, 0xff526100,
    0x000008a4, 0xff528100, 0x000008ac, 0xff52a100,
    0x000008b4, 0xff52c200, 0x000008bd, 0xff52e200,
    0x000008c5, 0xff5a0000, 0x000008cd, 0xff5a2000,
    0x000008d5, 0xff5a4000, 0x000008de, 0xff5a6100,
    0x000008e6, 0xff5a8100, 0x000008ee, 0xff5aa100,
    0x000008f6, 0xff5ac200, 0x000008ff, 0xff5ae200,
    0x00000c00, 0xff620000, 0x00000c08, 0xff622000,
    0x00000c10, 0xff624000, 0x00000c18, 0xff626100,
    0x00000c20, 0xff628100, 0x00000c29, 0xff62a100,
    0x00000c31, 0xff62c200, 0x00000c39, 0xff62e200,
    0x00000c41, 0xff6a0000, 0x00000c4a, 0xff6a2000,
    0x00000c52, 0xff6a4000, 0x00000c5a, 0xff6a6100,
    0x00000c62, 0xff6a8100, 0x00000c6a, 0xff6aa100,
    0x00000c73, 0xff6ac200, 0x00000c7b, 0xff6ae200,
    0x00000c83, 0xff730000, 0x00000c8b, 0xff732000,
    0x00000c94, 0xff734000, 0x00000c9c, 0xff736100,
    0x00000ca4, 0xff738100, 0x00000cac, 0xff73a100,
    0x00000cb4, 0xff73c200, 0x00000cbd, 0xff73e200,
    0x00000cc5, 0xff7b0000, 0x00000ccd, 0xff7b2000,
    0x00000cd5, 0xff7b4000, 0x00000cde, 0xff7b6100,
    0x00000ce6, 0xff7b8100, 0x00000cee, 0xff7ba100,
    0x00000cf6, 0xff7bc200, 0x00000cff, 0xff7be200,
    0x00001000, 0xff830000, 0x00001008, 0xff832000,
    0x00001010, 0xff834000, 0x00001018, 0xff836100,
    0x00001020, 0xff838100, 0x00001029, 0xff83a100,
    0x00001031, 0xff83c200, 0x00001039, 0xff83e200,
    0x00001041, 0xff8b0000, 0x0000104a, 0xff8b2000,
    0x00001052, 0xff8b4000, 0x0000105a, 0xff8b6100,
    0x00001062, 0xff8b8100, 0x0000106a, 0xff8ba100,
    0x00001073, 0xff8bc200, 0x0000107b, 0xff8be200,
    0x00001083, 0xff940000, 0x0000108b, 0xff942000,
    0x00001094, 0xff944000, 0x0000109c, 0xff946100,
    0x000010a4, 0xff948100, 0x000010ac, 0xff94a100,
    0x000010b4, 0xff94c200, 0x000010bd, 0xff94e200,
    0x000010c5, 0xff9c0000, 0x000010cd, 0xff9c2000,
    0x000010d5, 0xff9c4000, 0x000010de, 0xff9c6100,
    0x000010e6, 0xff9c8100, 0x000010ee, 0xff9ca100,
    0x000010f6, 0xff9cc200, 0x000010ff, 0xff9ce200,
    0x00001400, 0xffa40000, 0x00001408, 0xffa42000,
    0x00001410, 0xffa44000, 0x00001418, 0xffa46100,
    0x00001420, 0xffa48100, 0x00001429, 0xffa4a100,
    0x00001431, 0xffa4c200, 0x00001439, 0xffa4e200,
    0x00001441, 0xffac0000, 0x0000144a, 0xffac2000,
    0x00001452, 0xffac4000, 0x0000145a, 0xffac6100,
    0x00001462, 0xffac8100, 0x0000146a, 0xffaca100,
    0x00001473, 0xffacc200, 0x0000147b, 0xfface200,
    0x00001483, 0xffb40000, 0x0000148b, 0xffb42000,
    0x00001494, 0xffb44000, 0x0000149c, 0xffb46100,
    0x000014a4, 0xffb48100, 0x000014ac, 0xffb4a100,
    0x000014b4, 0xffb4c200, 0x000014bd, 0xffb4e200,
    0x000014c5, 0xffbd0000, 0x000014cd, 0xffbd2000,
    0x000014d5, 0xffbd4000, 0x000014de, 0xffbd6100,
    0x000014e6, 0xffbd8100, 0x000014ee, 0xffbda100,
    0x000014f6, 0xffbdc200, 0x000014ff, 0xffbde200,
    0x00001800, 0xffc50000, 0x00001808, 0xffc52000,
    0x00001810, 0xffc54000, 0x00001818, 0xffc56100,
    0x00001820, 0xffc58100, 0x00001829, 0xffc5a100,
    0x00001831, 0xffc5c200, 0x00001839, 0xffc5e200,
    0x00001841, 0xffcd0000, 0x0000184a, 0xffcd2000,
    0x00001852, 0xffcd4000, 0x0000185a, 0xffcd6100,
    0x00001862, 0xffcd8100, 0x0000186a, 0xffcda100,
    0x00001873, 0xffcdc200, 0x0000187b, 0xffcde200,
    0x00001883, 0xffd50000, 0x0000188b, 0xffd52000,
    0x00001894, 0xffd54000, 0x0000189c, 0xffd56100,
    0x000018a4, 0xffd58100, 0x000018ac, 0xffd5a100,
    0x000018b4, 0xffd5c200, 0x000018bd, 0xffd5e200,
    0x000018c5, 0xffde0000, 0x000018cd, 0xffde2000,
    0x000018d5, 0xffde4000, 0x000018de, 0xffde6100,
    0x000018e6, 0xffde8100, 0x000018ee, 0xffdea100,
    0x000018f6, 0xffdec200, 0x000018ff, 0xffdee200,
    0x00001c00, 0xffe60000, 0x00001c08, 0xffe62000,
    0x00001c10, 0xffe64000, 0x00001c18, 0xffe66100,
    0x00001c20, 0xffe68100, 0x00001c29, 0xffe6a100,
    0x00001c31, 0xffe6c200, 0x00001c39, 0xffe6e200,
    0x00001c41, 0xffee0000, 0x00001c4a, 0xffee2000,
    0x00001c52, 0xffee4000, 0x00001c5a, 0xffee6100,
    0x00001c62, 0xffee8100, 0x00001c6a, 0xffeea100,
    0x00001c73, 0xffeec200, 0x00001c7b, 0xffeee200,
    0x00001c83, 0xfff60000, 0x00001c8b, 0xfff62000,
    0x00001c94, 0xfff64000, 0x00001c9c, 0xfff66100,
    0x00001ca4, 0xfff68100, 0x00001cac, 0xfff6a100,
    0x00001cb4, 0xfff6c200, 0x00001cbd, 0xfff6e200,
    0x00001cc5, 0xffff0000, 0x00001ccd, 0xffff2000,
    0x00001cd5, 0xffff4000, 0x00001cde, 0xffff6100,
    0x00001ce6, 0xffff8100, 0x00001cee, 0xffffa100,
    0x00001cf6, 0xffffc200, 0x00001cff, 0xffffe200
};
static void
Blit_RGB565_ARGB8888(SDL_BlitInfo * info)
{
    Blit_RGB565_32(info, RGB565_ARGB8888_LUT);
}

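#if 0
/*
 * Illustrative sketch, not part of the original file: the table above can
 * be regenerated rather than hand-maintained. Each byte value b gets two
 * entries: [b*2] is its contribution as the low byte of a 565 pixel
 * (five blue bits plus the low three green bits) and [b*2+1] as the high
 * byte (five red bits plus the high three green bits, with opaque alpha).
 * The truncating scale v8 = v * 255 / vmax matches the entries shown;
 * the function name is hypothetical.
 */
static void
Make_RGB565_ARGB8888_LUT(Uint32 * lut)
{
    int b;

    for (b = 0; b < 256; ++b) {
        Uint32 blue = ((b & 0x1F) * 255) / 31;          /* BBBBB of low byte */
        Uint32 glo = ((b >> 5) * 255) / 63;             /* low 3 green bits */
        Uint32 red = ((b >> 3) * 255) / 31;             /* RRRRR of high byte */
        Uint32 ghi = (((b & 0x07) << 3) * 255) / 63;    /* high 3 green bits */

        lut[b * 2] = (glo << 8) | blue;                 /* contribution as low byte */
        lut[b * 2 + 1] = 0xff000000 | (red << 16) | (ghi << 8);  /* as high byte */
    }
}
#endif
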
/* Special optimized blit for RGB 5-6-5 --> ABGR 8-8-8-8 */
static const Uint32 RGB565_ABGR8888_LUT[512] = {
    0xff000000, 0x00000000, 0xff080000, 0x00002000,
    0xff100000, 0x00004000, 0xff180000, 0x00006100,
    0xff200000, 0x00008100, 0xff290000, 0x0000a100,
    0xff310000, 0x0000c200, 0xff390000, 0x0000e200,
    0xff410000, 0x00000008, 0xff4a0000, 0x00002008,
    0xff520000, 0x00004008, 0xff5a0000, 0x00006108,
    0xff620000, 0x00008108, 0xff6a0000, 0x0000a108,
    0xff730000, 0x0000c208, 0xff7b0000, 0x0000e208,
    0xff830000, 0x00000010, 0xff8b0000, 0x00002010,
    0xff940000, 0x00004010, 0xff9c0000, 0x00006110,
    0xffa40000, 0x00008110, 0xffac0000, 0x0000a110,
    0xffb40000, 0x0000c210, 0xffbd0000, 0x0000e210,
    0xffc50000, 0x00000018, 0xffcd0000, 0x00002018,
    0xffd50000, 0x00004018, 0xffde0000, 0x00006118,
    0xffe60000, 0x00008118, 0xffee0000, 0x0000a118,
    0xfff60000, 0x0000c218, 0xffff0000, 0x0000e218,
    0xff000400, 0x00000020, 0xff080400, 0x00002020,
    0xff100400, 0x00004020, 0xff180400, 0x00006120,
    0xff200400, 0x00008120, 0xff290400, 0x0000a120,
    0xff310400, 0x0000c220, 0xff390400, 0x0000e220,
    0xff410400, 0x00000029, 0xff4a0400, 0x00002029,
    0xff520400, 0x00004029, 0xff5a0400, 0x00006129,
    0xff620400, 0x00008129, 0xff6a0400, 0x0000a129,
    0xff730400, 0x0000c229, 0xff7b0400, 0x0000e229,
    0xff830400, 0x00000031, 0xff8b0400, 0x00002031,
    0xff940400, 0x00004031, 0xff9c0400, 0x00006131,
    0xffa40400, 0x00008131, 0xffac0400, 0x0000a131,
    0xffb40400, 0x0000c231, 0xffbd0400, 0x0000e231,
    0xffc50400, 0x00000039, 0xffcd0400, 0x00002039,
    0xffd50400, 0x00004039, 0xffde0400, 0x00006139,
    0xffe60400, 0x00008139, 0xffee0400, 0x0000a139,
    0xfff60400, 0x0000c239, 0xffff0400, 0x0000e239,
    0xff000800, 0x00000041, 0xff080800, 0x00002041,
    0xff100800, 0x00004041, 0xff180800, 0x00006141,
    0xff200800, 0x00008141, 0xff290800, 0x0000a141,
    0xff310800, 0x0000c241, 0xff390800, 0x0000e241,
    0xff410800, 0x0000004a, 0xff4a0800, 0x0000204a,
    0xff520800, 0x0000404a, 0xff5a0800, 0x0000614a,
    0xff620800, 0x0000814a, 0xff6a0800, 0x0000a14a,
    0xff730800, 0x0000c24a, 0xff7b0800, 0x0000e24a,
    0xff830800, 0x00000052, 0xff8b0800, 0x00002052,
    0xff940800, 0x00004052, 0xff9c0800, 0x00006152,
    0xffa40800, 0x00008152, 0xffac0800, 0x0000a152,
    0xffb40800, 0x0000c252, 0xffbd0800, 0x0000e252,
    0xffc50800, 0x0000005a, 0xffcd0800, 0x0000205a,
    0xffd50800, 0x0000405a, 0xffde0800, 0x0000615a,
    0xffe60800, 0x0000815a, 0xffee0800, 0x0000a15a,
    0xfff60800, 0x0000c25a, 0xffff0800, 0x0000e25a,
    0xff000c00, 0x00000062, 0xff080c00, 0x00002062,
    0xff100c00, 0x00004062, 0xff180c00, 0x00006162,
    0xff200c00, 0x00008162, 0xff290c00, 0x0000a162,
    0xff310c00, 0x0000c262, 0xff390c00, 0x0000e262,
    0xff410c00, 0x0000006a, 0xff4a0c00, 0x0000206a,
    0xff520c00, 0x0000406a, 0xff5a0c00, 0x0000616a,
    0xff620c00, 0x0000816a, 0xff6a0c00, 0x0000a16a,
    0xff730c00, 0x0000c26a, 0xff7b0c00, 0x0000e26a,
    0xff830c00, 0x00000073, 0xff8b0c00, 0x00002073,
    0xff940c00, 0x00004073, 0xff9c0c00, 0x00006173,
    0xffa40c00, 0x00008173, 0xffac0c00, 0x0000a173,
    0xffb40c00, 0x0000c273, 0xffbd0c00, 0x0000e273,
    0xffc50c00, 0x0000007b, 0xffcd0c00, 0x0000207b,
    0xffd50c00, 0x0000407b, 0xffde0c00, 0x0000617b,
    0xffe60c00, 0x0000817b, 0xffee0c00, 0x0000a17b,
    0xfff60c00, 0x0000c27b, 0xffff0c00, 0x0000e27b,
    0xff001000, 0x00000083, 0xff081000, 0x00002083,
    0xff101000, 0x00004083, 0xff181000, 0x00006183,
    0xff201000, 0x00008183, 0xff291000, 0x0000a183,
    0xff311000, 0x0000c283, 0xff391000, 0x0000e283,
    0xff411000, 0x0000008b, 0xff4a1000, 0x0000208b,
    0xff521000, 0x0000408b, 0xff5a1000, 0x0000618b,
    0xff621000, 0x0000818b, 0xff6a1000, 0x0000a18b,
    0xff731000, 0x0000c28b, 0xff7b1000, 0x0000e28b,
    0xff831000, 0x00000094, 0xff8b1000, 0x00002094,
    0xff941000, 0x00004094, 0xff9c1000, 0x00006194,
    0xffa41000, 0x00008194, 0xffac1000, 0x0000a194,
    0xffb41000, 0x0000c294, 0xffbd1000, 0x0000e294,
    0xffc51000, 0x0000009c, 0xffcd1000, 0x0000209c,
    0xffd51000, 0x0000409c, 0xffde1000, 0x0000619c,
    0xffe61000, 0x0000819c, 0xffee1000, 0x0000a19c,
    0xfff61000, 0x0000c29c, 0xffff1000, 0x0000e29c,
    0xff001400, 0x000000a4, 0xff081400, 0x000020a4,
    0xff101400, 0x000040a4, 0xff181400, 0x000061a4,
    0xff201400, 0x000081a4, 0xff291400, 0x0000a1a4,
    0xff311400, 0x0000c2a4, 0xff391400, 0x0000e2a4,
    0xff411400, 0x000000ac, 0xff4a1400, 0x000020ac,
    0xff521400, 0x000040ac, 0xff5a1400, 0x000061ac,
    0xff621400, 0x000081ac, 0xff6a1400, 0x0000a1ac,
    0xff731400, 0x0000c2ac, 0xff7b1400, 0x0000e2ac,
    0xff831400, 0x000000b4, 0xff8b1400, 0x000020b4,
    0xff941400, 0x000040b4, 0xff9c1400, 0x000061b4,
    0xffa41400, 0x000081b4, 0xffac1400, 0x0000a1b4,
    0xffb41400, 0x0000c2b4, 0xffbd1400, 0x0000e2b4,
    0xffc51400, 0x000000bd, 0xffcd1400, 0x000020bd,
    0xffd51400, 0x000040bd, 0xffde1400, 0x000061bd,
    0xffe61400, 0x000081bd, 0xffee1400, 0x0000a1bd,
    0xfff61400, 0x0000c2bd, 0xffff1400, 0x0000e2bd,
    0xff001800, 0x000000c5, 0xff081800, 0x000020c5,
    0xff101800, 0x000040c5, 0xff181800, 0x000061c5,
    0xff201800, 0x000081c5, 0xff291800, 0x0000a1c5,
    0xff311800, 0x0000c2c5, 0xff391800, 0x0000e2c5,
    0xff411800, 0x000000cd, 0xff4a1800, 0x000020cd,
    0xff521800, 0x000040cd, 0xff5a1800, 0x000061cd,
    0xff621800, 0x000081cd, 0xff6a1800, 0x0000a1cd,
    0xff731800, 0x0000c2cd, 0xff7b1800, 0x0000e2cd,
    0xff831800, 0x000000d5, 0xff8b1800, 0x000020d5,
    0xff941800, 0x000040d5, 0xff9c1800, 0x000061d5,
    0xffa41800, 0x000081d5, 0xffac1800, 0x0000a1d5,
    0xffb41800, 0x0000c2d5, 0xffbd1800, 0x0000e2d5,
    0xffc51800, 0x000000de, 0xffcd1800, 0x000020de,
    0xffd51800, 0x000040de, 0xffde1800, 0x000061de,
    0xffe61800, 0x000081de, 0xffee1800, 0x0000a1de,
    0xfff61800, 0x0000c2de, 0xffff1800, 0x0000e2de,
    0xff001c00, 0x000000e6, 0xff081c00, 0x000020e6,
    0xff101c00, 0x000040e6, 0xff181c00, 0x000061e6,
    0xff201c00, 0x000081e6, 0xff291c00, 0x0000a1e6,
    0xff311c00, 0x0000c2e6, 0xff391c00, 0x0000e2e6,
    0xff411c00, 0x000000ee, 0xff4a1c00, 0x000020ee,
    0xff521c00, 0x000040ee, 0xff5a1c00, 0x000061ee,
    0xff621c00, 0x000081ee, 0xff6a1c00, 0x0000a1ee,
    0xff731c00, 0x0000c2ee, 0xff7b1c00, 0x0000e2ee,
    0xff831c00, 0x000000f6, 0xff8b1c00, 0x000020f6,
    0xff941c00, 0x000040f6, 0xff9c1c00, 0x000061f6,
    0xffa41c00, 0x000081f6, 0xffac1c00, 0x0000a1f6,
    0xffb41c00, 0x0000c2f6, 0xffbd1c00, 0x0000e2f6,
    0xffc51c00, 0x000000ff, 0xffcd1c00, 0x000020ff,
    0xffd51c00, 0x000040ff, 0xffde1c00, 0x000061ff,
    0xffe61c00, 0x000081ff, 0xffee1c00, 0x0000a1ff,
    0xfff61c00, 0x0000c2ff, 0xffff1c00, 0x0000e2ff
};
static void
Blit_RGB565_ABGR8888(SDL_BlitInfo * info)
{
    Blit_RGB565_32(info, RGB565_ABGR8888_LUT);
}

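/*
 * The ABGR table above is the ARGB data with the red and blue
 * contributions swapped; the RGBA and BGRA tables that follow shift the
 * same channel bytes up by 8 to make room for alpha in the low byte.
 * Exactly one entry of each pair carries the constant 0xff alpha, so the
 * addition in RGB565_32 sets it once.
 */
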
/* Special optimized blit for RGB 5-6-5 --> RGBA 8-8-8-8 */
static const Uint32 RGB565_RGBA8888_LUT[512] = {
    0x000000ff, 0x00000000, 0x000008ff, 0x00200000,
    0x000010ff, 0x00400000, 0x000018ff, 0x00610000,
    0x000020ff, 0x00810000, 0x000029ff, 0x00a10000,
    0x000031ff, 0x00c20000, 0x000039ff, 0x00e20000,
    0x000041ff, 0x08000000, 0x00004aff, 0x08200000,
    0x000052ff, 0x08400000, 0x00005aff, 0x08610000,
    0x000062ff, 0x08810000, 0x00006aff, 0x08a10000,
    0x000073ff, 0x08c20000, 0x00007bff, 0x08e20000,
    0x000083ff, 0x10000000, 0x00008bff, 0x10200000,
    0x000094ff, 0x10400000, 0x00009cff, 0x10610000,
    0x0000a4ff, 0x10810000, 0x0000acff, 0x10a10000,
    0x0000b4ff, 0x10c20000, 0x0000bdff, 0x10e20000,
    0x0000c5ff, 0x18000000, 0x0000cdff, 0x18200000,
    0x0000d5ff, 0x18400000, 0x0000deff, 0x18610000,
    0x0000e6ff, 0x18810000, 0x0000eeff, 0x18a10000,
    0x0000f6ff, 0x18c20000, 0x0000ffff, 0x18e20000,
    0x000400ff, 0x20000000, 0x000408ff, 0x20200000,
    0x000410ff, 0x20400000, 0x000418ff, 0x20610000,
    0x000420ff, 0x20810000, 0x000429ff, 0x20a10000,
    0x000431ff, 0x20c20000, 0x000439ff, 0x20e20000,
    0x000441ff, 0x29000000, 0x00044aff, 0x29200000,
    0x000452ff, 0x29400000, 0x00045aff, 0x29610000,
    0x000462ff, 0x29810000, 0x00046aff, 0x29a10000,
    0x000473ff, 0x29c20000, 0x00047bff, 0x29e20000,
    0x000483ff, 0x31000000, 0x00048bff, 0x31200000,
    0x000494ff, 0x31400000, 0x00049cff, 0x31610000,
    0x0004a4ff, 0x31810000, 0x0004acff, 0x31a10000,
    0x0004b4ff, 0x31c20000, 0x0004bdff, 0x31e20000,
    0x0004c5ff, 0x39000000, 0x0004cdff, 0x39200000,
    0x0004d5ff, 0x39400000, 0x0004deff, 0x39610000,
    0x0004e6ff, 0x39810000, 0x0004eeff, 0x39a10000,
    0x0004f6ff, 0x39c20000, 0x0004ffff, 0x39e20000,
    0x000800ff, 0x41000000, 0x000808ff, 0x41200000,
    0x000810ff, 0x41400000, 0x000818ff, 0x41610000,
    0x000820ff, 0x41810000, 0x000829ff, 0x41a10000,
    0x000831ff, 0x41c20000, 0x000839ff, 0x41e20000,
    0x000841ff, 0x4a000000, 0x00084aff, 0x4a200000,
    0x000852ff, 0x4a400000, 0x00085aff, 0x4a610000,
    0x000862ff, 0x4a810000, 0x00086aff, 0x4aa10000,
    0x000873ff, 0x4ac20000, 0x00087bff, 0x4ae20000,
    0x000883ff, 0x52000000, 0x00088bff, 0x52200000,
    0x000894ff, 0x52400000, 0x00089cff, 0x52610000,
    0x0008a4ff, 0x52810000, 0x0008acff, 0x52a10000,
    0x0008b4ff, 0x52c20000, 0x0008bdff, 0x52e20000,
    0x0008c5ff, 0x5a000000, 0x0008cdff, 0x5a200000,
    0x0008d5ff, 0x5a400000, 0x0008deff, 0x5a610000,
    0x0008e6ff, 0x5a810000, 0x0008eeff, 0x5aa10000,
    0x0008f6ff, 0x5ac20000, 0x0008ffff, 0x5ae20000,
    0x000c00ff, 0x62000000, 0x000c08ff, 0x62200000,
    0x000c10ff, 0x62400000, 0x000c18ff, 0x62610000,
    0x000c20ff, 0x62810000, 0x000c29ff, 0x62a10000,
    0x000c31ff, 0x62c20000, 0x000c39ff, 0x62e20000,
    0x000c41ff, 0x6a000000, 0x000c4aff, 0x6a200000,
    0x000c52ff, 0x6a400000, 0x000c5aff, 0x6a610000,
    0x000c62ff, 0x6a810000, 0x000c6aff, 0x6aa10000,
    0x000c73ff, 0x6ac20000, 0x000c7bff, 0x6ae20000,
    0x000c83ff, 0x73000000, 0x000c8bff, 0x73200000,
    0x000c94ff, 0x73400000, 0x000c9cff, 0x73610000,
    0x000ca4ff, 0x73810000, 0x000cacff, 0x73a10000,
    0x000cb4ff, 0x73c20000, 0x000cbdff, 0x73e20000,
    0x000cc5ff, 0x7b000000, 0x000ccdff, 0x7b200000,
    0x000cd5ff, 0x7b400000, 0x000cdeff, 0x7b610000,
    0x000ce6ff, 0x7b810000, 0x000ceeff, 0x7ba10000,
    0x000cf6ff, 0x7bc20000, 0x000cffff, 0x7be20000,
    0x001000ff, 0x83000000, 0x001008ff, 0x83200000,
    0x001010ff, 0x83400000, 0x001018ff, 0x83610000,
    0x001020ff, 0x83810000, 0x001029ff, 0x83a10000,
    0x001031ff, 0x83c20000, 0x001039ff, 0x83e20000,
    0x001041ff, 0x8b000000, 0x00104aff, 0x8b200000,
    0x001052ff, 0x8b400000, 0x00105aff, 0x8b610000,
    0x001062ff, 0x8b810000, 0x00106aff, 0x8ba10000,
    0x001073ff, 0x8bc20000, 0x00107bff, 0x8be20000,
    0x001083ff, 0x94000000, 0x00108bff, 0x94200000,
    0x001094ff, 0x94400000, 0x00109cff, 0x94610000,
    0x0010a4ff, 0x94810000, 0x0010acff, 0x94a10000,
    0x0010b4ff, 0x94c20000, 0x0010bdff, 0x94e20000,
    0x0010c5ff, 0x9c000000, 0x0010cdff, 0x9c200000,
    0x0010d5ff, 0x9c400000, 0x0010deff, 0x9c610000,
    0x0010e6ff, 0x9c810000, 0x0010eeff, 0x9ca10000,
    0x0010f6ff, 0x9cc20000, 0x0010ffff, 0x9ce20000,
    0x001400ff, 0xa4000000, 0x001408ff, 0xa4200000,
    0x001410ff, 0xa4400000, 0x001418ff, 0xa4610000,
    0x001420ff, 0xa4810000, 0x001429ff, 0xa4a10000,
    0x001431ff, 0xa4c20000, 0x001439ff, 0xa4e20000,
    0x001441ff, 0xac000000, 0x00144aff, 0xac200000,
    0x001452ff, 0xac400000, 0x00145aff, 0xac610000,
    0x001462ff, 0xac810000, 0x00146aff, 0xaca10000,
    0x001473ff, 0xacc20000, 0x00147bff, 0xace20000,
    0x001483ff, 0xb4000000, 0x00148bff, 0xb4200000,
    0x001494ff, 0xb4400000, 0x00149cff, 0xb4610000,
    0x0014a4ff, 0xb4810000, 0x0014acff, 0xb4a10000,
    0x0014b4ff, 0xb4c20000, 0x0014bdff, 0xb4e20000,
    0x0014c5ff, 0xbd000000, 0x0014cdff, 0xbd200000,
    0x0014d5ff, 0xbd400000, 0x0014deff, 0xbd610000,
    0x0014e6ff, 0xbd810000, 0x0014eeff, 0xbda10000,
    0x0014f6ff, 0xbdc20000, 0x0014ffff, 0xbde20000,
    0x001800ff, 0xc5000000, 0x001808ff, 0xc5200000,
    0x001810ff, 0xc5400000, 0x001818ff, 0xc5610000,
    0x001820ff, 0xc5810000, 0x001829ff, 0xc5a10000,
    0x001831ff, 0xc5c20000, 0x001839ff, 0xc5e20000,
    0x001841ff, 0xcd000000, 0x00184aff, 0xcd200000,
    0x001852ff, 0xcd400000, 0x00185aff, 0xcd610000,
    0x001862ff, 0xcd810000, 0x00186aff, 0xcda10000,
    0x001873ff, 0xcdc20000, 0x00187bff, 0xcde20000,
    0x001883ff, 0xd5000000, 0x00188bff, 0xd5200000,
    0x001894ff, 0xd5400000, 0x00189cff, 0xd5610000,
    0x0018a4ff, 0xd5810000, 0x0018acff, 0xd5a10000,
    0x0018b4ff, 0xd5c20000, 0x0018bdff, 0xd5e20000,
    0x0018c5ff, 0xde000000, 0x0018cdff, 0xde200000,
    0x0018d5ff, 0xde400000, 0x0018deff, 0xde610000,
    0x0018e6ff, 0xde810000, 0x0018eeff, 0xdea10000,
    0x0018f6ff, 0xdec20000, 0x0018ffff, 0xdee20000,
    0x001c00ff, 0xe6000000, 0x001c08ff, 0xe6200000,
    0x001c10ff, 0xe6400000, 0x001c18ff, 0xe6610000,
    0x001c20ff, 0xe6810000, 0x001c29ff, 0xe6a10000,
    0x001c31ff, 0xe6c20000, 0x001c39ff, 0xe6e20000,
    0x001c41ff, 0xee000000, 0x001c4aff, 0xee200000,
    0x001c52ff, 0xee400000, 0x001c5aff, 0xee610000,
    0x001c62ff, 0xee810000, 0x001c6aff, 0xeea10000,
    0x001c73ff, 0xeec20000, 0x001c7bff, 0xeee20000,
    0x001c83ff, 0xf6000000, 0x001c8bff, 0xf6200000,
    0x001c94ff, 0xf6400000, 0x001c9cff, 0xf6610000,
    0x001ca4ff, 0xf6810000, 0x001cacff, 0xf6a10000,
    0x001cb4ff, 0xf6c20000, 0x001cbdff, 0xf6e20000,
    0x001cc5ff, 0xff000000, 0x001ccdff, 0xff200000,
    0x001cd5ff, 0xff400000, 0x001cdeff, 0xff610000,
    0x001ce6ff, 0xff810000, 0x001ceeff, 0xffa10000,
    0x001cf6ff, 0xffc20000, 0x001cffff, 0xffe20000,
};
static void
Blit_RGB565_RGBA8888(SDL_BlitInfo * info)
{
    Blit_RGB565_32(info, RGB565_RGBA8888_LUT);
}

/* Special optimized blit for RGB 5-6-5 --> BGRA 8-8-8-8 */
static const Uint32 RGB565_BGRA8888_LUT[512] = {
    0x00000000, 0x000000ff, 0x08000000, 0x002000ff,
    0x10000000, 0x004000ff, 0x18000000, 0x006100ff,
    0x20000000, 0x008100ff, 0x29000000, 0x00a100ff,
    0x31000000, 0x00c200ff, 0x39000000, 0x00e200ff,
    0x41000000, 0x000008ff, 0x4a000000, 0x002008ff,
    0x52000000, 0x004008ff, 0x5a000000, 0x006108ff,
    0x62000000, 0x008108ff, 0x6a000000, 0x00a108ff,
    0x73000000, 0x00c208ff, 0x7b000000, 0x00e208ff,
    0x83000000, 0x000010ff, 0x8b000000, 0x002010ff,
    0x94000000, 0x004010ff, 0x9c000000, 0x006110ff,
    0xa4000000, 0x008110ff, 0xac000000, 0x00a110ff,
    0xb4000000, 0x00c210ff, 0xbd000000, 0x00e210ff,
    0xc5000000, 0x000018ff, 0xcd000000, 0x002018ff,
    0xd5000000, 0x004018ff, 0xde000000, 0x006118ff,
    0xe6000000, 0x008118ff, 0xee000000, 0x00a118ff,
    0xf6000000, 0x00c218ff, 0xff000000, 0x00e218ff,
    0x00040000, 0x000020ff, 0x08040000, 0x002020ff,
    0x10040000, 0x004020ff, 0x18040000, 0x006120ff,
    0x20040000, 0x008120ff, 0x29040000, 0x00a120ff,
    0x31040000, 0x00c220ff, 0x39040000, 0x00e220ff,
    0x41040000, 0x000029ff, 0x4a040000, 0x002029ff,
    0x52040000, 0x004029ff, 0x5a040000, 0x006129ff,
    0x62040000, 0x008129ff, 0x6a040000, 0x00a129ff,
    0x73040000, 0x00c229ff, 0x7b040000, 0x00e229ff,
    0x83040000, 0x000031ff, 0x8b040000, 0x002031ff,
    0x94040000, 0x004031ff, 0x9c040000, 0x006131ff,
    0xa4040000, 0x008131ff, 0xac040000, 0x00a131ff,
    0xb4040000, 0x00c231ff, 0xbd040000, 0x00e231ff,
    0xc5040000, 0x000039ff, 0xcd040000, 0x002039ff,
    0xd5040000, 0x004039ff, 0xde040000, 0x006139ff,
    0xe6040000, 0x008139ff, 0xee040000, 0x00a139ff,
    0xf6040000, 0x00c239ff, 0xff040000, 0x00e239ff,
    0x00080000, 0x000041ff, 0x08080000, 0x002041ff,
    0x10080000, 0x004041ff, 0x18080000, 0x006141ff,
    0x20080000, 0x008141ff, 0x29080000, 0x00a141ff,
    0x31080000, 0x00c241ff, 0x39080000, 0x00e241ff,
    0x41080000, 0x00004aff, 0x4a080000, 0x00204aff,
    0x52080000, 0x00404aff, 0x5a080000, 0x00614aff,
    0x62080000, 0x00814aff, 0x6a080000, 0x00a14aff,
    0x73080000, 0x00c24aff, 0x7b080000, 0x00e24aff,
    0x83080000, 0x000052ff, 0x8b080000, 0x002052ff,
    0x94080000, 0x004052ff, 0x9c080000, 0x006152ff,
    0xa4080000, 0x008152ff, 0xac080000, 0x00a152ff,
    0xb4080000, 0x00c252ff, 0xbd080000, 0x00e252ff,
    0xc5080000, 0x00005aff, 0xcd080000, 0x00205aff,
    0xd5080000, 0x00405aff, 0xde080000, 0x00615aff,
1733 0xe6080000, 0x00815aff, 0xee080000, 0x00a15aff, | 1784 0xe6080000, 0x00815aff, 0xee080000, 0x00a15aff, |
1734 0xf6080000, 0x00c25aff, 0xff080000, 0x00e25aff, | 1785 0xf6080000, 0x00c25aff, 0xff080000, 0x00e25aff, |
1735 0x000c0000, 0x000062ff, 0x080c0000, 0x002062ff, | 1786 0x000c0000, 0x000062ff, 0x080c0000, 0x002062ff, |
1736 0x100c0000, 0x004062ff, 0x180c0000, 0x006162ff, | 1787 0x100c0000, 0x004062ff, 0x180c0000, 0x006162ff, |
1737 0x200c0000, 0x008162ff, 0x290c0000, 0x00a162ff, | 1788 0x200c0000, 0x008162ff, 0x290c0000, 0x00a162ff, |
1738 0x310c0000, 0x00c262ff, 0x390c0000, 0x00e262ff, | 1789 0x310c0000, 0x00c262ff, 0x390c0000, 0x00e262ff, |
1739 0x410c0000, 0x00006aff, 0x4a0c0000, 0x00206aff, | 1790 0x410c0000, 0x00006aff, 0x4a0c0000, 0x00206aff, |
1740 0x520c0000, 0x00406aff, 0x5a0c0000, 0x00616aff, | 1791 0x520c0000, 0x00406aff, 0x5a0c0000, 0x00616aff, |
1741 0x620c0000, 0x00816aff, 0x6a0c0000, 0x00a16aff, | 1792 0x620c0000, 0x00816aff, 0x6a0c0000, 0x00a16aff, |
1742 0x730c0000, 0x00c26aff, 0x7b0c0000, 0x00e26aff, | 1793 0x730c0000, 0x00c26aff, 0x7b0c0000, 0x00e26aff, |
1743 0x830c0000, 0x000073ff, 0x8b0c0000, 0x002073ff, | 1794 0x830c0000, 0x000073ff, 0x8b0c0000, 0x002073ff, |
1744 0x940c0000, 0x004073ff, 0x9c0c0000, 0x006173ff, | 1795 0x940c0000, 0x004073ff, 0x9c0c0000, 0x006173ff, |
1745 0xa40c0000, 0x008173ff, 0xac0c0000, 0x00a173ff, | 1796 0xa40c0000, 0x008173ff, 0xac0c0000, 0x00a173ff, |
1746 0xb40c0000, 0x00c273ff, 0xbd0c0000, 0x00e273ff, | 1797 0xb40c0000, 0x00c273ff, 0xbd0c0000, 0x00e273ff, |
1747 0xc50c0000, 0x00007bff, 0xcd0c0000, 0x00207bff, | 1798 0xc50c0000, 0x00007bff, 0xcd0c0000, 0x00207bff, |
1748 0xd50c0000, 0x00407bff, 0xde0c0000, 0x00617bff, | 1799 0xd50c0000, 0x00407bff, 0xde0c0000, 0x00617bff, |
1749 0xe60c0000, 0x00817bff, 0xee0c0000, 0x00a17bff, | 1800 0xe60c0000, 0x00817bff, 0xee0c0000, 0x00a17bff, |
1750 0xf60c0000, 0x00c27bff, 0xff0c0000, 0x00e27bff, | 1801 0xf60c0000, 0x00c27bff, 0xff0c0000, 0x00e27bff, |
1751 0x00100000, 0x000083ff, 0x08100000, 0x002083ff, | 1802 0x00100000, 0x000083ff, 0x08100000, 0x002083ff, |
1752 0x10100000, 0x004083ff, 0x18100000, 0x006183ff, | 1803 0x10100000, 0x004083ff, 0x18100000, 0x006183ff, |
1753 0x20100000, 0x008183ff, 0x29100000, 0x00a183ff, | 1804 0x20100000, 0x008183ff, 0x29100000, 0x00a183ff, |
1754 0x31100000, 0x00c283ff, 0x39100000, 0x00e283ff, | 1805 0x31100000, 0x00c283ff, 0x39100000, 0x00e283ff, |
1755 0x41100000, 0x00008bff, 0x4a100000, 0x00208bff, | 1806 0x41100000, 0x00008bff, 0x4a100000, 0x00208bff, |
1756 0x52100000, 0x00408bff, 0x5a100000, 0x00618bff, | 1807 0x52100000, 0x00408bff, 0x5a100000, 0x00618bff, |
1757 0x62100000, 0x00818bff, 0x6a100000, 0x00a18bff, | 1808 0x62100000, 0x00818bff, 0x6a100000, 0x00a18bff, |
1758 0x73100000, 0x00c28bff, 0x7b100000, 0x00e28bff, | 1809 0x73100000, 0x00c28bff, 0x7b100000, 0x00e28bff, |
1759 0x83100000, 0x000094ff, 0x8b100000, 0x002094ff, | 1810 0x83100000, 0x000094ff, 0x8b100000, 0x002094ff, |
1760 0x94100000, 0x004094ff, 0x9c100000, 0x006194ff, | 1811 0x94100000, 0x004094ff, 0x9c100000, 0x006194ff, |
1761 0xa4100000, 0x008194ff, 0xac100000, 0x00a194ff, | 1812 0xa4100000, 0x008194ff, 0xac100000, 0x00a194ff, |
1762 0xb4100000, 0x00c294ff, 0xbd100000, 0x00e294ff, | 1813 0xb4100000, 0x00c294ff, 0xbd100000, 0x00e294ff, |
1763 0xc5100000, 0x00009cff, 0xcd100000, 0x00209cff, | 1814 0xc5100000, 0x00009cff, 0xcd100000, 0x00209cff, |
1764 0xd5100000, 0x00409cff, 0xde100000, 0x00619cff, | 1815 0xd5100000, 0x00409cff, 0xde100000, 0x00619cff, |
1765 0xe6100000, 0x00819cff, 0xee100000, 0x00a19cff, | 1816 0xe6100000, 0x00819cff, 0xee100000, 0x00a19cff, |
1766 0xf6100000, 0x00c29cff, 0xff100000, 0x00e29cff, | 1817 0xf6100000, 0x00c29cff, 0xff100000, 0x00e29cff, |
1767 0x00140000, 0x0000a4ff, 0x08140000, 0x0020a4ff, | 1818 0x00140000, 0x0000a4ff, 0x08140000, 0x0020a4ff, |
1768 0x10140000, 0x0040a4ff, 0x18140000, 0x0061a4ff, | 1819 0x10140000, 0x0040a4ff, 0x18140000, 0x0061a4ff, |
1769 0x20140000, 0x0081a4ff, 0x29140000, 0x00a1a4ff, | 1820 0x20140000, 0x0081a4ff, 0x29140000, 0x00a1a4ff, |
1770 0x31140000, 0x00c2a4ff, 0x39140000, 0x00e2a4ff, | 1821 0x31140000, 0x00c2a4ff, 0x39140000, 0x00e2a4ff, |
1771 0x41140000, 0x0000acff, 0x4a140000, 0x0020acff, | 1822 0x41140000, 0x0000acff, 0x4a140000, 0x0020acff, |
1772 0x52140000, 0x0040acff, 0x5a140000, 0x0061acff, | 1823 0x52140000, 0x0040acff, 0x5a140000, 0x0061acff, |
1773 0x62140000, 0x0081acff, 0x6a140000, 0x00a1acff, | 1824 0x62140000, 0x0081acff, 0x6a140000, 0x00a1acff, |
1774 0x73140000, 0x00c2acff, 0x7b140000, 0x00e2acff, | 1825 0x73140000, 0x00c2acff, 0x7b140000, 0x00e2acff, |
1775 0x83140000, 0x0000b4ff, 0x8b140000, 0x0020b4ff, | 1826 0x83140000, 0x0000b4ff, 0x8b140000, 0x0020b4ff, |
1776 0x94140000, 0x0040b4ff, 0x9c140000, 0x0061b4ff, | 1827 0x94140000, 0x0040b4ff, 0x9c140000, 0x0061b4ff, |
1777 0xa4140000, 0x0081b4ff, 0xac140000, 0x00a1b4ff, | 1828 0xa4140000, 0x0081b4ff, 0xac140000, 0x00a1b4ff, |
1778 0xb4140000, 0x00c2b4ff, 0xbd140000, 0x00e2b4ff, | 1829 0xb4140000, 0x00c2b4ff, 0xbd140000, 0x00e2b4ff, |
1779 0xc5140000, 0x0000bdff, 0xcd140000, 0x0020bdff, | 1830 0xc5140000, 0x0000bdff, 0xcd140000, 0x0020bdff, |
1780 0xd5140000, 0x0040bdff, 0xde140000, 0x0061bdff, | 1831 0xd5140000, 0x0040bdff, 0xde140000, 0x0061bdff, |
1781 0xe6140000, 0x0081bdff, 0xee140000, 0x00a1bdff, | 1832 0xe6140000, 0x0081bdff, 0xee140000, 0x00a1bdff, |
1782 0xf6140000, 0x00c2bdff, 0xff140000, 0x00e2bdff, | 1833 0xf6140000, 0x00c2bdff, 0xff140000, 0x00e2bdff, |
1783 0x00180000, 0x0000c5ff, 0x08180000, 0x0020c5ff, | 1834 0x00180000, 0x0000c5ff, 0x08180000, 0x0020c5ff, |
1784 0x10180000, 0x0040c5ff, 0x18180000, 0x0061c5ff, | 1835 0x10180000, 0x0040c5ff, 0x18180000, 0x0061c5ff, |
1785 0x20180000, 0x0081c5ff, 0x29180000, 0x00a1c5ff, | 1836 0x20180000, 0x0081c5ff, 0x29180000, 0x00a1c5ff, |
1786 0x31180000, 0x00c2c5ff, 0x39180000, 0x00e2c5ff, | 1837 0x31180000, 0x00c2c5ff, 0x39180000, 0x00e2c5ff, |
1787 0x41180000, 0x0000cdff, 0x4a180000, 0x0020cdff, | 1838 0x41180000, 0x0000cdff, 0x4a180000, 0x0020cdff, |
1788 0x52180000, 0x0040cdff, 0x5a180000, 0x0061cdff, | 1839 0x52180000, 0x0040cdff, 0x5a180000, 0x0061cdff, |
1789 0x62180000, 0x0081cdff, 0x6a180000, 0x00a1cdff, | 1840 0x62180000, 0x0081cdff, 0x6a180000, 0x00a1cdff, |
1790 0x73180000, 0x00c2cdff, 0x7b180000, 0x00e2cdff, | 1841 0x73180000, 0x00c2cdff, 0x7b180000, 0x00e2cdff, |
1791 0x83180000, 0x0000d5ff, 0x8b180000, 0x0020d5ff, | 1842 0x83180000, 0x0000d5ff, 0x8b180000, 0x0020d5ff, |
1792 0x94180000, 0x0040d5ff, 0x9c180000, 0x0061d5ff, | 1843 0x94180000, 0x0040d5ff, 0x9c180000, 0x0061d5ff, |
1793 0xa4180000, 0x0081d5ff, 0xac180000, 0x00a1d5ff, | 1844 0xa4180000, 0x0081d5ff, 0xac180000, 0x00a1d5ff, |
1794 0xb4180000, 0x00c2d5ff, 0xbd180000, 0x00e2d5ff, | 1845 0xb4180000, 0x00c2d5ff, 0xbd180000, 0x00e2d5ff, |
1795 0xc5180000, 0x0000deff, 0xcd180000, 0x0020deff, | 1846 0xc5180000, 0x0000deff, 0xcd180000, 0x0020deff, |
1796 0xd5180000, 0x0040deff, 0xde180000, 0x0061deff, | 1847 0xd5180000, 0x0040deff, 0xde180000, 0x0061deff, |
1797 0xe6180000, 0x0081deff, 0xee180000, 0x00a1deff, | 1848 0xe6180000, 0x0081deff, 0xee180000, 0x00a1deff, |
1798 0xf6180000, 0x00c2deff, 0xff180000, 0x00e2deff, | 1849 0xf6180000, 0x00c2deff, 0xff180000, 0x00e2deff, |
1799 0x001c0000, 0x0000e6ff, 0x081c0000, 0x0020e6ff, | 1850 0x001c0000, 0x0000e6ff, 0x081c0000, 0x0020e6ff, |
1800 0x101c0000, 0x0040e6ff, 0x181c0000, 0x0061e6ff, | 1851 0x101c0000, 0x0040e6ff, 0x181c0000, 0x0061e6ff, |
1801 0x201c0000, 0x0081e6ff, 0x291c0000, 0x00a1e6ff, | 1852 0x201c0000, 0x0081e6ff, 0x291c0000, 0x00a1e6ff, |
1802 0x311c0000, 0x00c2e6ff, 0x391c0000, 0x00e2e6ff, | 1853 0x311c0000, 0x00c2e6ff, 0x391c0000, 0x00e2e6ff, |
1803 0x411c0000, 0x0000eeff, 0x4a1c0000, 0x0020eeff, | 1854 0x411c0000, 0x0000eeff, 0x4a1c0000, 0x0020eeff, |
1804 0x521c0000, 0x0040eeff, 0x5a1c0000, 0x0061eeff, | 1855 0x521c0000, 0x0040eeff, 0x5a1c0000, 0x0061eeff, |
1805 0x621c0000, 0x0081eeff, 0x6a1c0000, 0x00a1eeff, | 1856 0x621c0000, 0x0081eeff, 0x6a1c0000, 0x00a1eeff, |
1806 0x731c0000, 0x00c2eeff, 0x7b1c0000, 0x00e2eeff, | 1857 0x731c0000, 0x00c2eeff, 0x7b1c0000, 0x00e2eeff, |
1807 0x831c0000, 0x0000f6ff, 0x8b1c0000, 0x0020f6ff, | 1858 0x831c0000, 0x0000f6ff, 0x8b1c0000, 0x0020f6ff, |
1808 0x941c0000, 0x0040f6ff, 0x9c1c0000, 0x0061f6ff, | 1859 0x941c0000, 0x0040f6ff, 0x9c1c0000, 0x0061f6ff, |
1809 0xa41c0000, 0x0081f6ff, 0xac1c0000, 0x00a1f6ff, | 1860 0xa41c0000, 0x0081f6ff, 0xac1c0000, 0x00a1f6ff, |
1810 0xb41c0000, 0x00c2f6ff, 0xbd1c0000, 0x00e2f6ff, | 1861 0xb41c0000, 0x00c2f6ff, 0xbd1c0000, 0x00e2f6ff, |
1811 0xc51c0000, 0x0000ffff, 0xcd1c0000, 0x0020ffff, | 1862 0xc51c0000, 0x0000ffff, 0xcd1c0000, 0x0020ffff, |
1812 0xd51c0000, 0x0040ffff, 0xde1c0000, 0x0061ffff, | 1863 0xd51c0000, 0x0040ffff, 0xde1c0000, 0x0061ffff, |
1813 0xe61c0000, 0x0081ffff, 0xee1c0000, 0x00a1ffff, | 1864 0xe61c0000, 0x0081ffff, 0xee1c0000, 0x00a1ffff, |
1814 0xf61c0000, 0x00c2ffff, 0xff1c0000, 0x00e2ffff | 1865 0xf61c0000, 0x00c2ffff, 0xff1c0000, 0x00e2ffff |
1815 }; | 1866 }; |
1816 static void Blit_RGB565_BGRA8888(SDL_BlitInfo *info) | 1867 static void |
1817 { | 1868 Blit_RGB565_BGRA8888 (SDL_BlitInfo * info) |
1818 Blit_RGB565_32(info, RGB565_BGRA8888_LUT); | 1869 { |
1870 Blit_RGB565_32 (info, RGB565_BGRA8888_LUT); | |
1819 } | 1871 } |
1820 | 1872 |
1821 /* Special optimized blit for RGB 8-8-8 --> RGB 3-3-2 */ | 1873 /* Special optimized blit for RGB 8-8-8 --> RGB 3-3-2 */ |
1822 #ifndef RGB888_RGB332 | 1874 #ifndef RGB888_RGB332 |
1823 #define RGB888_RGB332(dst, src) { \ | 1875 #define RGB888_RGB332(dst, src) { \ |
1824 dst = (((src)&0x00E00000)>>16)| \ | 1876 dst = (((src)&0x00E00000)>>16)| \ |
1825 (((src)&0x0000E000)>>11)| \ | 1877 (((src)&0x0000E000)>>11)| \ |
1826 (((src)&0x000000C0)>>6); \ | 1878 (((src)&0x000000C0)>>6); \ |
1827 } | 1879 } |
1828 #endif | 1880 #endif |
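[Annotation, not part of the changeset: the macro above keeps the top 3 bits of red and green and the top 2 bits of blue, packing them as RRRGGGBB. A worked example:]

    /* RGB888_RGB332 on src = 0x00FF8040 (R=0xFF, G=0x80, B=0x40):
       (src & 0x00E00000) >> 16  ->  0xE0   (R's top 3 bits, at bits 7-5)
       (src & 0x0000E000) >> 11  ->  0x10   (G's top 3 bits, at bits 4-2)
       (src & 0x000000C0) >> 6   ->  0x01   (B's top 2 bits, at bits 1-0)
       dst = 0xE0 | 0x10 | 0x01  =  0xF1                                 */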
1829 static void Blit_RGB888_index8_map(SDL_BlitInfo *info) | 1881 static void |
1882 Blit_RGB888_index8_map (SDL_BlitInfo * info) | |
1830 { | 1883 { |
1831 #ifndef USE_DUFFS_LOOP | 1884 #ifndef USE_DUFFS_LOOP |
1832 int c; | 1885 int c; |
1833 #endif | 1886 #endif |
1834 int Pixel; | 1887 int Pixel; |
1835 int width, height; | 1888 int width, height; |
1836 Uint32 *src; | 1889 Uint32 *src; |
1837 const Uint8 *map; | 1890 const Uint8 *map; |
1838 Uint8 *dst; | 1891 Uint8 *dst; |
1839 int srcskip, dstskip; | 1892 int srcskip, dstskip; |
1840 | 1893 |
1841 /* Set up some basic variables */ | 1894 /* Set up some basic variables */ |
1842 width = info->d_width; | 1895 width = info->d_width; |
1843 height = info->d_height; | 1896 height = info->d_height; |
1844 src = (Uint32 *)info->s_pixels; | 1897 src = (Uint32 *) info->s_pixels; |
1845 srcskip = info->s_skip/4; | 1898 srcskip = info->s_skip / 4; |
1846 dst = info->d_pixels; | 1899 dst = info->d_pixels; |
1847 dstskip = info->d_skip; | 1900 dstskip = info->d_skip; |
1848 map = info->table; | 1901 map = info->table; |
1849 | 1902 |
1850 #ifdef USE_DUFFS_LOOP | 1903 #ifdef USE_DUFFS_LOOP |
1851 while ( height-- ) { | 1904 while (height--) { |
1905 /* *INDENT-OFF* */ | |
1852 DUFFS_LOOP( | 1906 DUFFS_LOOP( |
1853 RGB888_RGB332(Pixel, *src); | 1907 RGB888_RGB332(Pixel, *src); |
1854 *dst++ = map[Pixel]; | 1908 *dst++ = map[Pixel]; |
1855 ++src; | 1909 ++src; |
1856 , width); | 1910 , width); |
1857 src += srcskip; | 1911 /* *INDENT-ON* */ |
1858 dst += dstskip; | 1912 src += srcskip; |
1859 } | 1913 dst += dstskip; |
1914 } | |
1860 #else | 1915 #else |
1861 while ( height-- ) { | 1916 while (height--) { |
1862 for ( c=width/4; c; --c ) { | 1917 for (c = width / 4; c; --c) { |
1863 /* Pack RGB into 8bit pixel */ | 1918 /* Pack RGB into 8bit pixel */ |
1864 RGB888_RGB332(Pixel, *src); | 1919 RGB888_RGB332 (Pixel, *src); |
1865 *dst++ = map[Pixel]; | 1920 *dst++ = map[Pixel]; |
1866 ++src; | 1921 ++src; |
1867 RGB888_RGB332(Pixel, *src); | 1922 RGB888_RGB332 (Pixel, *src); |
1868 *dst++ = map[Pixel]; | 1923 *dst++ = map[Pixel]; |
1869 ++src; | 1924 ++src; |
1870 RGB888_RGB332(Pixel, *src); | 1925 RGB888_RGB332 (Pixel, *src); |
1871 *dst++ = map[Pixel]; | 1926 *dst++ = map[Pixel]; |
1872 ++src; | 1927 ++src; |
1873 RGB888_RGB332(Pixel, *src); | 1928 RGB888_RGB332 (Pixel, *src); |
1874 *dst++ = map[Pixel]; | 1929 *dst++ = map[Pixel]; |
1875 ++src; | 1930 ++src; |
1876 } | 1931 } |
1877 switch ( width & 3 ) { | 1932 switch (width & 3) { |
1878 case 3: | 1933 case 3: |
1879 RGB888_RGB332(Pixel, *src); | 1934 RGB888_RGB332 (Pixel, *src); |
1880 *dst++ = map[Pixel]; | 1935 *dst++ = map[Pixel]; |
1881 ++src; | 1936 ++src; |
1882 case 2: | 1937 case 2: |
1883 RGB888_RGB332(Pixel, *src); | 1938 RGB888_RGB332 (Pixel, *src); |
1884 *dst++ = map[Pixel]; | 1939 *dst++ = map[Pixel]; |
1885 ++src; | 1940 ++src; |
1886 case 1: | 1941 case 1: |
1887 RGB888_RGB332(Pixel, *src); | 1942 RGB888_RGB332 (Pixel, *src); |
1888 *dst++ = map[Pixel]; | 1943 *dst++ = map[Pixel]; |
1889 ++src; | 1944 ++src; |
1890 } | 1945 } |
1891 src += srcskip; | 1946 src += srcskip; |
1892 dst += dstskip; | 1947 dst += dstskip; |
1893 } | 1948 } |
1894 #endif /* USE_DUFFS_LOOP */ | 1949 #endif /* USE_DUFFS_LOOP */ |
1895 } | 1950 } |
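[Annotation, not part of the changeset: the two preprocessor branches above do the same work. With USE_DUFFS_LOOP the per-pixel body is wrapped in an unrolled Duff's-device loop from SDL_blit.h; the fallback unrolls four pixels by hand and mops up the remainder through the fall-through switch. A sketch of the shape DUFFS_LOOP expands to (4-way variant; assumes width > 0, and the exact macro text in SDL_blit.h may differ slightly):]

    #define DUFFS_LOOP4(pixel_copy_increment, width)        \
    { int n = ((width) + 3) / 4;                            \
        switch ((width) & 3) {                              \
        case 0: do { pixel_copy_increment;                  \
        case 3:      pixel_copy_increment;                  \
        case 2:      pixel_copy_increment;                  \
        case 1:      pixel_copy_increment;                  \
                } while ( --n > 0 );                        \
        }                                                   \
    }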
1896 static void BlitNto1(SDL_BlitInfo *info) | 1951 static void |
1952 BlitNto1 (SDL_BlitInfo * info) | |
1897 { | 1953 { |
1898 #ifndef USE_DUFFS_LOOP | 1954 #ifndef USE_DUFFS_LOOP |
1899 int c; | 1955 int c; |
1900 #endif | 1956 #endif |
1901 int width, height; | 1957 int width, height; |
1902 Uint8 *src; | 1958 Uint8 *src; |
1903 const Uint8 *map; | 1959 const Uint8 *map; |
1904 Uint8 *dst; | 1960 Uint8 *dst; |
1905 int srcskip, dstskip; | 1961 int srcskip, dstskip; |
1906 int srcbpp; | 1962 int srcbpp; |
1907 Uint32 Pixel; | 1963 Uint32 Pixel; |
1908 int sR, sG, sB; | 1964 int sR, sG, sB; |
1909 SDL_PixelFormat *srcfmt; | 1965 SDL_PixelFormat *srcfmt; |
1910 | 1966 |
1911 /* Set up some basic variables */ | 1967 /* Set up some basic variables */ |
1912 width = info->d_width; | 1968 width = info->d_width; |
1913 height = info->d_height; | 1969 height = info->d_height; |
1914 src = info->s_pixels; | 1970 src = info->s_pixels; |
1915 srcskip = info->s_skip; | 1971 srcskip = info->s_skip; |
1916 dst = info->d_pixels; | 1972 dst = info->d_pixels; |
1917 dstskip = info->d_skip; | 1973 dstskip = info->d_skip; |
1918 map = info->table; | 1974 map = info->table; |
1919 srcfmt = info->src; | 1975 srcfmt = info->src; |
1920 srcbpp = srcfmt->BytesPerPixel; | 1976 srcbpp = srcfmt->BytesPerPixel; |
1921 | 1977 |
1922 if ( map == NULL ) { | 1978 if (map == NULL) { |
1923 while ( height-- ) { | 1979 while (height--) { |
1924 #ifdef USE_DUFFS_LOOP | 1980 #ifdef USE_DUFFS_LOOP |
1981 /* *INDENT-OFF* */ | |
1925 DUFFS_LOOP( | 1982 DUFFS_LOOP( |
1926 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 1983 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, |
1927 sR, sG, sB); | 1984 sR, sG, sB); |
1928 if ( 1 ) { | 1985 if ( 1 ) { |
1929 /* Pack RGB into 8bit pixel */ | 1986 /* Pack RGB into 8bit pixel */ |
1930 *dst = ((sR>>5)<<(3+2))| | 1987 *dst = ((sR>>5)<<(3+2))| |
1931 ((sG>>5)<<(2)) | | 1988 ((sG>>5)<<(2)) | |
1932 ((sB>>6)<<(0)) ; | 1989 ((sB>>6)<<(0)) ; |
1933 } | 1990 } |
1934 dst++; | 1991 dst++; |
1935 src += srcbpp; | 1992 src += srcbpp; |
1936 , width); | 1993 , width); |
1994 /* *INDENT-ON* */ | |
1937 #else | 1995 #else |
1938 for ( c=width; c; --c ) { | 1996 for (c = width; c; --c) { |
1939 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 1997 DISEMBLE_RGB (src, srcbpp, srcfmt, Pixel, sR, sG, sB); |
1940 sR, sG, sB); | 1998 if (1) { |
1941 if ( 1 ) { | 1999 /* Pack RGB into 8bit pixel */ |
1942 /* Pack RGB into 8bit pixel */ | 2000 *dst = ((sR >> 5) << (3 + 2)) | |
1943 *dst = ((sR>>5)<<(3+2))| | 2001 ((sG >> 5) << (2)) | ((sB >> 6) << (0)); |
1944 ((sG>>5)<<(2)) | | 2002 } |
1945 ((sB>>6)<<(0)) ; | 2003 dst++; |
1946 } | 2004 src += srcbpp; |
1947 dst++; | 2005 } |
1948 src += srcbpp; | |
1949 } | |
1950 #endif | 2006 #endif |
1951 src += srcskip; | 2007 src += srcskip; |
1952 dst += dstskip; | 2008 dst += dstskip; |
1953 } | 2009 } |
1954 } else { | 2010 } else { |
1955 while ( height-- ) { | 2011 while (height--) { |
1956 #ifdef USE_DUFFS_LOOP | 2012 #ifdef USE_DUFFS_LOOP |
2013 /* *INDENT-OFF* */ | |
1957 DUFFS_LOOP( | 2014 DUFFS_LOOP( |
1958 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 2015 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, |
1959 sR, sG, sB); | 2016 sR, sG, sB); |
1960 if ( 1 ) { | 2017 if ( 1 ) { |
1961 /* Pack RGB into 8bit pixel */ | 2018 /* Pack RGB into 8bit pixel */ |
1962 *dst = map[((sR>>5)<<(3+2))| | 2019 *dst = map[((sR>>5)<<(3+2))| |
1963 ((sG>>5)<<(2)) | | 2020 ((sG>>5)<<(2)) | |
1964 ((sB>>6)<<(0)) ]; | 2021 ((sB>>6)<<(0)) ]; |
1965 } | 2022 } |
1966 dst++; | 2023 dst++; |
1967 src += srcbpp; | 2024 src += srcbpp; |
1968 , width); | 2025 , width); |
2026 /* *INDENT-ON* */ | |
1969 #else | 2027 #else |
1970 for ( c=width; c; --c ) { | 2028 for (c = width; c; --c) { |
1971 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 2029 DISEMBLE_RGB (src, srcbpp, srcfmt, Pixel, sR, sG, sB); |
1972 sR, sG, sB); | 2030 if (1) { |
1973 if ( 1 ) { | 2031 /* Pack RGB into 8bit pixel */ |
1974 /* Pack RGB into 8bit pixel */ | 2032 *dst = map[((sR >> 5) << (3 + 2)) | |
1975 *dst = map[((sR>>5)<<(3+2))| | 2033 ((sG >> 5) << (2)) | ((sB >> 6) << (0))]; |
1976 ((sG>>5)<<(2)) | | 2034 } |
1977 ((sB>>6)<<(0)) ]; | 2035 dst++; |
1978 } | 2036 src += srcbpp; |
1979 dst++; | 2037 } |
1980 src += srcbpp; | |
1981 } | |
1982 #endif /* USE_DUFFS_LOOP */ | 2038 #endif /* USE_DUFFS_LOOP */ |
1983 src += srcskip; | 2039 src += srcskip; |
1984 dst += dstskip; | 2040 dst += dstskip; |
1985 } | 2041 } |
1986 } | 2042 } |
1987 } | 2043 } |
1988 | 2044 |
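[Annotation, not part of the changeset: DISEMBLE_RGB is what makes BlitNto1 above work for any source depth; it fetches srcbpp bytes into Pixel and splits it using the pixel format's shift/loss fields. A sketch of the 4-byte case, assuming the usual SDL_PixelFormat field names (the real macro in SDL_blit.h also handles 1-3 byte pixels):]

    /* Roughly what DISEMBLE_RGB does when srcbpp == 4: */
    Pixel = *(Uint32 *) src;
    sR = ((Pixel & srcfmt->Rmask) >> srcfmt->Rshift) << srcfmt->Rloss;
    sG = ((Pixel & srcfmt->Gmask) >> srcfmt->Gshift) << srcfmt->Gloss;
    sB = ((Pixel & srcfmt->Bmask) >> srcfmt->Bshift) << srcfmt->Bloss;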
1989 /* blits 32 bit RGB<->RGBA with both surfaces having the same R,G,B fields */ | 2045 /* blits 32 bit RGB<->RGBA with both surfaces having the same R,G,B fields */ |
1990 static void Blit4to4MaskAlpha(SDL_BlitInfo *info) | 2046 static void |
1991 { | 2047 Blit4to4MaskAlpha (SDL_BlitInfo * info) |
1992 int width = info->d_width; | 2048 { |
1993 int height = info->d_height; | 2049 int width = info->d_width; |
1994 Uint32 *src = (Uint32 *)info->s_pixels; | 2050 int height = info->d_height; |
1995 int srcskip = info->s_skip; | 2051 Uint32 *src = (Uint32 *) info->s_pixels; |
1996 Uint32 *dst = (Uint32 *)info->d_pixels; | 2052 int srcskip = info->s_skip; |
1997 int dstskip = info->d_skip; | 2053 Uint32 *dst = (Uint32 *) info->d_pixels; |
1998 SDL_PixelFormat *srcfmt = info->src; | 2054 int dstskip = info->d_skip; |
1999 SDL_PixelFormat *dstfmt = info->dst; | 2055 SDL_PixelFormat *srcfmt = info->src; |
2000 | 2056 SDL_PixelFormat *dstfmt = info->dst; |
2001 if (dstfmt->Amask) { | 2057 |
2002 /* RGB->RGBA, SET_ALPHA */ | 2058 if (dstfmt->Amask) { |
2003 Uint32 mask = (srcfmt->alpha >> dstfmt->Aloss) << dstfmt->Ashift; | 2059 /* RGB->RGBA, SET_ALPHA */ |
2004 | 2060 Uint32 mask = (srcfmt->alpha >> dstfmt->Aloss) << dstfmt->Ashift; |
2005 while ( height-- ) { | 2061 |
2062 while (height--) { | |
2063 /* *INDENT-OFF* */ | |
2006 DUFFS_LOOP( | 2064 DUFFS_LOOP( |
2007 { | 2065 { |
2008 *dst = *src | mask; | 2066 *dst = *src | mask; |
2009 ++dst; | 2067 ++dst; |
2010 ++src; | 2068 ++src; |
2011 }, | 2069 }, |
2012 width); | 2070 width); |
2013 src = (Uint32*)((Uint8*)src + srcskip); | 2071 /* *INDENT-ON* */ |
2014 dst = (Uint32*)((Uint8*)dst + dstskip); | 2072 src = (Uint32 *) ((Uint8 *) src + srcskip); |
2015 } | 2073 dst = (Uint32 *) ((Uint8 *) dst + dstskip); |
2016 } else { | 2074 } |
2017 /* RGBA->RGB, NO_ALPHA */ | 2075 } else { |
2018 Uint32 mask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask; | 2076 /* RGBA->RGB, NO_ALPHA */ |
2019 | 2077 Uint32 mask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask; |
2020 while ( height-- ) { | 2078 |
2079 while (height--) { | |
2080 /* *INDENT-OFF* */ | |
2021 DUFFS_LOOP( | 2081 DUFFS_LOOP( |
2022 { | 2082 { |
2023 *dst = *src & mask; | 2083 *dst = *src & mask; |
2024 ++dst; | 2084 ++dst; |
2025 ++src; | 2085 ++src; |
2026 }, | 2086 }, |
2027 width); | 2087 width); |
2028 src = (Uint32*)((Uint8*)src + srcskip); | 2088 /* *INDENT-ON* */ |
2029 dst = (Uint32*)((Uint8*)dst + dstskip); | 2089 src = (Uint32 *) ((Uint8 *) src + srcskip); |
2030 } | 2090 dst = (Uint32 *) ((Uint8 *) dst + dstskip); |
2031 } | 2091 } |
2032 } | 2092 } |
2033 | 2093 } |
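[Annotation, not part of the changeset: because the R, G, B fields already line up, Blit4to4MaskAlpha reduces each pixel to a single bitwise operation. A worked example, assuming an ARGB8888 destination (Amask 0xFF000000, Ashift 24, Aloss 0):]

    /* RGB->RGBA: OR the surface's constant alpha into the alpha field.
         srcfmt->alpha = 0xFF  ->  mask = (0xFF >> 0) << 24 = 0xFF000000
         *src = 0x00123456     ->  *dst = 0xFF123456
       RGBA->RGB: AND with Rmask|Gmask|Bmask to clear the alpha bits.
         *src = 0x80123456     ->  *dst = 0x80123456 & 0x00FFFFFF = 0x00123456 */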
2034 static void BlitNtoN(SDL_BlitInfo *info) | 2094 |
2035 { | 2095 static void |
2036 int width = info->d_width; | 2096 BlitNtoN (SDL_BlitInfo * info) |
2037 int height = info->d_height; | 2097 { |
2038 Uint8 *src = info->s_pixels; | 2098 int width = info->d_width; |
2039 int srcskip = info->s_skip; | 2099 int height = info->d_height; |
2040 Uint8 *dst = info->d_pixels; | 2100 Uint8 *src = info->s_pixels; |
2041 int dstskip = info->d_skip; | 2101 int srcskip = info->s_skip; |
2042 SDL_PixelFormat *srcfmt = info->src; | 2102 Uint8 *dst = info->d_pixels; |
2043 int srcbpp = srcfmt->BytesPerPixel; | 2103 int dstskip = info->d_skip; |
2044 SDL_PixelFormat *dstfmt = info->dst; | 2104 SDL_PixelFormat *srcfmt = info->src; |
2045 int dstbpp = dstfmt->BytesPerPixel; | 2105 int srcbpp = srcfmt->BytesPerPixel; |
2046 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; | 2106 SDL_PixelFormat *dstfmt = info->dst; |
2047 | 2107 int dstbpp = dstfmt->BytesPerPixel; |
2048 while ( height-- ) { | 2108 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; |
2109 | |
2110 while (height--) { | |
2111 /* *INDENT-OFF* */ | |
2049 DUFFS_LOOP( | 2112 DUFFS_LOOP( |
2050 { | 2113 { |
2051 Uint32 Pixel; | 2114 Uint32 Pixel; |
2052 unsigned sR; | 2115 unsigned sR; |
2053 unsigned sG; | 2116 unsigned sG; |
2054 unsigned sB; | 2117 unsigned sB; |
2055 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, sR, sG, sB); | 2118 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, sR, sG, sB); |
2056 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, alpha); | 2119 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, alpha); |
2057 dst += dstbpp; | 2120 dst += dstbpp; |
2058 src += srcbpp; | 2121 src += srcbpp; |
2059 }, | 2122 }, |
2060 width); | 2123 width); |
2061 src += srcskip; | 2124 /* *INDENT-ON* */ |
2062 dst += dstskip; | 2125 src += srcskip; |
2063 } | 2126 dst += dstskip; |
2064 } | 2127 } |
2065 | 2128 } |
2066 static void BlitNtoNCopyAlpha(SDL_BlitInfo *info) | 2129 |
2067 { | 2130 static void |
2068 int width = info->d_width; | 2131 BlitNtoNCopyAlpha (SDL_BlitInfo * info) |
2069 int height = info->d_height; | 2132 { |
2070 Uint8 *src = info->s_pixels; | 2133 int width = info->d_width; |
2071 int srcskip = info->s_skip; | 2134 int height = info->d_height; |
2072 Uint8 *dst = info->d_pixels; | 2135 Uint8 *src = info->s_pixels; |
2073 int dstskip = info->d_skip; | 2136 int srcskip = info->s_skip; |
2074 SDL_PixelFormat *srcfmt = info->src; | 2137 Uint8 *dst = info->d_pixels; |
2075 int srcbpp = srcfmt->BytesPerPixel; | 2138 int dstskip = info->d_skip; |
2076 SDL_PixelFormat *dstfmt = info->dst; | 2139 SDL_PixelFormat *srcfmt = info->src; |
2077 int dstbpp = dstfmt->BytesPerPixel; | 2140 int srcbpp = srcfmt->BytesPerPixel; |
2078 int c; | 2141 SDL_PixelFormat *dstfmt = info->dst; |
2079 | 2142 int dstbpp = dstfmt->BytesPerPixel; |
2080 /* FIXME: should map alpha to [0..255] correctly! */ | 2143 int c; |
2081 while ( height-- ) { | 2144 |
2082 for ( c=width; c; --c ) { | 2145 /* FIXME: should map alpha to [0..255] correctly! */ |
2083 Uint32 Pixel; | 2146 while (height--) { |
2084 unsigned sR, sG, sB, sA; | 2147 for (c = width; c; --c) { |
2085 DISEMBLE_RGBA(src, srcbpp, srcfmt, Pixel, | 2148 Uint32 Pixel; |
2086 sR, sG, sB, sA); | 2149 unsigned sR, sG, sB, sA; |
2087 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, | 2150 DISEMBLE_RGBA (src, srcbpp, srcfmt, Pixel, sR, sG, sB, sA); |
2088 sR, sG, sB, sA); | 2151 ASSEMBLE_RGBA (dst, dstbpp, dstfmt, sR, sG, sB, sA); |
2089 dst += dstbpp; | 2152 dst += dstbpp; |
2090 src += srcbpp; | 2153 src += srcbpp; |
2091 } | 2154 } |
2092 src += srcskip; | 2155 src += srcskip; |
2093 dst += dstskip; | 2156 dst += dstskip; |
2094 } | 2157 } |
2095 } | 2158 } |
2096 | 2159 |
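[Annotation on the FIXME above, not part of the changeset: DISEMBLE_RGBA leaves sA shifted up by Aloss, so a 4-bit alpha of 0xF arrives as 0xF0 and can never reach 0xFF. The usual correction, a sketch and not what this revision does, replicates the high bits into the vacated low bits (exact for 0 < loss < 8; loss == 0 degenerates to a no-op):]

    /* Expand a channel that lost `loss` low bits back to the full 0..255
       range by bit replication: loss = 4, value = 0xF0 -> 0xF0|0x0F = 0xFF. */
    static Uint8
    ExpandChannel(Uint8 value, int loss)
    {
        return value | (value >> (8 - loss));
    }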
2097 static void BlitNto1Key(SDL_BlitInfo *info) | 2160 static void |
2098 { | 2161 BlitNto1Key (SDL_BlitInfo * info) |
2099 int width = info->d_width; | 2162 { |
2100 int height = info->d_height; | 2163 int width = info->d_width; |
2101 Uint8 *src = info->s_pixels; | 2164 int height = info->d_height; |
2102 int srcskip = info->s_skip; | 2165 Uint8 *src = info->s_pixels; |
2103 Uint8 *dst = info->d_pixels; | 2166 int srcskip = info->s_skip; |
2104 int dstskip = info->d_skip; | 2167 Uint8 *dst = info->d_pixels; |
2105 SDL_PixelFormat *srcfmt = info->src; | 2168 int dstskip = info->d_skip; |
2106 const Uint8 *palmap = info->table; | 2169 SDL_PixelFormat *srcfmt = info->src; |
2107 Uint32 ckey = srcfmt->colorkey; | 2170 const Uint8 *palmap = info->table; |
2108 Uint32 rgbmask = ~srcfmt->Amask; | 2171 Uint32 ckey = srcfmt->colorkey; |
2109 int srcbpp; | 2172 Uint32 rgbmask = ~srcfmt->Amask; |
2110 Uint32 Pixel; | 2173 int srcbpp; |
2111 unsigned sR, sG, sB; | 2174 Uint32 Pixel; |
2112 | 2175 unsigned sR, sG, sB; |
2113 /* Set up some basic variables */ | 2176 |
2114 srcbpp = srcfmt->BytesPerPixel; | 2177 /* Set up some basic variables */ |
2115 ckey &= rgbmask; | 2178 srcbpp = srcfmt->BytesPerPixel; |
2116 | 2179 ckey &= rgbmask; |
2117 if ( palmap == NULL ) { | 2180 |
2118 while ( height-- ) { | 2181 if (palmap == NULL) { |
2182 while (height--) { | |
2183 /* *INDENT-OFF* */ | |
2119 DUFFS_LOOP( | 2184 DUFFS_LOOP( |
2120 { | 2185 { |
2121 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 2186 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, |
2122 sR, sG, sB); | 2187 sR, sG, sB); |
2123 if ( (Pixel & rgbmask) != ckey ) { | 2188 if ( (Pixel & rgbmask) != ckey ) { |
2124 /* Pack RGB into 8bit pixel */ | 2189 /* Pack RGB into 8bit pixel */ |
2125 *dst = ((sR>>5)<<(3+2))| | 2190 *dst = ((sR>>5)<<(3+2))| |
2126 ((sG>>5)<<(2)) | | 2191 ((sG>>5)<<(2)) | |
2127 ((sB>>6)<<(0)) ; | 2192 ((sB>>6)<<(0)) ; |
2128 } | 2193 } |
2129 dst++; | 2194 dst++; |
2130 src += srcbpp; | 2195 src += srcbpp; |
2131 }, | 2196 }, |
2132 width); | 2197 width); |
2133 src += srcskip; | 2198 /* *INDENT-ON* */ |
2134 dst += dstskip; | 2199 src += srcskip; |
2135 } | 2200 dst += dstskip; |
2136 } else { | 2201 } |
2137 while ( height-- ) { | 2202 } else { |
2203 while (height--) { | |
2204 /* *INDENT-OFF* */ | |
2138 DUFFS_LOOP( | 2205 DUFFS_LOOP( |
2139 { | 2206 { |
2140 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, | 2207 DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, |
2141 sR, sG, sB); | 2208 sR, sG, sB); |
2142 if ( (Pixel & rgbmask) != ckey ) { | 2209 if ( (Pixel & rgbmask) != ckey ) { |
2143 /* Pack RGB into 8bit pixel */ | 2210 /* Pack RGB into 8bit pixel */ |
2144 *dst = palmap[((sR>>5)<<(3+2))| | 2211 *dst = palmap[((sR>>5)<<(3+2))| |
2145 ((sG>>5)<<(2)) | | 2212 ((sG>>5)<<(2)) | |
2146 ((sB>>6)<<(0)) ]; | 2213 ((sB>>6)<<(0)) ]; |
2147 } | 2214 } |
2148 dst++; | 2215 dst++; |
2149 src += srcbpp; | 2216 src += srcbpp; |
2150 }, | 2217 }, |
2151 width); | 2218 width); |
2152 src += srcskip; | 2219 /* *INDENT-ON* */ |
2153 dst += dstskip; | 2220 src += srcskip; |
2154 } | 2221 dst += dstskip; |
2155 } | 2222 } |
2156 } | 2223 } |
2157 | 2224 } |
2158 static void Blit2to2Key(SDL_BlitInfo *info) | 2225 |
2159 { | 2226 static void |
2160 int width = info->d_width; | 2227 Blit2to2Key (SDL_BlitInfo * info) |
2161 int height = info->d_height; | 2228 { |
2162 Uint16 *srcp = (Uint16 *)info->s_pixels; | 2229 int width = info->d_width; |
2163 int srcskip = info->s_skip; | 2230 int height = info->d_height; |
2164 Uint16 *dstp = (Uint16 *)info->d_pixels; | 2231 Uint16 *srcp = (Uint16 *) info->s_pixels; |
2165 int dstskip = info->d_skip; | 2232 int srcskip = info->s_skip; |
2166 Uint32 ckey = info->src->colorkey; | 2233 Uint16 *dstp = (Uint16 *) info->d_pixels; |
2167 Uint32 rgbmask = ~info->src->Amask; | 2234 int dstskip = info->d_skip; |
2168 | 2235 Uint32 ckey = info->src->colorkey; |
2169 /* Set up some basic variables */ | 2236 Uint32 rgbmask = ~info->src->Amask; |
2170 srcskip /= 2; | 2237 |
2171 dstskip /= 2; | 2238 /* Set up some basic variables */ |
2172 ckey &= rgbmask; | 2239 srcskip /= 2; |
2173 | 2240 dstskip /= 2; |
2174 while ( height-- ) { | 2241 ckey &= rgbmask; |
2242 | |
2243 while (height--) { | |
2244 /* *INDENT-OFF* */ | |
2175 DUFFS_LOOP( | 2245 DUFFS_LOOP( |
2176 { | 2246 { |
2177 if ( (*srcp & rgbmask) != ckey ) { | 2247 if ( (*srcp & rgbmask) != ckey ) { |
2178 *dstp = *srcp; | 2248 *dstp = *srcp; |
2179 } | 2249 } |
2180 dstp++; | 2250 dstp++; |
2181 srcp++; | 2251 srcp++; |
2182 }, | 2252 }, |
2183 width); | 2253 width); |
2184 srcp += srcskip; | 2254 /* *INDENT-ON* */ |
2185 dstp += dstskip; | 2255 srcp += srcskip; |
2186 } | 2256 dstp += dstskip; |
2187 } | 2257 } |
2188 | 2258 } |
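[Annotation, not part of the changeset: masking both the key and each pixel with rgbmask (~Amask) keeps any alpha bits in the source from defeating the colorkey test. For example, with a 16-bit format whose Amask is 0x8000:]

    /* rgbmask = ~0x8000 = 0x7FFF;  ckey = 0xFFFF & 0x7FFF = 0x7FFF
       Pixels 0x7FFF and 0xFFFF both mask to 0x7FFF, so both are treated
       as the key colour regardless of their alpha bit.                  */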
2189 static void BlitNtoNKey(SDL_BlitInfo *info) | 2259 |
2190 { | 2260 static void |
2191 int width = info->d_width; | 2261 BlitNtoNKey (SDL_BlitInfo * info) |
2192 int height = info->d_height; | 2262 { |
2193 Uint8 *src = info->s_pixels; | 2263 int width = info->d_width; |
2194 int srcskip = info->s_skip; | 2264 int height = info->d_height; |
2195 Uint8 *dst = info->d_pixels; | 2265 Uint8 *src = info->s_pixels; |
2196 int dstskip = info->d_skip; | 2266 int srcskip = info->s_skip; |
2197 Uint32 ckey = info->src->colorkey; | 2267 Uint8 *dst = info->d_pixels; |
2198 SDL_PixelFormat *srcfmt = info->src; | 2268 int dstskip = info->d_skip; |
2199 SDL_PixelFormat *dstfmt = info->dst; | 2269 Uint32 ckey = info->src->colorkey; |
2200 int srcbpp = srcfmt->BytesPerPixel; | 2270 SDL_PixelFormat *srcfmt = info->src; |
2201 int dstbpp = dstfmt->BytesPerPixel; | 2271 SDL_PixelFormat *dstfmt = info->dst; |
2202 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; | 2272 int srcbpp = srcfmt->BytesPerPixel; |
2203 Uint32 rgbmask = ~srcfmt->Amask; | 2273 int dstbpp = dstfmt->BytesPerPixel; |
2204 | 2274 unsigned alpha = dstfmt->Amask ? srcfmt->alpha : 0; |
2205 /* Set up some basic variables */ | 2275 Uint32 rgbmask = ~srcfmt->Amask; |
2206 ckey &= rgbmask; | 2276 |
2207 | 2277 /* Set up some basic variables */ |
2208 while ( height-- ) { | 2278 ckey &= rgbmask; |
2279 | |
2280 while (height--) { | |
2281 /* *INDENT-OFF* */ | |
2209 DUFFS_LOOP( | 2282 DUFFS_LOOP( |
2210 { | 2283 { |
2211 Uint32 Pixel; | 2284 Uint32 Pixel; |
2212 unsigned sR; | 2285 unsigned sR; |
2213 unsigned sG; | 2286 unsigned sG; |
2214 unsigned sB; | 2287 unsigned sB; |
2215 RETRIEVE_RGB_PIXEL(src, srcbpp, Pixel); | 2288 RETRIEVE_RGB_PIXEL(src, srcbpp, Pixel); |
2216 if ( (Pixel & rgbmask) != ckey ) { | 2289 if ( (Pixel & rgbmask) != ckey ) { |
2217 RGB_FROM_PIXEL(Pixel, srcfmt, sR, sG, sB); | 2290 RGB_FROM_PIXEL(Pixel, srcfmt, sR, sG, sB); |
2218 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, | 2291 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, |
2219 sR, sG, sB, alpha); | 2292 sR, sG, sB, alpha); |
2220 } | 2293 } |
2221 dst += dstbpp; | 2294 dst += dstbpp; |
2222 src += srcbpp; | 2295 src += srcbpp; |
2223 }, | 2296 }, |
2224 width); | 2297 width); |
2225 src += srcskip; | 2298 /* *INDENT-ON* */ |
2226 dst += dstskip; | 2299 src += srcskip; |
2227 } | 2300 dst += dstskip; |
2228 } | 2301 } |
2229 | 2302 } |
2230 static void BlitNtoNKeyCopyAlpha(SDL_BlitInfo *info) | 2303 |
2231 { | 2304 static void |
2232 int width = info->d_width; | 2305 BlitNtoNKeyCopyAlpha (SDL_BlitInfo * info) |
2233 int height = info->d_height; | 2306 { |
2234 Uint8 *src = info->s_pixels; | 2307 int width = info->d_width; |
2235 int srcskip = info->s_skip; | 2308 int height = info->d_height; |
2236 Uint8 *dst = info->d_pixels; | 2309 Uint8 *src = info->s_pixels; |
2237 int dstskip = info->d_skip; | 2310 int srcskip = info->s_skip; |
2238 Uint32 ckey = info->src->colorkey; | 2311 Uint8 *dst = info->d_pixels; |
2239 SDL_PixelFormat *srcfmt = info->src; | 2312 int dstskip = info->d_skip; |
2240 SDL_PixelFormat *dstfmt = info->dst; | 2313 Uint32 ckey = info->src->colorkey; |
2241 Uint32 rgbmask = ~srcfmt->Amask; | 2314 SDL_PixelFormat *srcfmt = info->src; |
2242 | 2315 SDL_PixelFormat *dstfmt = info->dst; |
2243 Uint8 srcbpp; | 2316 Uint32 rgbmask = ~srcfmt->Amask; |
2244 Uint8 dstbpp; | 2317 |
2245 Uint32 Pixel; | 2318 Uint8 srcbpp; |
2246 unsigned sR, sG, sB, sA; | 2319 Uint8 dstbpp; |
2247 | 2320 Uint32 Pixel; |
2248 /* Set up some basic variables */ | 2321 unsigned sR, sG, sB, sA; |
2249 srcbpp = srcfmt->BytesPerPixel; | 2322 |
2250 dstbpp = dstfmt->BytesPerPixel; | 2323 /* Set up some basic variables */ |
2251 ckey &= rgbmask; | 2324 srcbpp = srcfmt->BytesPerPixel; |
2252 | 2325 dstbpp = dstfmt->BytesPerPixel; |
2253 /* FIXME: should map alpha to [0..255] correctly! */ | 2326 ckey &= rgbmask; |
2254 while ( height-- ) { | 2327 |
2328 /* FIXME: should map alpha to [0..255] correctly! */ | |
2329 while (height--) { | |
2330 /* *INDENT-OFF* */ | |
2255 DUFFS_LOOP( | 2331 DUFFS_LOOP( |
2256 { | 2332 { |
2257 DISEMBLE_RGBA(src, srcbpp, srcfmt, Pixel, | 2333 DISEMBLE_RGBA(src, srcbpp, srcfmt, Pixel, |
2258 sR, sG, sB, sA); | 2334 sR, sG, sB, sA); |
2259 if ( (Pixel & rgbmask) != ckey ) { | 2335 if ( (Pixel & rgbmask) != ckey ) { |
2260 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, | 2336 ASSEMBLE_RGBA(dst, dstbpp, dstfmt, |
2261 sR, sG, sB, sA); | 2337 sR, sG, sB, sA); |
2262 } | 2338 } |
2263 dst += dstbpp; | 2339 dst += dstbpp; |
2264 src += srcbpp; | 2340 src += srcbpp; |
2265 }, | 2341 }, |
2266 width); | 2342 width); |
2267 src += srcskip; | 2343 /* *INDENT-ON* */ |
2268 dst += dstskip; | 2344 src += srcskip; |
2269 } | 2345 dst += dstskip; |
2346 } | |
2270 } | 2347 } |
2271 | 2348 |
2272 /* Normal N to N optimized blitters */ | 2349 /* Normal N to N optimized blitters */ |
2273 struct blit_table { | 2350 struct blit_table |
2274 Uint32 srcR, srcG, srcB; | 2351 { |
2275 int dstbpp; | 2352 Uint32 srcR, srcG, srcB; |
2276 Uint32 dstR, dstG, dstB; | 2353 int dstbpp; |
2277 Uint32 blit_features; | 2354 Uint32 dstR, dstG, dstB; |
2278 void *aux_data; | 2355 Uint32 blit_features; |
2279 SDL_loblit blitfunc; | 2356 void *aux_data; |
2280 enum { NO_ALPHA=1, SET_ALPHA=2, COPY_ALPHA=4 } alpha; | 2357 SDL_loblit blitfunc; |
2358 enum | |
2359 { NO_ALPHA = 1, SET_ALPHA = 2, COPY_ALPHA = 4 } alpha; | |
2281 }; | 2360 }; |
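[Annotation, not part of the changeset: each entry in the tables below pairs a source/destination mask signature with the blitter to use when it matches. Reading the Blit_RGB565_ARGB8888 entry from normal_blit_2 field by field (an annotated copy, not new code):]

    {0x0000F800, 0x000007E0, 0x0000001F,  /* srcR/G/B: source is RGB565    */
     4,                                   /* dstbpp: 4 bytes per pixel     */
     0x00FF0000, 0x0000FF00, 0x000000FF,  /* dstR/G/B: xRGB8888 layout     */
     0, NULL,                             /* no CPU-feature bits, no aux   */
     Blit_RGB565_ARGB8888, SET_ALPHA}     /* blitter; must fill in alpha   */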
2282 static const struct blit_table normal_blit_1[] = { | 2361 static const struct blit_table normal_blit_1[] = { |
2283 /* Default for 8-bit RGB source, an invalid combination */ | 2362 /* Default for 8-bit RGB source, an invalid combination */ |
2284 { 0,0,0, 0, 0,0,0, 0, NULL, NULL }, | 2363 {0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL}, |
2285 }; | 2364 }; |
2286 static const struct blit_table normal_blit_2[] = { | 2365 static const struct blit_table normal_blit_2[] = { |
2287 #if SDL_HERMES_BLITTERS | 2366 #if SDL_HERMES_BLITTERS |
2288 { 0x0000F800,0x000007E0,0x0000001F, 2, 0x0000001F,0x000007E0,0x0000F800, | 2367 {0x0000F800, 0x000007E0, 0x0000001F, 2, 0x0000001F, 0x000007E0, |
2289 0, ConvertX86p16_16BGR565, ConvertX86, NO_ALPHA }, | 2368 0x0000F800, |
2290 { 0x0000F800,0x000007E0,0x0000001F, 2, 0x00007C00,0x000003E0,0x0000001F, | 2369 0, ConvertX86p16_16BGR565, ConvertX86, NO_ALPHA}, |
2291 0, ConvertX86p16_16RGB555, ConvertX86, NO_ALPHA }, | 2370 {0x0000F800, 0x000007E0, 0x0000001F, 2, 0x00007C00, 0x000003E0, |
2292 { 0x0000F800,0x000007E0,0x0000001F, 2, 0x0000001F,0x000003E0,0x00007C00, | 2371 0x0000001F, |
2293 0, ConvertX86p16_16BGR555, ConvertX86, NO_ALPHA }, | 2372 0, ConvertX86p16_16RGB555, ConvertX86, NO_ALPHA}, |
2373 {0x0000F800, 0x000007E0, 0x0000001F, 2, 0x0000001F, 0x000003E0, | |
2374 0x00007C00, | |
2375 0, ConvertX86p16_16BGR555, ConvertX86, NO_ALPHA}, | |
2294 #elif SDL_ALTIVEC_BLITTERS | 2376 #elif SDL_ALTIVEC_BLITTERS |
2295 /* has-altivec */ | 2377 /* has-altivec */ |
2296 { 0x0000F800,0x000007E0,0x0000001F, 4, 0x00000000,0x00000000,0x00000000, | 2378 {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x00000000, 0x00000000, |
2297 2, NULL, Blit_RGB565_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA }, | 2379 0x00000000, |
2298 { 0x00007C00,0x000003E0,0x0000001F, 4, 0x00000000,0x00000000,0x00000000, | 2380 2, NULL, Blit_RGB565_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA}, |
2299 2, NULL, Blit_RGB555_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA }, | 2381 {0x00007C00, 0x000003E0, 0x0000001F, 4, 0x00000000, 0x00000000, |
2382 0x00000000, | |
2383 2, NULL, Blit_RGB555_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA}, | |
2300 #endif | 2384 #endif |
2301 { 0x0000F800,0x000007E0,0x0000001F, 4, 0x00FF0000,0x0000FF00,0x000000FF, | 2385 {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x00FF0000, 0x0000FF00, |
2302 0, NULL, Blit_RGB565_ARGB8888, SET_ALPHA }, | 2386 0x000000FF, |
2303 { 0x0000F800,0x000007E0,0x0000001F, 4, 0x000000FF,0x0000FF00,0x00FF0000, | 2387 0, NULL, Blit_RGB565_ARGB8888, SET_ALPHA}, |
2304 0, NULL, Blit_RGB565_ABGR8888, SET_ALPHA }, | 2388 {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x000000FF, 0x0000FF00, |
2305 { 0x0000F800,0x000007E0,0x0000001F, 4, 0xFF000000,0x00FF0000,0x0000FF00, | 2389 0x00FF0000, |
2306 0, NULL, Blit_RGB565_RGBA8888, SET_ALPHA }, | 2390 0, NULL, Blit_RGB565_ABGR8888, SET_ALPHA}, |
2307 { 0x0000F800,0x000007E0,0x0000001F, 4, 0x0000FF00,0x00FF0000,0xFF000000, | 2391 {0x0000F800, 0x000007E0, 0x0000001F, 4, 0xFF000000, 0x00FF0000, |
2308 0, NULL, Blit_RGB565_BGRA8888, SET_ALPHA }, | 2392 0x0000FF00, |
2393 0, NULL, Blit_RGB565_RGBA8888, SET_ALPHA}, | |
2394 {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x0000FF00, 0x00FF0000, | |
2395 0xFF000000, | |
2396 0, NULL, Blit_RGB565_BGRA8888, SET_ALPHA}, | |
2309 | 2397 |
2310 /* Default for 16-bit RGB source, used if no other blitter matches */ | 2398 /* Default for 16-bit RGB source, used if no other blitter matches */ |
2311 { 0,0,0, 0, 0,0,0, 0, NULL, BlitNtoN, 0 } | 2399 {0, 0, 0, 0, 0, 0, 0, 0, NULL, BlitNtoN, 0} |
2312 }; | 2400 }; |
2313 static const struct blit_table normal_blit_3[] = { | 2401 static const struct blit_table normal_blit_3[] = { |
2314 /* Default for 24-bit RGB source, never optimized */ | 2402 /* Default for 24-bit RGB source, never optimized */ |
2315 { 0,0,0, 0, 0,0,0, 0, NULL, BlitNtoN, 0 } | 2403 {0, 0, 0, 0, 0, 0, 0, 0, NULL, BlitNtoN, 0} |
2316 }; | 2404 }; |
2317 static const struct blit_table normal_blit_4[] = { | 2405 static const struct blit_table normal_blit_4[] = { |
2318 #if SDL_HERMES_BLITTERS | 2406 #if SDL_HERMES_BLITTERS |
2319 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000F800,0x000007E0,0x0000001F, | 2407 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000F800, 0x000007E0, |
2320 1, ConvertMMXpII32_16RGB565, ConvertMMX, NO_ALPHA }, | 2408 0x0000001F, |
2321 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000F800,0x000007E0,0x0000001F, | 2409 1, ConvertMMXpII32_16RGB565, ConvertMMX, NO_ALPHA}, |
2322 0, ConvertX86p32_16RGB565, ConvertX86, NO_ALPHA }, | 2410 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000F800, 0x000007E0, |
2323 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000001F,0x000007E0,0x0000F800, | 2411 0x0000001F, |
2324 1, ConvertMMXpII32_16BGR565, ConvertMMX, NO_ALPHA }, | 2412 0, ConvertX86p32_16RGB565, ConvertX86, NO_ALPHA}, |
2325 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000001F,0x000007E0,0x0000F800, | 2413 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000001F, 0x000007E0, |
2326 0, ConvertX86p32_16BGR565, ConvertX86, NO_ALPHA }, | 2414 0x0000F800, |
2327 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x00007C00,0x000003E0,0x0000001F, | 2415 1, ConvertMMXpII32_16BGR565, ConvertMMX, NO_ALPHA}, |
2328 1, ConvertMMXpII32_16RGB555, ConvertMMX, NO_ALPHA }, | 2416 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000001F, 0x000007E0, |
2329 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x00007C00,0x000003E0,0x0000001F, | 2417 0x0000F800, |
2330 0, ConvertX86p32_16RGB555, ConvertX86, NO_ALPHA }, | 2418 0, ConvertX86p32_16BGR565, ConvertX86, NO_ALPHA}, |
2331 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000001F,0x000003E0,0x00007C00, | 2419 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x00007C00, 0x000003E0, |
2332 1, ConvertMMXpII32_16BGR555, ConvertMMX, NO_ALPHA }, | 2420 0x0000001F, |
2333 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000001F,0x000003E0,0x00007C00, | 2421 1, ConvertMMXpII32_16RGB555, ConvertMMX, NO_ALPHA}, |
2334 0, ConvertX86p32_16BGR555, ConvertX86, NO_ALPHA }, | 2422 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x00007C00, 0x000003E0, |
2335 { 0x00FF0000,0x0000FF00,0x000000FF, 3, 0x00FF0000,0x0000FF00,0x000000FF, | 2423 0x0000001F, |
2336 0, ConvertX86p32_24RGB888, ConvertX86, NO_ALPHA }, | 2424 0, ConvertX86p32_16RGB555, ConvertX86, NO_ALPHA}, |
2337 { 0x00FF0000,0x0000FF00,0x000000FF, 3, 0x000000FF,0x0000FF00,0x00FF0000, | 2425 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000001F, 0x000003E0, |
2338 0, ConvertX86p32_24BGR888, ConvertX86, NO_ALPHA }, | 2426 0x00007C00, |
2339 { 0x00FF0000,0x0000FF00,0x000000FF, 4, 0x000000FF,0x0000FF00,0x00FF0000, | 2427 1, ConvertMMXpII32_16BGR555, ConvertMMX, NO_ALPHA}, |
2340 0, ConvertX86p32_32BGR888, ConvertX86, NO_ALPHA }, | 2428 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000001F, 0x000003E0, |
2341 { 0x00FF0000,0x0000FF00,0x000000FF, 4, 0xFF000000,0x00FF0000,0x0000FF00, | 2429 0x00007C00, |
2342 0, ConvertX86p32_32RGBA888, ConvertX86, NO_ALPHA }, | 2430 0, ConvertX86p32_16BGR555, ConvertX86, NO_ALPHA}, |
2343 { 0x00FF0000,0x0000FF00,0x000000FF, 4, 0x0000FF00,0x00FF0000,0xFF000000, | 2431 {0x00FF0000, 0x0000FF00, 0x000000FF, 3, 0x00FF0000, 0x0000FF00, |
2344 0, ConvertX86p32_32BGRA888, ConvertX86, NO_ALPHA }, | 2432 0x000000FF, |
2433 0, ConvertX86p32_24RGB888, ConvertX86, NO_ALPHA}, | |
2434 {0x00FF0000, 0x0000FF00, 0x000000FF, 3, 0x000000FF, 0x0000FF00, | |
2435 0x00FF0000, | |
2436 0, ConvertX86p32_24BGR888, ConvertX86, NO_ALPHA}, | |
2437 {0x00FF0000, 0x0000FF00, 0x000000FF, 4, 0x000000FF, 0x0000FF00, | |
2438 0x00FF0000, | |
2439 0, ConvertX86p32_32BGR888, ConvertX86, NO_ALPHA}, | |
2440 {0x00FF0000, 0x0000FF00, 0x000000FF, 4, 0xFF000000, 0x00FF0000, | |
2441 0x0000FF00, | |
2442 0, ConvertX86p32_32RGBA888, ConvertX86, NO_ALPHA}, | |
2443 {0x00FF0000, 0x0000FF00, 0x000000FF, 4, 0x0000FF00, 0x00FF0000, | |
2444 0xFF000000, | |
2445 0, ConvertX86p32_32BGRA888, ConvertX86, NO_ALPHA}, | |
2345 #else | 2446 #else |
2346 #if SDL_ALTIVEC_BLITTERS | 2447 #if SDL_ALTIVEC_BLITTERS |
2347 /* has-altivec | dont-use-prefetch */ | 2448 /* has-altivec | dont-use-prefetch */ |
2348 { 0x00000000,0x00000000,0x00000000, 4, 0x00000000,0x00000000,0x00000000, | 2449 {0x00000000, 0x00000000, 0x00000000, 4, 0x00000000, 0x00000000, |
2349 6, NULL, ConvertAltivec32to32_noprefetch, NO_ALPHA | COPY_ALPHA | SET_ALPHA }, | 2450 0x00000000, |
2451 6, NULL, ConvertAltivec32to32_noprefetch, | |
2452 NO_ALPHA | COPY_ALPHA | SET_ALPHA}, | |
2350 /* has-altivec */ | 2453 /* has-altivec */ |
2351 { 0x00000000,0x00000000,0x00000000, 4, 0x00000000,0x00000000,0x00000000, | 2454 {0x00000000, 0x00000000, 0x00000000, 4, 0x00000000, 0x00000000, |
2352 2, NULL, ConvertAltivec32to32_prefetch, NO_ALPHA | COPY_ALPHA | SET_ALPHA }, | 2455 0x00000000, |
2456 2, NULL, ConvertAltivec32to32_prefetch, | |
2457 NO_ALPHA | COPY_ALPHA | SET_ALPHA}, | |
2353 /* has-altivec */ | 2458 /* has-altivec */ |
2354 { 0x00000000,0x00000000,0x00000000, 2, 0x0000F800,0x000007E0,0x0000001F, | 2459 {0x00000000, 0x00000000, 0x00000000, 2, 0x0000F800, 0x000007E0, |
2355 2, NULL, Blit_RGB888_RGB565Altivec, NO_ALPHA }, | 2460 0x0000001F, |
2461 2, NULL, Blit_RGB888_RGB565Altivec, NO_ALPHA}, | |
2356 #endif | 2462 #endif |
2357 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x0000F800,0x000007E0,0x0000001F, | 2463 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000F800, 0x000007E0, |
2358 0, NULL, Blit_RGB888_RGB565, NO_ALPHA }, | 2464 0x0000001F, |
2359 { 0x00FF0000,0x0000FF00,0x000000FF, 2, 0x00007C00,0x000003E0,0x0000001F, | 2465 0, NULL, Blit_RGB888_RGB565, NO_ALPHA}, |
2360 0, NULL, Blit_RGB888_RGB555, NO_ALPHA }, | 2466 {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x00007C00, 0x000003E0, |
2467 0x0000001F, | |
2468 0, NULL, Blit_RGB888_RGB555, NO_ALPHA}, | |
2361 #endif | 2469 #endif |
2362 /* Default for 32-bit RGB source, used if no other blitter matches */ | 2470 /* Default for 32-bit RGB source, used if no other blitter matches */ |
2363 { 0,0,0, 0, 0,0,0, 0, NULL, BlitNtoN, 0 } | 2471 {0, 0, 0, 0, 0, 0, 0, 0, NULL, BlitNtoN, 0} |
2364 }; | 2472 }; |
2365 static const struct blit_table *normal_blit[] = { | 2473 static const struct blit_table *normal_blit[] = { |
2366 normal_blit_1, normal_blit_2, normal_blit_3, normal_blit_4 | 2474 normal_blit_1, normal_blit_2, normal_blit_3, normal_blit_4 |
2367 }; | 2475 }; |
2368 | 2476 |
2369 /* Mask matches table, or table entry is zero */ | 2477 /* Mask matches table, or table entry is zero */ |
2370 #define MASKOK(x, y) (((x) == (y)) || ((y) == 0x00000000)) | 2478 #define MASKOK(x, y) (((x) == (y)) || ((y) == 0x00000000)) |
2371 | 2479 |
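[Annotation, not part of the changeset: MASKOK is what lets the all-zero catch-all rows match anything. A zero in the table acts as a wildcard, while a nonzero value must match exactly:]

    /* MASKOK(0x00FF0000, 0x00FF0000) -> true   (exact match)
       MASKOK(0x00FF0000, 0x00000000) -> true   (table mask 0 = wildcard)
       MASKOK(0x00FF0000, 0x000000FF) -> false  (mismatch)                */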
2372 SDL_loblit SDL_CalculateBlitN(SDL_Surface *surface, int blit_index) | 2480 SDL_loblit |
2373 { | 2481 SDL_CalculateBlitN (SDL_Surface * surface, int blit_index) |
2374 struct private_swaccel *sdata; | 2482 { |
2375 SDL_PixelFormat *srcfmt; | 2483 struct private_swaccel *sdata; |
2376 SDL_PixelFormat *dstfmt; | 2484 SDL_PixelFormat *srcfmt; |
2377 const struct blit_table *table; | 2485 SDL_PixelFormat *dstfmt; |
2378 int which; | 2486 const struct blit_table *table; |
2379 SDL_loblit blitfun; | 2487 int which; |
2380 | 2488 SDL_loblit blitfun; |
2381 /* Set up data for choosing the blit */ | 2489 |
2382 sdata = surface->map->sw_data; | 2490 /* Set up data for choosing the blit */ |
2383 srcfmt = surface->format; | 2491 sdata = surface->map->sw_data; |
2384 dstfmt = surface->map->dst->format; | 2492 srcfmt = surface->format; |
2385 | 2493 dstfmt = surface->map->dst->format; |
2386 if ( blit_index & 2 ) { | 2494 |
2387 /* alpha or alpha+colorkey */ | 2495 if (blit_index & 2) { |
2388 return SDL_CalculateAlphaBlit(surface, blit_index); | 2496 /* alpha or alpha+colorkey */ |
2389 } | 2497 return SDL_CalculateAlphaBlit (surface, blit_index); |
2390 | 2498 } |
2391 /* We don't support destinations of less than 8 bits */ | 2499 |
2392 if ( dstfmt->BitsPerPixel < 8 ) { | 2500 /* We don't support destinations of less than 8 bits */ |
2393 return(NULL); | 2501 if (dstfmt->BitsPerPixel < 8) { |
2394 } | 2502 return (NULL); |
2395 | 2503 } |
2396 if(blit_index == 1) { | 2504 |
2397 /* colorkey blit: Here we don't have too many options, mostly | 2505 if (blit_index == 1) { |
2398 because RLE is the preferred fast way to deal with this. | 2506 /* colorkey blit: Here we don't have too many options, mostly |
2399 If a particular case turns out to be useful we'll add it. */ | 2507 because RLE is the preferred fast way to deal with this. |
2400 | 2508 If a particular case turns out to be useful we'll add it. */ |
2401 if(srcfmt->BytesPerPixel == 2 | 2509 |
2402 && surface->map->identity) | 2510 if (srcfmt->BytesPerPixel == 2 && surface->map->identity) |
2403 return Blit2to2Key; | 2511 return Blit2to2Key; |
2404 else if(dstfmt->BytesPerPixel == 1) | 2512 else if (dstfmt->BytesPerPixel == 1) |
2405 return BlitNto1Key; | 2513 return BlitNto1Key; |
2406 else { | 2514 else { |
2407 #if SDL_ALTIVEC_BLITTERS | 2515 #if SDL_ALTIVEC_BLITTERS |
2408 if((srcfmt->BytesPerPixel == 4) && (dstfmt->BytesPerPixel == 4) && SDL_HasAltiVec()) { | 2516 if ((srcfmt->BytesPerPixel == 4) && (dstfmt->BytesPerPixel == 4) |
2409 return Blit32to32KeyAltivec; | 2517 && SDL_HasAltiVec ()) { |
2410 } else | 2518 return Blit32to32KeyAltivec; |
2519 } else | |
2411 #endif | 2520 #endif |
2412 | 2521 |
2413 if(srcfmt->Amask && dstfmt->Amask) | 2522 if (srcfmt->Amask && dstfmt->Amask) |
2414 return BlitNtoNKeyCopyAlpha; | 2523 return BlitNtoNKeyCopyAlpha; |
2415 else | 2524 else |
2416 return BlitNtoNKey; | 2525 return BlitNtoNKey; |
2417 } | 2526 } |
2418 } | 2527 } |
2419 | 2528 |
2420 blitfun = NULL; | 2529 blitfun = NULL; |
2421 if ( dstfmt->BitsPerPixel == 8 ) { | 2530 if (dstfmt->BitsPerPixel == 8) { |
2422 /* We assume 8-bit destinations are palettized */ | 2531 /* We assume 8-bit destinations are palettized */ |
2423 if ( (srcfmt->BytesPerPixel == 4) && | 2532 if ((srcfmt->BytesPerPixel == 4) && |
2424 (srcfmt->Rmask == 0x00FF0000) && | 2533 (srcfmt->Rmask == 0x00FF0000) && |
2425 (srcfmt->Gmask == 0x0000FF00) && | 2534 (srcfmt->Gmask == 0x0000FF00) && (srcfmt->Bmask == 0x000000FF)) { |
2426 (srcfmt->Bmask == 0x000000FF) ) { | 2535 if (surface->map->table) { |
2427 if ( surface->map->table ) { | 2536 blitfun = Blit_RGB888_index8_map; |
2428 blitfun = Blit_RGB888_index8_map; | 2537 } else { |
2429 } else { | |
2430 #if SDL_HERMES_BLITTERS | 2538 #if SDL_HERMES_BLITTERS |
2431 sdata->aux_data = ConvertX86p32_8RGB332; | 2539 sdata->aux_data = ConvertX86p32_8RGB332; |
2432 blitfun = ConvertX86; | 2540 blitfun = ConvertX86; |
2433 #else | 2541 #else |
2434 blitfun = Blit_RGB888_index8; | 2542 blitfun = Blit_RGB888_index8; |
2435 #endif | 2543 #endif |
2436 } | 2544 } |
2437 } else { | 2545 } else { |
2438 blitfun = BlitNto1; | 2546 blitfun = BlitNto1; |
2439 } | 2547 } |
2440 } else { | 2548 } else { |
2441 /* Now the meat, choose the blitter we want */ | 2549 /* Now the meat, choose the blitter we want */ |
2442 int a_need = NO_ALPHA; | 2550 int a_need = NO_ALPHA; |
2443 if(dstfmt->Amask) | 2551 if (dstfmt->Amask) |
2444 a_need = srcfmt->Amask ? COPY_ALPHA : SET_ALPHA; | 2552 a_need = srcfmt->Amask ? COPY_ALPHA : SET_ALPHA; |
2445 table = normal_blit[srcfmt->BytesPerPixel-1]; | 2553 table = normal_blit[srcfmt->BytesPerPixel - 1]; |
2446 for ( which=0; table[which].dstbpp; ++which ) { | 2554 for (which = 0; table[which].dstbpp; ++which) { |
2447 if ( MASKOK(srcfmt->Rmask, table[which].srcR) && | 2555 if (MASKOK (srcfmt->Rmask, table[which].srcR) && |
2448 MASKOK(srcfmt->Gmask, table[which].srcG) && | 2556 MASKOK (srcfmt->Gmask, table[which].srcG) && |
2449 MASKOK(srcfmt->Bmask, table[which].srcB) && | 2557 MASKOK (srcfmt->Bmask, table[which].srcB) && |
2450 MASKOK(dstfmt->Rmask, table[which].dstR) && | 2558 MASKOK (dstfmt->Rmask, table[which].dstR) && |
2451 MASKOK(dstfmt->Gmask, table[which].dstG) && | 2559 MASKOK (dstfmt->Gmask, table[which].dstG) && |
2452 MASKOK(dstfmt->Bmask, table[which].dstB) && | 2560 MASKOK (dstfmt->Bmask, table[which].dstB) && |
2453 dstfmt->BytesPerPixel == table[which].dstbpp && | 2561 dstfmt->BytesPerPixel == table[which].dstbpp && |
2454 (a_need & table[which].alpha) == a_need && | 2562 (a_need & table[which].alpha) == a_need && |
2455 ((table[which].blit_features & GetBlitFeatures()) == table[which].blit_features) ) | 2563 ((table[which].blit_features & GetBlitFeatures ()) == |
2456 break; | 2564 table[which].blit_features)) |
2457 } | 2565 break; |
2458 sdata->aux_data = table[which].aux_data; | 2566 } |
2459 blitfun = table[which].blitfunc; | 2567 sdata->aux_data = table[which].aux_data; |
2460 | 2568 blitfun = table[which].blitfunc; |
2461 if(blitfun == BlitNtoN) { /* default C fallback catch-all. Slow! */ | 2569 |
2462 /* Fastpath C fallback: 32bit RGB<->RGBA blit with matching RGB */ | 2570 if (blitfun == BlitNtoN) { /* default C fallback catch-all. Slow! */ |
2463 if ( srcfmt->BytesPerPixel == 4 && dstfmt->BytesPerPixel == 4 && | 2571 /* Fastpath C fallback: 32bit RGB<->RGBA blit with matching RGB */ |
2464 srcfmt->Rmask == dstfmt->Rmask && | 2572 if (srcfmt->BytesPerPixel == 4 && dstfmt->BytesPerPixel == 4 && |
2465 srcfmt->Gmask == dstfmt->Gmask && | 2573 srcfmt->Rmask == dstfmt->Rmask && |
2466 srcfmt->Bmask == dstfmt->Bmask ) { | 2574 srcfmt->Gmask == dstfmt->Gmask && |
2467 blitfun = Blit4to4MaskAlpha; | 2575 srcfmt->Bmask == dstfmt->Bmask) { |
2468 } else if ( a_need == COPY_ALPHA ) { | 2576 blitfun = Blit4to4MaskAlpha; |
2469 blitfun = BlitNtoNCopyAlpha; | 2577 } else if (a_need == COPY_ALPHA) { |
2470 } | 2578 blitfun = BlitNtoNCopyAlpha; |
2471 } | 2579 } |
2472 } | 2580 } |
2581 } | |
2473 | 2582 |
2474 #ifdef DEBUG_ASM | 2583 #ifdef DEBUG_ASM |
2475 #if SDL_HERMES_BLITTERS | 2584 #if SDL_HERMES_BLITTERS |
2476 if ( blitfun == ConvertMMX ) | 2585 if (blitfun == ConvertMMX) |
2477 fprintf(stderr, "Using mmx blit\n"); | 2586 fprintf (stderr, "Using mmx blit\n"); |
2478 else | 2587 else if (blitfun == ConvertX86) |
2479 if ( blitfun == ConvertX86 ) | 2588 fprintf (stderr, "Using asm blit\n"); |
2480 fprintf(stderr, "Using asm blit\n"); | 2589 else |
2481 else | |
2482 #endif | 2590 #endif |
2483 if ( (blitfun == BlitNtoN) || (blitfun == BlitNto1) ) | 2591 if ((blitfun == BlitNtoN) || (blitfun == BlitNto1)) |
2484 fprintf(stderr, "Using C blit\n"); | 2592 fprintf (stderr, "Using C blit\n"); |
2485 else | 2593 else |
2486 fprintf(stderr, "Using optimized C blit\n"); | 2594 fprintf (stderr, "Using optimized C blit\n"); |
2487 #endif /* DEBUG_ASM */ | 2595 #endif /* DEBUG_ASM */ |
2488 | 2596 |
2489 return(blitfun); | 2597 return (blitfun); |
2490 } | 2598 } |
2599 | |
2600 /* vi: set ts=4 sw=4 expandtab: */ |
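[Annotation, not part of the changeset: a summary of the selection order in SDL_CalculateBlitN as it stands in this revision:]

    /* blit_index & 2   -> SDL_CalculateAlphaBlit()  (alpha is involved)
       dst < 8 bpp      -> NULL                      (unsupported)
       blit_index == 1  -> Blit2to2Key / BlitNto1Key /
                           BlitNtoNKey(CopyAlpha)    (colorkey blits)
       dst == 8 bpp     -> Blit_RGB888_index8(_map) / BlitNto1
       otherwise        -> scan normal_blit[srcbpp-1], then fall back to
                           Blit4to4MaskAlpha, BlitNtoNCopyAlpha, BlitNtoN */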