comparison src/video/SDL_blit_copy.c @ 2248:5cd2a2293cf0

Okay, I figured out the intrinsics for SIMD memcpy
author Sam Lantinga <slouken@libsdl.org>
date Thu, 16 Aug 2007 02:14:13 +0000
parents 93994f65c74c
children 5a58b57b6724
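
This changeset drops the GCC-only inline assembly (and the #ifndef __GNUC__ guard that disabled the fast paths on other compilers) in favor of the MMX/SSE compiler intrinsics, which any x86 compiler defining __MMX__/__SSE__ can build. As a rough guide to the rewrite below, here is a minimal sketch of how each removed instruction maps to an intrinsic; the function and buffers are hypothetical and assumed suitably aligned, and note that _mm_prefetch and _mm_stream_pi are declared in <xmmintrin.h>:

/* Sketch of the asm-to-intrinsic mapping, not part of the changeset;
   `src` and `dst` are hypothetical, suitably aligned buffers. */
#include <mmintrin.h>           /* MMX: __m64, _mm_empty */
#include <xmmintrin.h>          /* SSE: __m128, _mm_prefetch, _mm_stream_* */

static void mapping_sketch(unsigned char *dst, const unsigned char *src)
{
    _mm_prefetch((const char *) src, _MM_HINT_NTA); /* prefetchnta (%0) */
    __m64 m = *(__m64 *) src;                       /* movq (%0), %%mm0 */
    _mm_stream_pi((__m64 *) dst, m);                /* movntq %%mm0, (%1) */
    __m128 x = *(__m128 *) (src + 16);              /* movaps 16(%0), %%xmm1 */
    _mm_stream_ps((float *) (dst + 16), x);         /* movntps %%xmm1, 16(%1) */
    _mm_empty();                                    /* emms */
}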
--- a/src/video/SDL_blit_copy.c  (2247:93994f65c74c)
+++ b/src/video/SDL_blit_copy.c  (2248:5cd2a2293cf0)
22 #include "SDL_config.h" 22 #include "SDL_config.h"
23 23
24 #include "SDL_video.h" 24 #include "SDL_video.h"
25 #include "SDL_blit.h" 25 #include "SDL_blit.h"
26 26
27 /* The MMX/SSE intrinsics don't give access to specific registers for 27 #ifdef __MMX__
28 the most memory parallelism, so we'll use GCC inline assembly here... 28 #include <mmintrin.h>
29 */ 29 #endif
30 #ifndef __GNUC__ 30 #ifdef __SSE__
31 #undef __MMX__ 31 #include <xmmintrin.h>
32 #undef __SSE__
33 #endif 32 #endif
34 33
35 #ifdef __MMX__ 34 #ifdef __MMX__
36 static __inline__ void 35 static __inline__ void
37 SDL_memcpyMMX(Uint8 *dst, const Uint8 *src, int len) 36 SDL_memcpyMMX(Uint8 * dst, const Uint8 * src, int len)
38 { 37 {
39 int i; 38 int i;
40 39
40 __m64 values[8];
41 for (i = len / 64; i--;) { 41 for (i = len / 64; i--;) {
42 __asm__ __volatile__ ( 42 _mm_prefetch(src, _MM_HINT_NTA);
43 "prefetchnta (%0)\n" 43 values[0] = *(__m64 *) (src + 0);
44 "movq (%0), %%mm0\n" 44 values[1] = *(__m64 *) (src + 8);
45 "movq 8(%0), %%mm1\n" 45 values[2] = *(__m64 *) (src + 16);
46 "movq 16(%0), %%mm2\n" 46 values[3] = *(__m64 *) (src + 24);
47 "movq 24(%0), %%mm3\n" 47 values[4] = *(__m64 *) (src + 32);
48 "movq 32(%0), %%mm4\n" 48 values[5] = *(__m64 *) (src + 40);
49 "movq 40(%0), %%mm5\n" 49 values[6] = *(__m64 *) (src + 48);
50 "movq 48(%0), %%mm6\n" 50 values[7] = *(__m64 *) (src + 56);
51 "movq 56(%0), %%mm7\n" 51 _mm_stream_pi((__m64 *) (dst + 0), values[0]);
52 "movntq %%mm0, (%1)\n" 52 _mm_stream_pi((__m64 *) (dst + 8), values[1]);
53 "movntq %%mm1, 8(%1)\n" 53 _mm_stream_pi((__m64 *) (dst + 16), values[2]);
54 "movntq %%mm2, 16(%1)\n" 54 _mm_stream_pi((__m64 *) (dst + 24), values[3]);
55 "movntq %%mm3, 24(%1)\n" 55 _mm_stream_pi((__m64 *) (dst + 32), values[4]);
56 "movntq %%mm4, 32(%1)\n" 56 _mm_stream_pi((__m64 *) (dst + 40), values[5]);
57 "movntq %%mm5, 40(%1)\n" 57 _mm_stream_pi((__m64 *) (dst + 48), values[6]);
58 "movntq %%mm6, 48(%1)\n" 58 _mm_stream_pi((__m64 *) (dst + 56), values[7]);
59 "movntq %%mm7, 56(%1)\n"
60 :: "r" (src), "r" (dst) : "memory");
61 src += 64; 59 src += 64;
62 dst += 64; 60 dst += 64;
63 } 61 }
62
64 if (len & 63) 63 if (len & 63)
65 SDL_memcpy(dst, src, len & 63); 64 SDL_memcpy(dst, src, len & 63);
66 } 65 }
67 #endif /* __MMX__ */ 66 #endif /* __MMX__ */
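
A note on the loop shape above: loading all eight __m64 values before issuing any of the streaming stores mirrors the old code's use of the full mm0-mm7 register file, the "memory parallelism" the deleted comment was after; the values[8] array leaves the register allocation to the compiler. The chunk/tail split is plain integer arithmetic; a tiny standalone check, with 200 as an arbitrary example length:

#include <stdio.h>

int main(void)
{
    int len = 200;                /* arbitrary example row length in bytes */
    printf("chunks=%d tail=%d\n", /* full 64-byte chunks, leftover bytes */
           len / 64, len & 63);   /* prints: chunks=3 tail=8 */
    return 0;
}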
 
 #ifdef __SSE__
 static __inline__ void
-SDL_memcpySSE(Uint8 *dst, const Uint8 *src, int len)
+SDL_memcpySSE(Uint8 * dst, const Uint8 * src, int len)
 {
     int i;
 
+    __m128 values[4];
     for (i = len / 64; i--;) {
-        __asm__ __volatile__ (
-            "prefetchnta (%0)\n"
-            "movaps (%0), %%xmm0\n"
-            "movaps 16(%0), %%xmm1\n"
-            "movaps 32(%0), %%xmm2\n"
-            "movaps 48(%0), %%xmm3\n"
-            "movntps %%xmm0, (%1)\n"
-            "movntps %%xmm1, 16(%1)\n"
-            "movntps %%xmm2, 32(%1)\n"
-            "movntps %%xmm3, 48(%1)\n"
-            :: "r" (src), "r" (dst) : "memory");
+        _mm_prefetch(src, _MM_HINT_NTA);
+        values[0] = *(__m128 *) (src + 0);
+        values[1] = *(__m128 *) (src + 16);
+        values[2] = *(__m128 *) (src + 32);
+        values[3] = *(__m128 *) (src + 48);
+        _mm_stream_ps((float *) (dst + 0), values[0]);
+        _mm_stream_ps((float *) (dst + 16), values[1]);
+        _mm_stream_ps((float *) (dst + 32), values[2]);
+        _mm_stream_ps((float *) (dst + 48), values[3]);
         src += 64;
         dst += 64;
     }
+
    if (len & 63)
        SDL_memcpy(dst, src, len & 63);
 }
 #endif /* __SSE__ */
 
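Both helpers use non-temporal (streaming) stores, which write to memory around the cache: a good fit for a large blit whose destination pixels won't be read back soon. One general caveat with this technique (not specific to this changeset): streaming stores are weakly ordered, so code that hands the buffer to another consumer typically fences first. A generic sketch, assuming 16-byte-aligned float buffers:

#include <xmmintrin.h>

/* Generic non-temporal copy of one 16-byte block; both pointers are
   assumed 16-byte aligned. */
static void stream_copy16(float *dst, const float *src)
{
    __m128 v = _mm_load_ps(src); /* aligned load */
    _mm_stream_ps(dst, v);       /* store bypassing the cache */
    _mm_sfence();                /* order the streaming store */
}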
@@ -105,28 +104,28 @@
     dst = info->d_pixels;
     srcskip = w + info->s_skip;
     dstskip = w + info->d_skip;
 
 #ifdef __SSE__
-    if (SDL_HasSSE() && !((uintptr_t)src & 15) && !((uintptr_t)dst & 15)) {
+    if (SDL_HasSSE() && !((uintptr_t) src & 15) && !((uintptr_t) dst & 15)) {
         while (h--) {
             SDL_memcpySSE(dst, src, w);
             src += srcskip;
             dst += dstskip;
         }
         return;
     }
 #endif
 
 #ifdef __MMX__
-    if (SDL_HasMMX() && !((uintptr_t)src & 7) && !((uintptr_t)dst & 7)) {
+    if (SDL_HasMMX() && !((uintptr_t) src & 7) && !((uintptr_t) dst & 7)) {
         while (h--) {
             SDL_memcpyMMX(dst, src, w);
             src += srcskip;
             dst += dstskip;
         }
-        __asm__ __volatile__(" emms\n"::);
+        _mm_empty();
         return;
     }
 #endif
 
     while (h--) {
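
The dispatch above pairs a runtime CPU-feature check with a pointer-alignment check: the __m128 loads and streaming stores in SDL_memcpySSE fault on addresses that aren't 16-byte aligned, the MMX path is likewise gated on 8-byte alignment, and unaligned surfaces fall through to a plain SDL_memcpy per row. Note also the _mm_empty() (the old emms): MMX state aliases the x87 registers, so it must be cleared before any floating-point code runs. The bit tests read as in this sketch, where is_aligned is a hypothetical helper:

#include <stdint.h>

/* A pointer is n-byte aligned iff its low bits are zero; n must be a
   power of two. is_aligned(p, 16) matches !((uintptr_t) p & 15) above. */
static int is_aligned(const void *p, uintptr_t n)
{
    return ((uintptr_t) p & (n - 1)) == 0;
}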
@@ -146,11 +145,11 @@
     w = info->d_width * info->dst->BytesPerPixel;
     h = info->d_height;
     src = info->s_pixels;
     dst = info->d_pixels;
     skip = w + info->s_skip;
-    if ((dst < src) || (dst >= (src + h*skip))) {
+    if ((dst < src) || (dst >= (src + h * skip))) {
         SDL_BlitCopy(info);
     } else {
         src += ((h - 1) * skip);
         dst += ((h - 1) * skip);
         while (h--) {
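
The last hunk is the overlapping-blit path: it delegates to SDL_BlitCopy's top-down copy only when dst lies entirely before or after the source region; otherwise it walks the rows bottom-up, memmove-style, so source rows aren't clobbered before they're read. The test, isolated as a sketch (can_copy_forward is a hypothetical helper):

/* Forward (top-down) row copying is safe unless dst begins inside the
   source region [src, src + h * skip). */
static int can_copy_forward(const unsigned char *src,
                            const unsigned char *dst, int h, int skip)
{
    return (dst < src) || (dst >= src + h * skip);
}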