1 | /* |
---|
2 | * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt |
---|
3 | * |
---|
4 | * This file is part of FFmpeg. |
---|
5 | * |
---|
6 | * FFmpeg is free software; you can redistribute it and/or |
---|
7 | * modify it under the terms of the GNU Lesser General Public |
---|
8 | * License as published by the Free Software Foundation; either |
---|
9 | * version 2.1 of the License, or (at your option) any later version. |
---|
10 | * |
---|
11 | * FFmpeg is distributed in the hope that it will be useful, |
---|
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
14 | * Lesser General Public License for more details. |
---|
15 | * |
---|
16 | * You should have received a copy of the GNU Lesser General Public |
---|
17 | * License along with FFmpeg; if not, write to the Free Software |
---|
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
---|
19 | */ |
---|
20 | |
---|
21 | #include "dsputil_mmx.h" |
---|
22 | |
---|
23 | DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL; |
---|
24 | DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL; |
---|
25 | |
---|
26 | /***********************************/ |
---|
27 | /* IDCT */ |
---|
28 | |
---|
29 | #define SUMSUB_BADC( a, b, c, d ) \ |
---|
30 | "paddw "#b", "#a" \n\t"\ |
---|
31 | "paddw "#d", "#c" \n\t"\ |
---|
32 | "paddw "#b", "#b" \n\t"\ |
---|
33 | "paddw "#d", "#d" \n\t"\ |
---|
34 | "psubw "#a", "#b" \n\t"\ |
---|
35 | "psubw "#c", "#d" \n\t" |
---|
36 | |
---|
37 | #define SUMSUBD2_AB( a, b, t ) \ |
---|
38 | "movq "#b", "#t" \n\t"\ |
---|
39 | "psraw $1 , "#b" \n\t"\ |
---|
40 | "paddw "#a", "#b" \n\t"\ |
---|
41 | "psraw $1 , "#a" \n\t"\ |
---|
42 | "psubw "#t", "#a" \n\t" |
---|
43 | |
---|
44 | #define IDCT4_1D( s02, s13, d02, d13, t ) \ |
---|
45 | SUMSUB_BA ( s02, d02 )\ |
---|
46 | SUMSUBD2_AB( s13, d13, t )\ |
---|
47 | SUMSUB_BADC( d13, s02, s13, d02 ) |
---|
48 | |
---|
49 | #define STORE_DIFF_4P( p, t, z ) \ |
---|
50 | "psraw $6, "#p" \n\t"\ |
---|
51 | "movd (%0), "#t" \n\t"\ |
---|
52 | "punpcklbw "#z", "#t" \n\t"\ |
---|
53 | "paddsw "#t", "#p" \n\t"\ |
---|
54 | "packuswb "#z", "#p" \n\t"\ |
---|
55 | "movd "#p", (%0) \n\t" |
---|
56 | |
---|
57 | static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride) |
---|
58 | { |
---|
59 | /* Load dct coeffs */ |
---|
60 | __asm__ volatile( |
---|
61 | "movq (%0), %%mm0 \n\t" |
---|
62 | "movq 8(%0), %%mm1 \n\t" |
---|
63 | "movq 16(%0), %%mm2 \n\t" |
---|
64 | "movq 24(%0), %%mm3 \n\t" |
---|
65 | :: "r"(block) ); |
---|
66 | |
---|
67 | __asm__ volatile( |
---|
68 | /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */ |
---|
69 | IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 ) |
---|
70 | |
---|
71 | "movq %0, %%mm6 \n\t" |
---|
72 | /* in: 1,4,0,2 out: 1,2,3,0 */ |
---|
73 | TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 ) |
---|
74 | |
---|
75 | "paddw %%mm6, %%mm3 \n\t" |
---|
76 | |
---|
77 | /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */ |
---|
78 | IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 ) |
---|
79 | |
---|
80 | "pxor %%mm7, %%mm7 \n\t" |
---|
81 | :: "m"(ff_pw_32)); |
---|
82 | |
---|
83 | __asm__ volatile( |
---|
84 | STORE_DIFF_4P( %%mm0, %%mm1, %%mm7) |
---|
85 | "add %1, %0 \n\t" |
---|
86 | STORE_DIFF_4P( %%mm2, %%mm1, %%mm7) |
---|
87 | "add %1, %0 \n\t" |
---|
88 | STORE_DIFF_4P( %%mm3, %%mm1, %%mm7) |
---|
89 | "add %1, %0 \n\t" |
---|
90 | STORE_DIFF_4P( %%mm4, %%mm1, %%mm7) |
---|
91 | : "+r"(dst) |
---|
92 | : "r" ((x86_reg)stride) |
---|
93 | ); |
---|
94 | } |
---|
95 | |
---|
96 | static inline void h264_idct8_1d(int16_t *block) |
---|
97 | { |
---|
98 | __asm__ volatile( |
---|
99 | "movq 112(%0), %%mm7 \n\t" |
---|
100 | "movq 80(%0), %%mm0 \n\t" |
---|
101 | "movq 48(%0), %%mm3 \n\t" |
---|
102 | "movq 16(%0), %%mm5 \n\t" |
---|
103 | |
---|
104 | "movq %%mm0, %%mm4 \n\t" |
---|
105 | "movq %%mm5, %%mm1 \n\t" |
---|
106 | "psraw $1, %%mm4 \n\t" |
---|
107 | "psraw $1, %%mm1 \n\t" |
---|
108 | "paddw %%mm0, %%mm4 \n\t" |
---|
109 | "paddw %%mm5, %%mm1 \n\t" |
---|
110 | "paddw %%mm7, %%mm4 \n\t" |
---|
111 | "paddw %%mm0, %%mm1 \n\t" |
---|
112 | "psubw %%mm5, %%mm4 \n\t" |
---|
113 | "paddw %%mm3, %%mm1 \n\t" |
---|
114 | |
---|
115 | "psubw %%mm3, %%mm5 \n\t" |
---|
116 | "psubw %%mm3, %%mm0 \n\t" |
---|
117 | "paddw %%mm7, %%mm5 \n\t" |
---|
118 | "psubw %%mm7, %%mm0 \n\t" |
---|
119 | "psraw $1, %%mm3 \n\t" |
---|
120 | "psraw $1, %%mm7 \n\t" |
---|
121 | "psubw %%mm3, %%mm5 \n\t" |
---|
122 | "psubw %%mm7, %%mm0 \n\t" |
---|
123 | |
---|
124 | "movq %%mm4, %%mm3 \n\t" |
---|
125 | "movq %%mm1, %%mm7 \n\t" |
---|
126 | "psraw $2, %%mm1 \n\t" |
---|
127 | "psraw $2, %%mm3 \n\t" |
---|
128 | "paddw %%mm5, %%mm3 \n\t" |
---|
129 | "psraw $2, %%mm5 \n\t" |
---|
130 | "paddw %%mm0, %%mm1 \n\t" |
---|
131 | "psraw $2, %%mm0 \n\t" |
---|
132 | "psubw %%mm4, %%mm5 \n\t" |
---|
133 | "psubw %%mm0, %%mm7 \n\t" |
---|
134 | |
---|
135 | "movq 32(%0), %%mm2 \n\t" |
---|
136 | "movq 96(%0), %%mm6 \n\t" |
---|
137 | "movq %%mm2, %%mm4 \n\t" |
---|
138 | "movq %%mm6, %%mm0 \n\t" |
---|
139 | "psraw $1, %%mm4 \n\t" |
---|
140 | "psraw $1, %%mm6 \n\t" |
---|
141 | "psubw %%mm0, %%mm4 \n\t" |
---|
142 | "paddw %%mm2, %%mm6 \n\t" |
---|
143 | |
---|
144 | "movq (%0), %%mm2 \n\t" |
---|
145 | "movq 64(%0), %%mm0 \n\t" |
---|
146 | SUMSUB_BA( %%mm0, %%mm2 ) |
---|
147 | SUMSUB_BA( %%mm6, %%mm0 ) |
---|
148 | SUMSUB_BA( %%mm4, %%mm2 ) |
---|
149 | SUMSUB_BA( %%mm7, %%mm6 ) |
---|
150 | SUMSUB_BA( %%mm5, %%mm4 ) |
---|
151 | SUMSUB_BA( %%mm3, %%mm2 ) |
---|
152 | SUMSUB_BA( %%mm1, %%mm0 ) |
---|
153 | :: "r"(block) |
---|
154 | ); |
---|
155 | } |
---|
156 | |
---|
157 | static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) |
---|
158 | { |
---|
159 | int i; |
---|
160 | DECLARE_ALIGNED(8, int16_t, b2)[64]; |
---|
161 | |
---|
162 | block[0] += 32; |
---|
163 | |
---|
164 | for(i=0; i<2; i++){ |
---|
165 | DECLARE_ALIGNED(8, uint64_t, tmp); |
---|
166 | |
---|
167 | h264_idct8_1d(block+4*i); |
---|
168 | |
---|
169 | __asm__ volatile( |
---|
170 | "movq %%mm7, %0 \n\t" |
---|
171 | TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) |
---|
172 | "movq %%mm0, 8(%1) \n\t" |
---|
173 | "movq %%mm6, 24(%1) \n\t" |
---|
174 | "movq %%mm7, 40(%1) \n\t" |
---|
175 | "movq %%mm4, 56(%1) \n\t" |
---|
176 | "movq %0, %%mm7 \n\t" |
---|
177 | TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) |
---|
178 | "movq %%mm7, (%1) \n\t" |
---|
179 | "movq %%mm1, 16(%1) \n\t" |
---|
180 | "movq %%mm0, 32(%1) \n\t" |
---|
181 | "movq %%mm3, 48(%1) \n\t" |
---|
182 | : "=m"(tmp) |
---|
183 | : "r"(b2+32*i) |
---|
184 | : "memory" |
---|
185 | ); |
---|
186 | } |
---|
187 | |
---|
188 | for(i=0; i<2; i++){ |
---|
189 | h264_idct8_1d(b2+4*i); |
---|
190 | |
---|
191 | __asm__ volatile( |
---|
192 | "psraw $6, %%mm7 \n\t" |
---|
193 | "psraw $6, %%mm6 \n\t" |
---|
194 | "psraw $6, %%mm5 \n\t" |
---|
195 | "psraw $6, %%mm4 \n\t" |
---|
196 | "psraw $6, %%mm3 \n\t" |
---|
197 | "psraw $6, %%mm2 \n\t" |
---|
198 | "psraw $6, %%mm1 \n\t" |
---|
199 | "psraw $6, %%mm0 \n\t" |
---|
200 | |
---|
201 | "movq %%mm7, (%0) \n\t" |
---|
202 | "movq %%mm5, 16(%0) \n\t" |
---|
203 | "movq %%mm3, 32(%0) \n\t" |
---|
204 | "movq %%mm1, 48(%0) \n\t" |
---|
205 | "movq %%mm0, 64(%0) \n\t" |
---|
206 | "movq %%mm2, 80(%0) \n\t" |
---|
207 | "movq %%mm4, 96(%0) \n\t" |
---|
208 | "movq %%mm6, 112(%0) \n\t" |
---|
209 | :: "r"(b2+4*i) |
---|
210 | : "memory" |
---|
211 | ); |
---|
212 | } |
---|
213 | |
---|
214 | add_pixels_clamped_mmx(b2, dst, stride); |
---|
215 | } |
---|
216 | |
---|
217 | #define STORE_DIFF_8P( p, d, t, z )\ |
---|
218 | "movq "#d", "#t" \n"\ |
---|
219 | "psraw $6, "#p" \n"\ |
---|
220 | "punpcklbw "#z", "#t" \n"\ |
---|
221 | "paddsw "#t", "#p" \n"\ |
---|
222 | "packuswb "#p", "#p" \n"\ |
---|
223 | "movq "#p", "#d" \n" |
---|
224 | |
---|
225 | #define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\ |
---|
226 | "movdqa "#c", "#a" \n"\ |
---|
227 | "movdqa "#g", "#e" \n"\ |
---|
228 | "psraw $1, "#c" \n"\ |
---|
229 | "psraw $1, "#g" \n"\ |
---|
230 | "psubw "#e", "#c" \n"\ |
---|
231 | "paddw "#a", "#g" \n"\ |
---|
232 | "movdqa "#b", "#e" \n"\ |
---|
233 | "psraw $1, "#e" \n"\ |
---|
234 | "paddw "#b", "#e" \n"\ |
---|
235 | "paddw "#d", "#e" \n"\ |
---|
236 | "paddw "#f", "#e" \n"\ |
---|
237 | "movdqa "#f", "#a" \n"\ |
---|
238 | "psraw $1, "#a" \n"\ |
---|
239 | "paddw "#f", "#a" \n"\ |
---|
240 | "paddw "#h", "#a" \n"\ |
---|
241 | "psubw "#b", "#a" \n"\ |
---|
242 | "psubw "#d", "#b" \n"\ |
---|
243 | "psubw "#d", "#f" \n"\ |
---|
244 | "paddw "#h", "#b" \n"\ |
---|
245 | "psubw "#h", "#f" \n"\ |
---|
246 | "psraw $1, "#d" \n"\ |
---|
247 | "psraw $1, "#h" \n"\ |
---|
248 | "psubw "#d", "#b" \n"\ |
---|
249 | "psubw "#h", "#f" \n"\ |
---|
250 | "movdqa "#e", "#d" \n"\ |
---|
251 | "movdqa "#a", "#h" \n"\ |
---|
252 | "psraw $2, "#d" \n"\ |
---|
253 | "psraw $2, "#h" \n"\ |
---|
254 | "paddw "#f", "#d" \n"\ |
---|
255 | "paddw "#b", "#h" \n"\ |
---|
256 | "psraw $2, "#f" \n"\ |
---|
257 | "psraw $2, "#b" \n"\ |
---|
258 | "psubw "#f", "#e" \n"\ |
---|
259 | "psubw "#a", "#b" \n"\ |
---|
260 | "movdqa 0x00(%1), "#a" \n"\ |
---|
261 | "movdqa 0x40(%1), "#f" \n"\ |
---|
262 | SUMSUB_BA(f, a)\ |
---|
263 | SUMSUB_BA(g, f)\ |
---|
264 | SUMSUB_BA(c, a)\ |
---|
265 | SUMSUB_BA(e, g)\ |
---|
266 | SUMSUB_BA(b, c)\ |
---|
267 | SUMSUB_BA(h, a)\ |
---|
268 | SUMSUB_BA(d, f) |
---|
269 | |
---|
270 | static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride) |
---|
271 | { |
---|
272 | __asm__ volatile( |
---|
273 | "movdqa 0x10(%1), %%xmm1 \n" |
---|
274 | "movdqa 0x20(%1), %%xmm2 \n" |
---|
275 | "movdqa 0x30(%1), %%xmm3 \n" |
---|
276 | "movdqa 0x50(%1), %%xmm5 \n" |
---|
277 | "movdqa 0x60(%1), %%xmm6 \n" |
---|
278 | "movdqa 0x70(%1), %%xmm7 \n" |
---|
279 | H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7) |
---|
280 | TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1)) |
---|
281 | "paddw %4, %%xmm4 \n" |
---|
282 | "movdqa %%xmm4, 0x00(%1) \n" |
---|
283 | "movdqa %%xmm2, 0x40(%1) \n" |
---|
284 | H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1) |
---|
285 | "movdqa %%xmm6, 0x60(%1) \n" |
---|
286 | "movdqa %%xmm7, 0x70(%1) \n" |
---|
287 | "pxor %%xmm7, %%xmm7 \n" |
---|
288 | STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7) |
---|
289 | STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7) |
---|
290 | STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7) |
---|
291 | STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7) |
---|
292 | "lea (%0,%2,4), %0 \n" |
---|
293 | STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7) |
---|
294 | STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7) |
---|
295 | "movdqa 0x60(%1), %%xmm0 \n" |
---|
296 | "movdqa 0x70(%1), %%xmm1 \n" |
---|
297 | STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7) |
---|
298 | STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7) |
---|
299 | :"+r"(dst) |
---|
300 | :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32) |
---|
301 | ); |
---|
302 | } |
---|
303 | |
---|
304 | static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) |
---|
305 | { |
---|
306 | int dc = (block[0] + 32) >> 6; |
---|
307 | __asm__ volatile( |
---|
308 | "movd %0, %%mm0 \n\t" |
---|
309 | "pshufw $0, %%mm0, %%mm0 \n\t" |
---|
310 | "pxor %%mm1, %%mm1 \n\t" |
---|
311 | "psubw %%mm0, %%mm1 \n\t" |
---|
312 | "packuswb %%mm0, %%mm0 \n\t" |
---|
313 | "packuswb %%mm1, %%mm1 \n\t" |
---|
314 | ::"r"(dc) |
---|
315 | ); |
---|
316 | __asm__ volatile( |
---|
317 | "movd %0, %%mm2 \n\t" |
---|
318 | "movd %1, %%mm3 \n\t" |
---|
319 | "movd %2, %%mm4 \n\t" |
---|
320 | "movd %3, %%mm5 \n\t" |
---|
321 | "paddusb %%mm0, %%mm2 \n\t" |
---|
322 | "paddusb %%mm0, %%mm3 \n\t" |
---|
323 | "paddusb %%mm0, %%mm4 \n\t" |
---|
324 | "paddusb %%mm0, %%mm5 \n\t" |
---|
325 | "psubusb %%mm1, %%mm2 \n\t" |
---|
326 | "psubusb %%mm1, %%mm3 \n\t" |
---|
327 | "psubusb %%mm1, %%mm4 \n\t" |
---|
328 | "psubusb %%mm1, %%mm5 \n\t" |
---|
329 | "movd %%mm2, %0 \n\t" |
---|
330 | "movd %%mm3, %1 \n\t" |
---|
331 | "movd %%mm4, %2 \n\t" |
---|
332 | "movd %%mm5, %3 \n\t" |
---|
333 | :"+m"(*(uint32_t*)(dst+0*stride)), |
---|
334 | "+m"(*(uint32_t*)(dst+1*stride)), |
---|
335 | "+m"(*(uint32_t*)(dst+2*stride)), |
---|
336 | "+m"(*(uint32_t*)(dst+3*stride)) |
---|
337 | ); |
---|
338 | } |
---|
339 | |
---|
340 | static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) |
---|
341 | { |
---|
342 | int dc = (block[0] + 32) >> 6; |
---|
343 | int y; |
---|
344 | __asm__ volatile( |
---|
345 | "movd %0, %%mm0 \n\t" |
---|
346 | "pshufw $0, %%mm0, %%mm0 \n\t" |
---|
347 | "pxor %%mm1, %%mm1 \n\t" |
---|
348 | "psubw %%mm0, %%mm1 \n\t" |
---|
349 | "packuswb %%mm0, %%mm0 \n\t" |
---|
350 | "packuswb %%mm1, %%mm1 \n\t" |
---|
351 | ::"r"(dc) |
---|
352 | ); |
---|
353 | for(y=2; y--; dst += 4*stride){ |
---|
354 | __asm__ volatile( |
---|
355 | "movq %0, %%mm2 \n\t" |
---|
356 | "movq %1, %%mm3 \n\t" |
---|
357 | "movq %2, %%mm4 \n\t" |
---|
358 | "movq %3, %%mm5 \n\t" |
---|
359 | "paddusb %%mm0, %%mm2 \n\t" |
---|
360 | "paddusb %%mm0, %%mm3 \n\t" |
---|
361 | "paddusb %%mm0, %%mm4 \n\t" |
---|
362 | "paddusb %%mm0, %%mm5 \n\t" |
---|
363 | "psubusb %%mm1, %%mm2 \n\t" |
---|
364 | "psubusb %%mm1, %%mm3 \n\t" |
---|
365 | "psubusb %%mm1, %%mm4 \n\t" |
---|
366 | "psubusb %%mm1, %%mm5 \n\t" |
---|
367 | "movq %%mm2, %0 \n\t" |
---|
368 | "movq %%mm3, %1 \n\t" |
---|
369 | "movq %%mm4, %2 \n\t" |
---|
370 | "movq %%mm5, %3 \n\t" |
---|
371 | :"+m"(*(uint64_t*)(dst+0*stride)), |
---|
372 | "+m"(*(uint64_t*)(dst+1*stride)), |
---|
373 | "+m"(*(uint64_t*)(dst+2*stride)), |
---|
374 | "+m"(*(uint64_t*)(dst+3*stride)) |
---|
375 | ); |
---|
376 | } |
---|
377 | } |
---|
378 | |
---|
379 | //FIXME this table is a duplicate from h264data.h, and will be removed once the tables from, h264 have been split |
---|
380 | static const uint8_t scan8[16 + 2*4]={ |
---|
381 | 4+1*8, 5+1*8, 4+2*8, 5+2*8, |
---|
382 | 6+1*8, 7+1*8, 6+2*8, 7+2*8, |
---|
383 | 4+3*8, 5+3*8, 4+4*8, 5+4*8, |
---|
384 | 6+3*8, 7+3*8, 6+4*8, 7+4*8, |
---|
385 | 1+1*8, 2+1*8, |
---|
386 | 1+2*8, 2+2*8, |
---|
387 | 1+4*8, 2+4*8, |
---|
388 | 1+5*8, 2+5*8, |
---|
389 | }; |
---|
390 | |
---|
391 | static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
392 | int i; |
---|
393 | for(i=0; i<16; i++){ |
---|
394 | if(nnzc[ scan8[i] ]) |
---|
395 | ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride); |
---|
396 | } |
---|
397 | } |
---|
398 | |
---|
399 | static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
400 | int i; |
---|
401 | for(i=0; i<16; i+=4){ |
---|
402 | if(nnzc[ scan8[i] ]) |
---|
403 | ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride); |
---|
404 | } |
---|
405 | } |
---|
406 | |
---|
407 | |
---|
408 | static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
409 | int i; |
---|
410 | for(i=0; i<16; i++){ |
---|
411 | int nnz = nnzc[ scan8[i] ]; |
---|
412 | if(nnz){ |
---|
413 | if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride); |
---|
414 | else ff_h264_idct_add_mmx (dst + block_offset[i], block + i*16, stride); |
---|
415 | } |
---|
416 | } |
---|
417 | } |
---|
418 | |
---|
419 | static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
420 | int i; |
---|
421 | for(i=0; i<16; i++){ |
---|
422 | if(nnzc[ scan8[i] ] || block[i*16]) |
---|
423 | ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride); |
---|
424 | } |
---|
425 | } |
---|
426 | |
---|
427 | static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
428 | int i; |
---|
429 | for(i=0; i<16; i++){ |
---|
430 | if(nnzc[ scan8[i] ]) ff_h264_idct_add_mmx (dst + block_offset[i], block + i*16, stride); |
---|
431 | else if(block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride); |
---|
432 | } |
---|
433 | } |
---|
434 | |
---|
435 | static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
436 | int i; |
---|
437 | for(i=0; i<16; i+=4){ |
---|
438 | int nnz = nnzc[ scan8[i] ]; |
---|
439 | if(nnz){ |
---|
440 | if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride); |
---|
441 | else ff_h264_idct8_add_mmx (dst + block_offset[i], block + i*16, stride); |
---|
442 | } |
---|
443 | } |
---|
444 | } |
---|
445 | |
---|
446 | static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
447 | int i; |
---|
448 | for(i=0; i<16; i+=4){ |
---|
449 | int nnz = nnzc[ scan8[i] ]; |
---|
450 | if(nnz){ |
---|
451 | if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride); |
---|
452 | else ff_h264_idct8_add_sse2 (dst + block_offset[i], block + i*16, stride); |
---|
453 | } |
---|
454 | } |
---|
455 | } |
---|
456 | |
---|
457 | static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
458 | int i; |
---|
459 | for(i=16; i<16+8; i++){ |
---|
460 | if(nnzc[ scan8[i] ] || block[i*16]) |
---|
461 | ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
---|
462 | } |
---|
463 | } |
---|
464 | |
---|
465 | static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
466 | int i; |
---|
467 | for(i=16; i<16+8; i++){ |
---|
468 | if(nnzc[ scan8[i] ]) |
---|
469 | ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
---|
470 | else if(block[i*16]) |
---|
471 | ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
---|
472 | } |
---|
473 | } |
---|
474 | |
---|
475 | #if CONFIG_GPL && HAVE_YASM |
---|
476 | static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride) |
---|
477 | { |
---|
478 | __asm__ volatile( |
---|
479 | "movd %0, %%mm0 \n\t" // 0 0 X D |
---|
480 | "punpcklwd %1, %%mm0 \n\t" // x X d D |
---|
481 | "paddsw %2, %%mm0 \n\t" |
---|
482 | "psraw $6, %%mm0 \n\t" |
---|
483 | "punpcklwd %%mm0, %%mm0 \n\t" // d d D D |
---|
484 | "pxor %%mm1, %%mm1 \n\t" // 0 0 0 0 |
---|
485 | "psubw %%mm0, %%mm1 \n\t" // -d-d-D-D |
---|
486 | "packuswb %%mm1, %%mm0 \n\t" // -d-d-D-D d d D D |
---|
487 | "pshufw $0xFA, %%mm0, %%mm1 \n\t" // -d-d-d-d-D-D-D-D |
---|
488 | "punpcklwd %%mm0, %%mm0 \n\t" // d d d d D D D D |
---|
489 | ::"m"(block[ 0]), |
---|
490 | "m"(block[16]), |
---|
491 | "m"(ff_pw_32) |
---|
492 | ); |
---|
493 | __asm__ volatile( |
---|
494 | "movq %0, %%mm2 \n\t" |
---|
495 | "movq %1, %%mm3 \n\t" |
---|
496 | "movq %2, %%mm4 \n\t" |
---|
497 | "movq %3, %%mm5 \n\t" |
---|
498 | "paddusb %%mm0, %%mm2 \n\t" |
---|
499 | "paddusb %%mm0, %%mm3 \n\t" |
---|
500 | "paddusb %%mm0, %%mm4 \n\t" |
---|
501 | "paddusb %%mm0, %%mm5 \n\t" |
---|
502 | "psubusb %%mm1, %%mm2 \n\t" |
---|
503 | "psubusb %%mm1, %%mm3 \n\t" |
---|
504 | "psubusb %%mm1, %%mm4 \n\t" |
---|
505 | "psubusb %%mm1, %%mm5 \n\t" |
---|
506 | "movq %%mm2, %0 \n\t" |
---|
507 | "movq %%mm3, %1 \n\t" |
---|
508 | "movq %%mm4, %2 \n\t" |
---|
509 | "movq %%mm5, %3 \n\t" |
---|
510 | :"+m"(*(uint64_t*)(dst+0*stride)), |
---|
511 | "+m"(*(uint64_t*)(dst+1*stride)), |
---|
512 | "+m"(*(uint64_t*)(dst+2*stride)), |
---|
513 | "+m"(*(uint64_t*)(dst+3*stride)) |
---|
514 | ); |
---|
515 | } |
---|
516 | |
---|
517 | extern void ff_x264_add8x4_idct_sse2(uint8_t *dst, int16_t *block, int stride); |
---|
518 | |
---|
519 | static void ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
520 | int i; |
---|
521 | for(i=0; i<16; i+=2) |
---|
522 | if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ]) |
---|
523 | ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride); |
---|
524 | } |
---|
525 | |
---|
526 | static void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
527 | int i; |
---|
528 | for(i=0; i<16; i+=2){ |
---|
529 | if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ]) |
---|
530 | ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride); |
---|
531 | else if(block[i*16]|block[i*16+16]) |
---|
532 | ff_h264_idct_dc_add8_mmx2(dst + block_offset[i], block + i*16, stride); |
---|
533 | } |
---|
534 | } |
---|
535 | |
---|
536 | static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ |
---|
537 | int i; |
---|
538 | for(i=16; i<16+8; i+=2){ |
---|
539 | if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ]) |
---|
540 | ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
---|
541 | else if(block[i*16]|block[i*16+16]) |
---|
542 | ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
---|
543 | } |
---|
544 | } |
---|
545 | #endif |
---|
546 | |
---|
547 | /***********************************/ |
---|
548 | /* deblocking */ |
---|
549 | |
---|
550 | // out: o = |x-y|>a |
---|
551 | // clobbers: t |
---|
552 | #define DIFF_GT_MMX(x,y,a,o,t)\ |
---|
553 | "movq "#y", "#t" \n\t"\ |
---|
554 | "movq "#x", "#o" \n\t"\ |
---|
555 | "psubusb "#x", "#t" \n\t"\ |
---|
556 | "psubusb "#y", "#o" \n\t"\ |
---|
557 | "por "#t", "#o" \n\t"\ |
---|
558 | "psubusb "#a", "#o" \n\t" |
---|
559 | |
---|
560 | // out: o = |x-y|>a |
---|
561 | // clobbers: t |
---|
562 | #define DIFF_GT2_MMX(x,y,a,o,t)\ |
---|
563 | "movq "#y", "#t" \n\t"\ |
---|
564 | "movq "#x", "#o" \n\t"\ |
---|
565 | "psubusb "#x", "#t" \n\t"\ |
---|
566 | "psubusb "#y", "#o" \n\t"\ |
---|
567 | "psubusb "#a", "#t" \n\t"\ |
---|
568 | "psubusb "#a", "#o" \n\t"\ |
---|
569 | "pcmpeqb "#t", "#o" \n\t"\ |
---|
570 | |
---|
571 | // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 |
---|
572 | // out: mm5=beta-1, mm7=mask |
---|
573 | // clobbers: mm4,mm6 |
---|
574 | #define H264_DEBLOCK_MASK(alpha1, beta1) \ |
---|
575 | "pshufw $0, "#alpha1", %%mm4 \n\t"\ |
---|
576 | "pshufw $0, "#beta1 ", %%mm5 \n\t"\ |
---|
577 | "packuswb %%mm4, %%mm4 \n\t"\ |
---|
578 | "packuswb %%mm5, %%mm5 \n\t"\ |
---|
579 | DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\ |
---|
580 | DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\ |
---|
581 | "por %%mm4, %%mm7 \n\t"\ |
---|
582 | DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\ |
---|
583 | "por %%mm4, %%mm7 \n\t"\ |
---|
584 | "pxor %%mm6, %%mm6 \n\t"\ |
---|
585 | "pcmpeqb %%mm6, %%mm7 \n\t" |
---|
586 | |
---|
587 | // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) |
---|
588 | // out: mm1=p0' mm2=q0' |
---|
589 | // clobbers: mm0,3-6 |
---|
590 | #define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\ |
---|
591 | "movq %%mm1 , %%mm5 \n\t"\ |
---|
592 | "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\ |
---|
593 | "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\ |
---|
594 | "pcmpeqb %%mm4 , %%mm4 \n\t"\ |
---|
595 | "pxor %%mm4 , %%mm3 \n\t"\ |
---|
596 | "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\ |
---|
597 | "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\ |
---|
598 | "pxor %%mm1 , %%mm4 \n\t"\ |
---|
599 | "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\ |
---|
600 | "pavgb %%mm5 , %%mm3 \n\t"\ |
---|
601 | "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\ |
---|
602 | "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\ |
---|
603 | "psubusb %%mm3 , %%mm6 \n\t"\ |
---|
604 | "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\ |
---|
605 | "pminub %%mm7 , %%mm6 \n\t"\ |
---|
606 | "pminub %%mm7 , %%mm3 \n\t"\ |
---|
607 | "psubusb %%mm6 , %%mm1 \n\t"\ |
---|
608 | "psubusb %%mm3 , %%mm2 \n\t"\ |
---|
609 | "paddusb %%mm3 , %%mm1 \n\t"\ |
---|
610 | "paddusb %%mm6 , %%mm2 \n\t" |
---|
611 | |
---|
612 | // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone |
---|
613 | // out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 ) |
---|
614 | // clobbers: q2, tmp, tc0 |
---|
615 | #define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\ |
---|
616 | "movq %%mm1, "#tmp" \n\t"\ |
---|
617 | "pavgb %%mm2, "#tmp" \n\t"\ |
---|
618 | "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\ |
---|
619 | "pxor "q2addr", "#tmp" \n\t"\ |
---|
620 | "pand %9, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\ |
---|
621 | "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\ |
---|
622 | "movq "#p1", "#tmp" \n\t"\ |
---|
623 | "psubusb "#tc0", "#tmp" \n\t"\ |
---|
624 | "paddusb "#p1", "#tc0" \n\t"\ |
---|
625 | "pmaxub "#tmp", "#q2" \n\t"\ |
---|
626 | "pminub "#tc0", "#q2" \n\t"\ |
---|
627 | "movq "#q2", "q1addr" \n\t" |
---|
628 | |
---|
629 | static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) |
---|
630 | { |
---|
631 | DECLARE_ALIGNED(8, uint64_t, tmp0)[2]; |
---|
632 | |
---|
633 | __asm__ volatile( |
---|
634 | "movq (%2,%4), %%mm0 \n\t" //p1 |
---|
635 | "movq (%2,%4,2), %%mm1 \n\t" //p0 |
---|
636 | "movq (%3), %%mm2 \n\t" //q0 |
---|
637 | "movq (%3,%4), %%mm3 \n\t" //q1 |
---|
638 | H264_DEBLOCK_MASK(%7, %8) |
---|
639 | |
---|
640 | "movd %6, %%mm4 \n\t" |
---|
641 | "punpcklbw %%mm4, %%mm4 \n\t" |
---|
642 | "punpcklwd %%mm4, %%mm4 \n\t" |
---|
643 | "pcmpeqb %%mm3, %%mm3 \n\t" |
---|
644 | "movq %%mm4, %%mm6 \n\t" |
---|
645 | "pcmpgtb %%mm3, %%mm4 \n\t" |
---|
646 | "movq %%mm6, %1 \n\t" |
---|
647 | "pand %%mm4, %%mm7 \n\t" |
---|
648 | "movq %%mm7, %0 \n\t" |
---|
649 | |
---|
650 | /* filter p1 */ |
---|
651 | "movq (%2), %%mm3 \n\t" //p2 |
---|
652 | DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1 |
---|
653 | "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta |
---|
654 | "pand %1, %%mm7 \n\t" // mask & tc0 |
---|
655 | "movq %%mm7, %%mm4 \n\t" |
---|
656 | "psubb %%mm6, %%mm7 \n\t" |
---|
657 | "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0 |
---|
658 | H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4) |
---|
659 | |
---|
660 | /* filter q1 */ |
---|
661 | "movq (%3,%4,2), %%mm4 \n\t" //q2 |
---|
662 | DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1 |
---|
663 | "pand %0, %%mm6 \n\t" |
---|
664 | "movq %1, %%mm5 \n\t" // can be merged with the and below but is slower then |
---|
665 | "pand %%mm6, %%mm5 \n\t" |
---|
666 | "psubb %%mm6, %%mm7 \n\t" |
---|
667 | "movq (%3,%4), %%mm3 \n\t" |
---|
668 | H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6) |
---|
669 | |
---|
670 | /* filter p0, q0 */ |
---|
671 | H264_DEBLOCK_P0_Q0(%9, unused) |
---|
672 | "movq %%mm1, (%2,%4,2) \n\t" |
---|
673 | "movq %%mm2, (%3) \n\t" |
---|
674 | |
---|
675 | : "=m"(tmp0[0]), "=m"(tmp0[1]) |
---|
676 | : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride), |
---|
677 | "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1), |
---|
678 | "m"(ff_bone) |
---|
679 | ); |
---|
680 | } |
---|
681 | |
---|
682 | static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) |
---|
683 | { |
---|
684 | if((tc0[0] & tc0[1]) >= 0) |
---|
685 | h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0); |
---|
686 | if((tc0[2] & tc0[3]) >= 0) |
---|
687 | h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2); |
---|
688 | } |
---|
689 | |
---|
690 | static void Part1(uint8_t *pix, uint8_t *trans, int stride) |
---|
691 | { |
---|
692 | transpose4x4(trans, pix-4, 8, stride); |
---|
693 | transpose4x4(trans +4*8, pix, 8, stride); |
---|
694 | transpose4x4(trans+4, pix-4+4*stride, 8, stride); |
---|
695 | transpose4x4(trans+4+4*8, pix +4*stride, 8, stride); |
---|
696 | } |
---|
697 | static void Part2(uint8_t *pix, uint8_t *trans, int stride) |
---|
698 | { |
---|
699 | transpose4x4(pix-2, trans +2*8, stride, 8); |
---|
700 | transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8); |
---|
701 | } |
---|
702 | static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) |
---|
703 | { |
---|
704 | //FIXME: could cut some load/stores by merging transpose with filter |
---|
705 | // also, it only needs to transpose 6x8 |
---|
706 | DECLARE_ALIGNED(8, uint8_t, trans)[8*8]; |
---|
707 | int i; |
---|
708 | for(i=0; i<2; i++, pix+=8*stride, tc0+=2) { |
---|
709 | if((tc0[0] & tc0[1]) < 0) |
---|
710 | continue; |
---|
711 | // transpose4x4(trans, pix-4, 8, stride); |
---|
712 | // transpose4x4(trans +4*8, pix, 8, stride); |
---|
713 | // transpose4x4(trans+4, pix-4+4*stride, 8, stride); |
---|
714 | // transpose4x4(trans+4+4*8, pix +4*stride, 8, stride); |
---|
715 | { |
---|
716 | void(*Part1Ptr)(uint8_t*, uint8_t*, int)=Part1; |
---|
717 | Part1Ptr(pix, trans, stride); |
---|
718 | } |
---|
719 | h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0); |
---|
720 | { |
---|
721 | void(*Part2Ptr)(uint8_t*, uint8_t*, int)=Part2; |
---|
722 | Part2Ptr(pix, trans, stride); |
---|
723 | } |
---|
724 | // transpose4x4(pix-2, trans +2*8, stride, 8); |
---|
725 | // transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8); |
---|
726 | } |
---|
727 | } |
---|
728 | |
---|
729 | static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) |
---|
730 | { |
---|
731 | __asm__ volatile( |
---|
732 | "movq (%0), %%mm0 \n\t" //p1 |
---|
733 | "movq (%0,%2), %%mm1 \n\t" //p0 |
---|
734 | "movq (%1), %%mm2 \n\t" //q0 |
---|
735 | "movq (%1,%2), %%mm3 \n\t" //q1 |
---|
736 | H264_DEBLOCK_MASK(%4, %5) |
---|
737 | "movd %3, %%mm6 \n\t" |
---|
738 | "punpcklbw %%mm6, %%mm6 \n\t" |
---|
739 | "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask |
---|
740 | H264_DEBLOCK_P0_Q0(%6, %7) |
---|
741 | "movq %%mm1, (%0,%2) \n\t" |
---|
742 | "movq %%mm2, (%1) \n\t" |
---|
743 | |
---|
744 | :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride), |
---|
745 | "r"(*(uint32_t*)tc0), |
---|
746 | "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F) |
---|
747 | ); |
---|
748 | } |
---|
749 | |
---|
750 | static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) |
---|
751 | { |
---|
752 | h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0); |
---|
753 | } |
---|
754 | |
---|
755 | static void Part2C(uint8_t *pix, uint8_t *trans, int stride) |
---|
756 | { |
---|
757 | transpose4x4(pix-2, trans, stride, 8); |
---|
758 | transpose4x4(pix-2+4*stride, trans+4, stride, 8); |
---|
759 | } |
---|
760 | |
---|
761 | static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) |
---|
762 | { |
---|
763 | //FIXME: could cut some load/stores by merging transpose with filter |
---|
764 | DECLARE_ALIGNED(8, uint8_t, trans)[8*4]; |
---|
765 | transpose4x4(trans, pix-2, 8, stride); |
---|
766 | transpose4x4(trans+4, pix-2+4*stride, 8, stride); |
---|
767 | h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0); |
---|
768 | { |
---|
769 | void(*Part2CPtr)(uint8_t*, uint8_t*, int)=Part2C; |
---|
770 | Part2CPtr(pix, trans, stride); |
---|
771 | } |
---|
772 | // transpose4x4(pix-2, trans, stride, 8); |
---|
773 | // transpose4x4(pix-2+4*stride, trans+4, stride, 8); |
---|
774 | } |
---|
775 | |
---|
776 | // p0 = (p0 + q1 + 2*p1 + 2) >> 2 |
---|
777 | #define H264_FILTER_CHROMA4(p0, p1, q1, one) \ |
---|
778 | "movq "#p0", %%mm4 \n\t"\ |
---|
779 | "pxor "#q1", %%mm4 \n\t"\ |
---|
780 | "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\ |
---|
781 | "pavgb "#q1", "#p0" \n\t"\ |
---|
782 | "psubusb %%mm4, "#p0" \n\t"\ |
---|
783 | "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\ |
---|
784 | |
---|
785 | static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1) |
---|
786 | { |
---|
787 | __asm__ volatile( |
---|
788 | "movq (%0), %%mm0 \n\t" |
---|
789 | "movq (%0,%2), %%mm1 \n\t" |
---|
790 | "movq (%1), %%mm2 \n\t" |
---|
791 | "movq (%1,%2), %%mm3 \n\t" |
---|
792 | H264_DEBLOCK_MASK(%3, %4) |
---|
793 | "movq %%mm1, %%mm5 \n\t" |
---|
794 | "movq %%mm2, %%mm6 \n\t" |
---|
795 | H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0' |
---|
796 | H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0' |
---|
797 | "psubb %%mm5, %%mm1 \n\t" |
---|
798 | "psubb %%mm6, %%mm2 \n\t" |
---|
799 | "pand %%mm7, %%mm1 \n\t" |
---|
800 | "pand %%mm7, %%mm2 \n\t" |
---|
801 | "paddb %%mm5, %%mm1 \n\t" |
---|
802 | "paddb %%mm6, %%mm2 \n\t" |
---|
803 | "movq %%mm1, (%0,%2) \n\t" |
---|
804 | "movq %%mm2, (%1) \n\t" |
---|
805 | :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride), |
---|
806 | "m"(alpha1), "m"(beta1), "m"(ff_bone) |
---|
807 | ); |
---|
808 | } |
---|
809 | |
---|
810 | static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta) |
---|
811 | { |
---|
812 | h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1); |
---|
813 | } |
---|
814 | |
---|
815 | static void Part2CI(uint8_t *pix, uint8_t *trans, int stride) |
---|
816 | { |
---|
817 | transpose4x4(pix-2, trans, stride, 8); |
---|
818 | transpose4x4(pix-2+4*stride, trans+4, stride, 8); |
---|
819 | } |
---|
820 | static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta) |
---|
821 | { |
---|
822 | //FIXME: could cut some load/stores by merging transpose with filter |
---|
823 | DECLARE_ALIGNED(8, uint8_t, trans)[8*4]; |
---|
824 | transpose4x4(trans, pix-2, 8, stride); |
---|
825 | transpose4x4(trans+4, pix-2+4*stride, 8, stride); |
---|
826 | h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1); |
---|
827 | { |
---|
828 | void(*Part2CIPtr)(uint8_t*, uint8_t*, int)=Part2CI; |
---|
829 | Part2CIPtr(pix, trans, stride); |
---|
830 | } |
---|
831 | // transpose4x4(pix-2, trans, stride, 8); |
---|
832 | // transpose4x4(pix-2+4*stride, trans+4, stride, 8); |
---|
833 | } |
---|
834 | |
---|
835 | static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2], |
---|
836 | int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) { |
---|
837 | int dir; |
---|
838 | __asm__ volatile( |
---|
839 | "movq %0, %%mm7 \n" |
---|
840 | "movq %1, %%mm6 \n" |
---|
841 | ::"m"(ff_pb_1), "m"(ff_pb_3) |
---|
842 | ); |
---|
843 | if(field) |
---|
844 | __asm__ volatile( |
---|
845 | "movq %0, %%mm6 \n" |
---|
846 | ::"m"(ff_pb_3_1) |
---|
847 | ); |
---|
848 | __asm__ volatile( |
---|
849 | "movq %%mm6, %%mm5 \n" |
---|
850 | "paddb %%mm5, %%mm5 \n" |
---|
851 | :); |
---|
852 | |
---|
853 | // could do a special case for dir==0 && edges==1, but it only reduces the |
---|
854 | // average filter time by 1.2% |
---|
855 | for( dir=1; dir>=0; dir-- ) { |
---|
856 | const x86_reg d_idx = dir ? -8 : -1; |
---|
857 | const int mask_mv = dir ? mask_mv1 : mask_mv0; |
---|
858 | DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL; |
---|
859 | int b_idx, edge; |
---|
860 | for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) { |
---|
861 | __asm__ volatile( |
---|
862 | "pand %0, %%mm0 \n\t" |
---|
863 | ::"m"(mask_dir) |
---|
864 | ); |
---|
865 | if(!(mask_mv & edge)) { |
---|
866 | if(bidir) { |
---|
867 | __asm__ volatile( |
---|
868 | "movd (%1,%0), %%mm2 \n" |
---|
869 | "punpckldq 40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] } |
---|
870 | "pshufw $0x44, (%1), %%mm0 \n" // { ref0[b], ref0[b] } |
---|
871 | "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] } |
---|
872 | "pshufw $0x4E, %%mm2, %%mm3 \n" |
---|
873 | "psubb %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } |
---|
874 | "psubb %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } |
---|
875 | "1: \n" |
---|
876 | "por %%mm1, %%mm0 \n" |
---|
877 | "movq (%2,%0,4), %%mm1 \n" |
---|
878 | "movq 8(%2,%0,4), %%mm2 \n" |
---|
879 | "movq %%mm1, %%mm3 \n" |
---|
880 | "movq %%mm2, %%mm4 \n" |
---|
881 | "psubw (%2), %%mm1 \n" |
---|
882 | "psubw 8(%2), %%mm2 \n" |
---|
883 | "psubw 160(%2), %%mm3 \n" |
---|
884 | "psubw 168(%2), %%mm4 \n" |
---|
885 | "packsswb %%mm2, %%mm1 \n" |
---|
886 | "packsswb %%mm4, %%mm3 \n" |
---|
887 | "paddb %%mm6, %%mm1 \n" |
---|
888 | "paddb %%mm6, %%mm3 \n" |
---|
889 | "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit |
---|
890 | "psubusb %%mm5, %%mm3 \n" |
---|
891 | "packsswb %%mm3, %%mm1 \n" |
---|
892 | "add $40, %0 \n" |
---|
893 | "cmp $40, %0 \n" |
---|
894 | "jl 1b \n" |
---|
895 | "sub $80, %0 \n" |
---|
896 | "pshufw $0x4E, %%mm1, %%mm1 \n" |
---|
897 | "por %%mm1, %%mm0 \n" |
---|
898 | "pshufw $0x4E, %%mm0, %%mm1 \n" |
---|
899 | "pminub %%mm1, %%mm0 \n" |
---|
900 | ::"r"(d_idx), |
---|
901 | "r"(ref[0]+b_idx), |
---|
902 | "r"(mv[0]+b_idx) |
---|
903 | ); |
---|
904 | } else { |
---|
905 | __asm__ volatile( |
---|
906 | "movd (%1), %%mm0 \n" |
---|
907 | "psubb (%1,%0), %%mm0 \n" // ref[b] != ref[bn] |
---|
908 | "movq (%2), %%mm1 \n" |
---|
909 | "movq 8(%2), %%mm2 \n" |
---|
910 | "psubw (%2,%0,4), %%mm1 \n" |
---|
911 | "psubw 8(%2,%0,4), %%mm2 \n" |
---|
912 | "packsswb %%mm2, %%mm1 \n" |
---|
913 | "paddb %%mm6, %%mm1 \n" |
---|
914 | "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit |
---|
915 | "packsswb %%mm1, %%mm1 \n" |
---|
916 | "por %%mm1, %%mm0 \n" |
---|
917 | ::"r"(d_idx), |
---|
918 | "r"(ref[0]+b_idx), |
---|
919 | "r"(mv[0]+b_idx) |
---|
920 | ); |
---|
921 | } |
---|
922 | } |
---|
923 | __asm__ volatile( |
---|
924 | "movd %0, %%mm1 \n" |
---|
925 | "por %1, %%mm1 \n" // nnz[b] || nnz[bn] |
---|
926 | ::"m"(nnz[b_idx]), |
---|
927 | "m"(nnz[b_idx+d_idx]) |
---|
928 | ); |
---|
929 | __asm__ volatile( |
---|
930 | "pminub %%mm7, %%mm1 \n" |
---|
931 | "pminub %%mm7, %%mm0 \n" |
---|
932 | "psllw $1, %%mm1 \n" |
---|
933 | "pxor %%mm2, %%mm2 \n" |
---|
934 | "pmaxub %%mm0, %%mm1 \n" |
---|
935 | "punpcklbw %%mm2, %%mm1 \n" |
---|
936 | "movq %%mm1, %0 \n" |
---|
937 | :"=m"(*bS[dir][edge]) |
---|
938 | ::"memory" |
---|
939 | ); |
---|
940 | } |
---|
941 | edges = 4; |
---|
942 | step = 1; |
---|
943 | } |
---|
944 | __asm__ volatile( |
---|
945 | "movq (%0), %%mm0 \n\t" |
---|
946 | "movq 8(%0), %%mm1 \n\t" |
---|
947 | "movq 16(%0), %%mm2 \n\t" |
---|
948 | "movq 24(%0), %%mm3 \n\t" |
---|
949 | TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4) |
---|
950 | "movq %%mm0, (%0) \n\t" |
---|
951 | "movq %%mm3, 8(%0) \n\t" |
---|
952 | "movq %%mm4, 16(%0) \n\t" |
---|
953 | "movq %%mm2, 24(%0) \n\t" |
---|
954 | ::"r"(bS[0]) |
---|
955 | :"memory" |
---|
956 | ); |
---|
957 | } |
---|
958 | |
---|
959 | /***********************************/ |
---|
960 | /* motion compensation */ |
---|
961 | |
---|
962 | #define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\ |
---|
963 | "mov"#q" "#C", "#T" \n\t"\ |
---|
964 | "mov"#d" (%0), "#F" \n\t"\ |
---|
965 | "paddw "#D", "#T" \n\t"\ |
---|
966 | "psllw $2, "#T" \n\t"\ |
---|
967 | "psubw "#B", "#T" \n\t"\ |
---|
968 | "psubw "#E", "#T" \n\t"\ |
---|
969 | "punpcklbw "#Z", "#F" \n\t"\ |
---|
970 | "pmullw %4, "#T" \n\t"\ |
---|
971 | "paddw %5, "#A" \n\t"\ |
---|
972 | "add %2, %0 \n\t"\ |
---|
973 | "paddw "#F", "#A" \n\t"\ |
---|
974 | "paddw "#A", "#T" \n\t"\ |
---|
975 | "psraw $5, "#T" \n\t"\ |
---|
976 | "packuswb "#T", "#T" \n\t"\ |
---|
977 | OP(T, (%1), A, d)\ |
---|
978 | "add %3, %1 \n\t" |
---|
979 | |
---|
980 | #define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\ |
---|
981 | "mov"#q" "#C", "#T" \n\t"\ |
---|
982 | "mov"#d" (%0), "#F" \n\t"\ |
---|
983 | "paddw "#D", "#T" \n\t"\ |
---|
984 | "psllw $2, "#T" \n\t"\ |
---|
985 | "paddw %4, "#A" \n\t"\ |
---|
986 | "psubw "#B", "#T" \n\t"\ |
---|
987 | "psubw "#E", "#T" \n\t"\ |
---|
988 | "punpcklbw "#Z", "#F" \n\t"\ |
---|
989 | "pmullw %3, "#T" \n\t"\ |
---|
990 | "paddw "#F", "#A" \n\t"\ |
---|
991 | "add %2, %0 \n\t"\ |
---|
992 | "paddw "#A", "#T" \n\t"\ |
---|
993 | "mov"#q" "#T", "#OF"(%1) \n\t" |
---|
994 | |
---|
995 | #define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q) |
---|
996 | #define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q) |
---|
997 | #define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa) |
---|
998 | #define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa) |
---|
999 | |
---|
1000 | |
---|
1001 | #define QPEL_H264(OPNAME, OP, MMX)\ |
---|
1002 | static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
---|
1003 | int h=4;\ |
---|
1004 | \ |
---|
1005 | __asm__ volatile(\ |
---|
1006 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1007 | "movq "MANGLE(ff_pw_5) ", %%mm4\n\t"\ |
---|
1008 | "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\ |
---|
1009 | "1: \n\t"\ |
---|
1010 | "movd -1(%0), %%mm1 \n\t"\ |
---|
1011 | "movd (%0), %%mm2 \n\t"\ |
---|
1012 | "movd 1(%0), %%mm3 \n\t"\ |
---|
1013 | "movd 2(%0), %%mm0 \n\t"\ |
---|
1014 | "punpcklbw %%mm7, %%mm1 \n\t"\ |
---|
1015 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1016 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1017 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1018 | "paddw %%mm0, %%mm1 \n\t"\ |
---|
1019 | "paddw %%mm3, %%mm2 \n\t"\ |
---|
1020 | "movd -2(%0), %%mm0 \n\t"\ |
---|
1021 | "movd 3(%0), %%mm3 \n\t"\ |
---|
1022 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1023 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1024 | "paddw %%mm3, %%mm0 \n\t"\ |
---|
1025 | "psllw $2, %%mm2 \n\t"\ |
---|
1026 | "psubw %%mm1, %%mm2 \n\t"\ |
---|
1027 | "pmullw %%mm4, %%mm2 \n\t"\ |
---|
1028 | "paddw %%mm5, %%mm0 \n\t"\ |
---|
1029 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1030 | "psraw $5, %%mm0 \n\t"\ |
---|
1031 | "packuswb %%mm0, %%mm0 \n\t"\ |
---|
1032 | OP(%%mm0, (%1),%%mm6, d)\ |
---|
1033 | "add %3, %0 \n\t"\ |
---|
1034 | "add %4, %1 \n\t"\ |
---|
1035 | "decl %2 \n\t"\ |
---|
1036 | " jnz 1b \n\t"\ |
---|
1037 | : "+a"(src), "+c"(dst), "+g"(h)\ |
---|
1038 | : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ |
---|
1039 | : "memory"\ |
---|
1040 | );\ |
---|
1041 | }\ |
---|
1042 | static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
---|
1043 | int h=4;\ |
---|
1044 | __asm__ volatile(\ |
---|
1045 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1046 | "movq %0, %%mm4 \n\t"\ |
---|
1047 | "movq %1, %%mm5 \n\t"\ |
---|
1048 | :: "m"(ff_pw_5), "m"(ff_pw_16)\ |
---|
1049 | );\ |
---|
1050 | do{\ |
---|
1051 | __asm__ volatile(\ |
---|
1052 | "movd -1(%0), %%mm1 \n\t"\ |
---|
1053 | "movd (%0), %%mm2 \n\t"\ |
---|
1054 | "movd 1(%0), %%mm3 \n\t"\ |
---|
1055 | "movd 2(%0), %%mm0 \n\t"\ |
---|
1056 | "punpcklbw %%mm7, %%mm1 \n\t"\ |
---|
1057 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1058 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1059 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1060 | "paddw %%mm0, %%mm1 \n\t"\ |
---|
1061 | "paddw %%mm3, %%mm2 \n\t"\ |
---|
1062 | "movd -2(%0), %%mm0 \n\t"\ |
---|
1063 | "movd 3(%0), %%mm3 \n\t"\ |
---|
1064 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1065 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1066 | "paddw %%mm3, %%mm0 \n\t"\ |
---|
1067 | "psllw $2, %%mm2 \n\t"\ |
---|
1068 | "psubw %%mm1, %%mm2 \n\t"\ |
---|
1069 | "pmullw %%mm4, %%mm2 \n\t"\ |
---|
1070 | "paddw %%mm5, %%mm0 \n\t"\ |
---|
1071 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1072 | "movd (%2), %%mm3 \n\t"\ |
---|
1073 | "psraw $5, %%mm0 \n\t"\ |
---|
1074 | "packuswb %%mm0, %%mm0 \n\t"\ |
---|
1075 | PAVGB" %%mm3, %%mm0 \n\t"\ |
---|
1076 | OP(%%mm0, (%1),%%mm6, d)\ |
---|
1077 | "add %4, %0 \n\t"\ |
---|
1078 | "add %4, %1 \n\t"\ |
---|
1079 | "add %3, %2 \n\t"\ |
---|
1080 | : "+a"(src), "+c"(dst), "+d"(src2)\ |
---|
1081 | : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\ |
---|
1082 | : "memory"\ |
---|
1083 | );\ |
---|
1084 | }while(--h);\ |
---|
1085 | }\ |
---|
1086 | static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
---|
1087 | src -= 2*srcStride;\ |
---|
1088 | __asm__ volatile(\ |
---|
1089 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1090 | "movd (%0), %%mm0 \n\t"\ |
---|
1091 | "add %2, %0 \n\t"\ |
---|
1092 | "movd (%0), %%mm1 \n\t"\ |
---|
1093 | "add %2, %0 \n\t"\ |
---|
1094 | "movd (%0), %%mm2 \n\t"\ |
---|
1095 | "add %2, %0 \n\t"\ |
---|
1096 | "movd (%0), %%mm3 \n\t"\ |
---|
1097 | "add %2, %0 \n\t"\ |
---|
1098 | "movd (%0), %%mm4 \n\t"\ |
---|
1099 | "add %2, %0 \n\t"\ |
---|
1100 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1101 | "punpcklbw %%mm7, %%mm1 \n\t"\ |
---|
1102 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1103 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1104 | "punpcklbw %%mm7, %%mm4 \n\t"\ |
---|
1105 | QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
---|
1106 | QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ |
---|
1107 | QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ |
---|
1108 | QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ |
---|
1109 | \ |
---|
1110 | : "+a"(src), "+c"(dst)\ |
---|
1111 | : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ |
---|
1112 | : "memory"\ |
---|
1113 | );\ |
---|
1114 | }\ |
---|
1115 | static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ |
---|
1116 | int h=4;\ |
---|
1117 | int w=3;\ |
---|
1118 | src -= 2*srcStride+2;\ |
---|
1119 | while(w--){\ |
---|
1120 | __asm__ volatile(\ |
---|
1121 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1122 | "movd (%0), %%mm0 \n\t"\ |
---|
1123 | "add %2, %0 \n\t"\ |
---|
1124 | "movd (%0), %%mm1 \n\t"\ |
---|
1125 | "add %2, %0 \n\t"\ |
---|
1126 | "movd (%0), %%mm2 \n\t"\ |
---|
1127 | "add %2, %0 \n\t"\ |
---|
1128 | "movd (%0), %%mm3 \n\t"\ |
---|
1129 | "add %2, %0 \n\t"\ |
---|
1130 | "movd (%0), %%mm4 \n\t"\ |
---|
1131 | "add %2, %0 \n\t"\ |
---|
1132 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1133 | "punpcklbw %%mm7, %%mm1 \n\t"\ |
---|
1134 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1135 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1136 | "punpcklbw %%mm7, %%mm4 \n\t"\ |
---|
1137 | QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\ |
---|
1138 | QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\ |
---|
1139 | QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\ |
---|
1140 | QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\ |
---|
1141 | \ |
---|
1142 | : "+a"(src)\ |
---|
1143 | : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ |
---|
1144 | : "memory"\ |
---|
1145 | );\ |
---|
1146 | tmp += 4;\ |
---|
1147 | src += 4 - 9*srcStride;\ |
---|
1148 | }\ |
---|
1149 | tmp -= 3*4;\ |
---|
1150 | __asm__ volatile(\ |
---|
1151 | "1: \n\t"\ |
---|
1152 | "movq (%0), %%mm0 \n\t"\ |
---|
1153 | "paddw 10(%0), %%mm0 \n\t"\ |
---|
1154 | "movq 2(%0), %%mm1 \n\t"\ |
---|
1155 | "paddw 8(%0), %%mm1 \n\t"\ |
---|
1156 | "movq 4(%0), %%mm2 \n\t"\ |
---|
1157 | "paddw 6(%0), %%mm2 \n\t"\ |
---|
1158 | "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\ |
---|
1159 | "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\ |
---|
1160 | "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\ |
---|
1161 | "paddsw %%mm2, %%mm0 \n\t"\ |
---|
1162 | "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\ |
---|
1163 | "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\ |
---|
1164 | "psraw $6, %%mm0 \n\t"\ |
---|
1165 | "packuswb %%mm0, %%mm0 \n\t"\ |
---|
1166 | OP(%%mm0, (%1),%%mm7, d)\ |
---|
1167 | "add $24, %0 \n\t"\ |
---|
1168 | "add %3, %1 \n\t"\ |
---|
1169 | "decl %2 \n\t"\ |
---|
1170 | " jnz 1b \n\t"\ |
---|
1171 | : "+a"(tmp), "+c"(dst), "+g"(h)\ |
---|
1172 | : "S"((x86_reg)dstStride)\ |
---|
1173 | : "memory"\ |
---|
1174 | );\ |
---|
1175 | }\ |
---|
1176 | \ |
---|
1177 | static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
---|
1178 | int h=8;\ |
---|
1179 | __asm__ volatile(\ |
---|
1180 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1181 | "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\ |
---|
1182 | "1: \n\t"\ |
---|
1183 | "movq (%0), %%mm0 \n\t"\ |
---|
1184 | "movq 1(%0), %%mm2 \n\t"\ |
---|
1185 | "movq %%mm0, %%mm1 \n\t"\ |
---|
1186 | "movq %%mm2, %%mm3 \n\t"\ |
---|
1187 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1188 | "punpckhbw %%mm7, %%mm1 \n\t"\ |
---|
1189 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1190 | "punpckhbw %%mm7, %%mm3 \n\t"\ |
---|
1191 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1192 | "paddw %%mm3, %%mm1 \n\t"\ |
---|
1193 | "psllw $2, %%mm0 \n\t"\ |
---|
1194 | "psllw $2, %%mm1 \n\t"\ |
---|
1195 | "movq -1(%0), %%mm2 \n\t"\ |
---|
1196 | "movq 2(%0), %%mm4 \n\t"\ |
---|
1197 | "movq %%mm2, %%mm3 \n\t"\ |
---|
1198 | "movq %%mm4, %%mm5 \n\t"\ |
---|
1199 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1200 | "punpckhbw %%mm7, %%mm3 \n\t"\ |
---|
1201 | "punpcklbw %%mm7, %%mm4 \n\t"\ |
---|
1202 | "punpckhbw %%mm7, %%mm5 \n\t"\ |
---|
1203 | "paddw %%mm4, %%mm2 \n\t"\ |
---|
1204 | "paddw %%mm3, %%mm5 \n\t"\ |
---|
1205 | "psubw %%mm2, %%mm0 \n\t"\ |
---|
1206 | "psubw %%mm5, %%mm1 \n\t"\ |
---|
1207 | "pmullw %%mm6, %%mm0 \n\t"\ |
---|
1208 | "pmullw %%mm6, %%mm1 \n\t"\ |
---|
1209 | "movd -2(%0), %%mm2 \n\t"\ |
---|
1210 | "movd 7(%0), %%mm5 \n\t"\ |
---|
1211 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1212 | "punpcklbw %%mm7, %%mm5 \n\t"\ |
---|
1213 | "paddw %%mm3, %%mm2 \n\t"\ |
---|
1214 | "paddw %%mm5, %%mm4 \n\t"\ |
---|
1215 | "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\ |
---|
1216 | "paddw %%mm5, %%mm2 \n\t"\ |
---|
1217 | "paddw %%mm5, %%mm4 \n\t"\ |
---|
1218 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1219 | "paddw %%mm4, %%mm1 \n\t"\ |
---|
1220 | "psraw $5, %%mm0 \n\t"\ |
---|
1221 | "psraw $5, %%mm1 \n\t"\ |
---|
1222 | "packuswb %%mm1, %%mm0 \n\t"\ |
---|
1223 | OP(%%mm0, (%1),%%mm5, q)\ |
---|
1224 | "add %3, %0 \n\t"\ |
---|
1225 | "add %4, %1 \n\t"\ |
---|
1226 | "decl %2 \n\t"\ |
---|
1227 | " jnz 1b \n\t"\ |
---|
1228 | : "+a"(src), "+c"(dst), "+g"(h)\ |
---|
1229 | : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ |
---|
1230 | : "memory"\ |
---|
1231 | );\ |
---|
1232 | }\ |
---|
1233 | \ |
---|
1234 | static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
---|
1235 | int h=8;\ |
---|
1236 | __asm__ volatile(\ |
---|
1237 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1238 | "movq %0, %%mm6 \n\t"\ |
---|
1239 | :: "m"(ff_pw_5)\ |
---|
1240 | );\ |
---|
1241 | do{\ |
---|
1242 | __asm__ volatile(\ |
---|
1243 | "movq (%0), %%mm0 \n\t"\ |
---|
1244 | "movq 1(%0), %%mm2 \n\t"\ |
---|
1245 | "movq %%mm0, %%mm1 \n\t"\ |
---|
1246 | "movq %%mm2, %%mm3 \n\t"\ |
---|
1247 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1248 | "punpckhbw %%mm7, %%mm1 \n\t"\ |
---|
1249 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1250 | "punpckhbw %%mm7, %%mm3 \n\t"\ |
---|
1251 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1252 | "paddw %%mm3, %%mm1 \n\t"\ |
---|
1253 | "psllw $2, %%mm0 \n\t"\ |
---|
1254 | "psllw $2, %%mm1 \n\t"\ |
---|
1255 | "movq -1(%0), %%mm2 \n\t"\ |
---|
1256 | "movq 2(%0), %%mm4 \n\t"\ |
---|
1257 | "movq %%mm2, %%mm3 \n\t"\ |
---|
1258 | "movq %%mm4, %%mm5 \n\t"\ |
---|
1259 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1260 | "punpckhbw %%mm7, %%mm3 \n\t"\ |
---|
1261 | "punpcklbw %%mm7, %%mm4 \n\t"\ |
---|
1262 | "punpckhbw %%mm7, %%mm5 \n\t"\ |
---|
1263 | "paddw %%mm4, %%mm2 \n\t"\ |
---|
1264 | "paddw %%mm3, %%mm5 \n\t"\ |
---|
1265 | "psubw %%mm2, %%mm0 \n\t"\ |
---|
1266 | "psubw %%mm5, %%mm1 \n\t"\ |
---|
1267 | "pmullw %%mm6, %%mm0 \n\t"\ |
---|
1268 | "pmullw %%mm6, %%mm1 \n\t"\ |
---|
1269 | "movd -2(%0), %%mm2 \n\t"\ |
---|
1270 | "movd 7(%0), %%mm5 \n\t"\ |
---|
1271 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1272 | "punpcklbw %%mm7, %%mm5 \n\t"\ |
---|
1273 | "paddw %%mm3, %%mm2 \n\t"\ |
---|
1274 | "paddw %%mm5, %%mm4 \n\t"\ |
---|
1275 | "movq %5, %%mm5 \n\t"\ |
---|
1276 | "paddw %%mm5, %%mm2 \n\t"\ |
---|
1277 | "paddw %%mm5, %%mm4 \n\t"\ |
---|
1278 | "paddw %%mm2, %%mm0 \n\t"\ |
---|
1279 | "paddw %%mm4, %%mm1 \n\t"\ |
---|
1280 | "psraw $5, %%mm0 \n\t"\ |
---|
1281 | "psraw $5, %%mm1 \n\t"\ |
---|
1282 | "movq (%2), %%mm4 \n\t"\ |
---|
1283 | "packuswb %%mm1, %%mm0 \n\t"\ |
---|
1284 | PAVGB" %%mm4, %%mm0 \n\t"\ |
---|
1285 | OP(%%mm0, (%1),%%mm5, q)\ |
---|
1286 | "add %4, %0 \n\t"\ |
---|
1287 | "add %4, %1 \n\t"\ |
---|
1288 | "add %3, %2 \n\t"\ |
---|
1289 | : "+a"(src), "+c"(dst), "+d"(src2)\ |
---|
1290 | : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ |
---|
1291 | "m"(ff_pw_16)\ |
---|
1292 | : "memory"\ |
---|
1293 | );\ |
---|
1294 | }while(--h);\ |
---|
1295 | }\ |
---|
1296 | \ |
---|
1297 | static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ |
---|
1298 | int w= 2;\ |
---|
1299 | src -= 2*srcStride;\ |
---|
1300 | \ |
---|
1301 | while(w--){\ |
---|
1302 | __asm__ volatile(\ |
---|
1303 | "pxor %%mm7, %%mm7 \n\t"\ |
---|
1304 | "movd (%0), %%mm0 \n\t"\ |
---|
1305 | "add %2, %0 \n\t"\ |
---|
1306 | "movd (%0), %%mm1 \n\t"\ |
---|
1307 | "add %2, %0 \n\t"\ |
---|
1308 | "movd (%0), %%mm2 \n\t"\ |
---|
1309 | "add %2, %0 \n\t"\ |
---|
1310 | "movd (%0), %%mm3 \n\t"\ |
---|
1311 | "add %2, %0 \n\t"\ |
---|
1312 | "movd (%0), %%mm4 \n\t"\ |
---|
1313 | "add %2, %0 \n\t"\ |
---|
1314 | "punpcklbw %%mm7, %%mm0 \n\t"\ |
---|
1315 | "punpcklbw %%mm7, %%mm1 \n\t"\ |
---|
1316 | "punpcklbw %%mm7, %%mm2 \n\t"\ |
---|
1317 | "punpcklbw %%mm7, %%mm3 \n\t"\ |
---|
1318 | "punpcklbw %%mm7, %%mm4 \n\t"\ |
---|
1319 | QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
---|
1320 | QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ |
---|
1321 | QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ |
---|
1322 | QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ |
---|
1323 | QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ |
---|
1324 | QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ |
---|
1325 | QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
---|
1326 | QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ |
---|
1327 | \ |
---|
1328 | : "+a"(src), "+c"(dst)\ |
---|
1329 | : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ |
---|
1330 | : "memory"\ |
---|
1331 | );\ |
---|
1332 | if(h==16){\ |
---|
1333 | __asm__ volatile(\ |
---|
1334 | QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ |
---|
1335 | QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ |
---|
1336 | QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ |
---|
1337 | QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ |
---|
1338 | QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
---|
1339 | QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ |
---|
1340 | QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ |
---|
1341 | QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ |
---|
1342 | \ |
---|
1343 | : "+a"(src), "+c"(dst)\ |
---|
1344 | : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ |
---|
1345 | : "memory"\ |
---|
1346 | );\ |
---|
1347 | }\ |
---|
1348 | src += 4-(h+5)*srcStride;\ |
---|
1349 | dst += 4-h*dstStride;\ |
---|
1350 | }\ |
---|
1351 | }\ |
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7          \n\t"\
            "movd (%0), %%mm0           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm1           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm2           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm3           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm4           \n\t"\
            "add %2, %0                 \n\t"\
            "punpcklbw %%mm7, %%mm0     \n\t"\
            "punpcklbw %%mm7, %%mm1     \n\t"\
            "punpcklbw %%mm7, %%mm2     \n\t"\
            "punpcklbw %%mm7, %%mm3     \n\t"\
            "punpcklbw %%mm7, %%mm4     \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5,  0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0,  1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3,  4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4,  5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5,  6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0,  7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(size==16){\
            __asm__ volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
}\
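/* Second pass of the hv cases: horizontal 6-tap filter over the 16-bit  */\
/* intermediates. The (a - 5b + 20c)/16 sum is built with two shift and  */\
/* subtract stages, then >>6 scales the combined 2D filter back (the     */\
/* rounding term was already folded in during the first pass) and the    */\
/* result is clipped to 8 bits by packuswb.                              */\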
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int w = size>>4;\
    do{\
    int h = size;\
    __asm__ volatile(\
        "1:                         \n\t"\
        "movq     (%0), %%mm0       \n\t"\
        "movq    8(%0), %%mm3       \n\t"\
        "movq    2(%0), %%mm1       \n\t"\
        "movq   10(%0), %%mm4       \n\t"\
        "paddw   %%mm4, %%mm0       \n\t"\
        "paddw   %%mm3, %%mm1       \n\t"\
        "paddw  18(%0), %%mm3       \n\t"\
        "paddw  16(%0), %%mm4       \n\t"\
        "movq    4(%0), %%mm2       \n\t"\
        "movq   12(%0), %%mm5       \n\t"\
        "paddw   6(%0), %%mm2       \n\t"\
        "paddw  14(%0), %%mm5       \n\t"\
        "psubw   %%mm1, %%mm0       \n\t"\
        "psubw   %%mm4, %%mm3       \n\t"\
        "psraw      $2, %%mm0       \n\t"\
        "psraw      $2, %%mm3       \n\t"\
        "psubw   %%mm1, %%mm0       \n\t"\
        "psubw   %%mm4, %%mm3       \n\t"\
        "paddsw  %%mm2, %%mm0       \n\t"\
        "paddsw  %%mm5, %%mm3       \n\t"\
        "psraw      $2, %%mm0       \n\t"\
        "psraw      $2, %%mm3       \n\t"\
        "paddw   %%mm2, %%mm0       \n\t"\
        "paddw   %%mm5, %%mm3       \n\t"\
        "psraw      $6, %%mm0       \n\t"\
        "psraw      $6, %%mm3       \n\t"\
        "packuswb %%mm3, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm7, q)\
        "add $48, %0                \n\t"\
        "add %3, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((x86_reg)dstStride)\
        : "memory"\
    );\
    tmp += 8 - size*24;\
    dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\
\
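/* pixelsN_l2_shift5: average the 16-bit first-pass hv intermediates in  */\
/* src16, scaled back with >>5, against the 8-bit half-pel plane in      */\
/* src8; this implements the mc12/mc32 quarter-pel positions.            */\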
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    __asm__ volatile(\
        "movq      (%1), %%mm0      \n\t"\
        "movq    24(%1), %%mm1      \n\t"\
        "psraw      $5,  %%mm0      \n\t"\
        "psraw      $5,  %%mm1      \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        "packuswb %%mm1, %%mm1      \n\t"\
        PAVGB"     (%0), %%mm0      \n\t"\
        PAVGB"  (%0,%3), %%mm1      \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea  (%0,%3,2), %0         \n\t"\
        "lea  (%2,%4,2), %2         \n\t"\
        "movq    48(%1), %%mm0      \n\t"\
        "movq    72(%1), %%mm1      \n\t"\
        "psraw      $5,  %%mm0      \n\t"\
        "psraw      $5,  %%mm1      \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        "packuswb %%mm1, %%mm1      \n\t"\
        PAVGB"     (%0), %%mm0      \n\t"\
        PAVGB"  (%0,%3), %%mm1      \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    do{\
        __asm__ volatile(\
            "movq       (%1), %%mm0 \n\t"\
            "movq      8(%1), %%mm1 \n\t"\
            "movq     48(%1), %%mm2 \n\t"\
            "movq   8+48(%1), %%mm3 \n\t"\
            "psraw       $5,  %%mm0 \n\t"\
            "psraw       $5,  %%mm1 \n\t"\
            "psraw       $5,  %%mm2 \n\t"\
            "psraw       $5,  %%mm3 \n\t"\
            "packuswb  %%mm1, %%mm0 \n\t"\
            "packuswb  %%mm3, %%mm2 \n\t"\
            PAVGB"      (%0), %%mm0 \n\t"\
            PAVGB"   (%0,%3), %%mm2 \n\t"\
            OP(%%mm0, (%2),    %%mm5, q)\
            OP(%%mm2, (%2,%4), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst),\
              "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
            :"memory");\
        src8 += 2L*src8Stride;\
        src16 += 48;\
        dst += 2L*dstStride;\
    }while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\


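/* On x86-64 all 16 XMM registers are available, so the 16-wide horizontal
 * filter plus l2 averaging can be done in a single pass over each row. */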
#if ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    __asm__ volatile(\
        "pxor %%xmm15, %%xmm15      \n\t"\
        "movdqa %6, %%xmm14         \n\t"\
        "movdqa %7, %%xmm13         \n\t"\
        "1:                         \n\t"\
        "lddqu    6(%0), %%xmm1     \n\t"\
        "lddqu   -2(%0), %%xmm7     \n\t"\
        "movdqa  %%xmm1, %%xmm0     \n\t"\
        "punpckhbw %%xmm15, %%xmm1  \n\t"\
        "punpcklbw %%xmm15, %%xmm0  \n\t"\
        "punpcklbw %%xmm15, %%xmm7  \n\t"\
        "movdqa  %%xmm1, %%xmm2     \n\t"\
        "movdqa  %%xmm0, %%xmm6     \n\t"\
        "movdqa  %%xmm1, %%xmm3     \n\t"\
        "movdqa  %%xmm0, %%xmm8     \n\t"\
        "movdqa  %%xmm1, %%xmm4     \n\t"\
        "movdqa  %%xmm0, %%xmm9     \n\t"\
        "movdqa  %%xmm0, %%xmm12    \n\t"\
        "movdqa  %%xmm1, %%xmm11    \n\t"\
        "palignr $10,%%xmm0, %%xmm11\n\t"\
        "palignr $10,%%xmm7, %%xmm12\n\t"\
        "palignr $2, %%xmm0, %%xmm4 \n\t"\
        "palignr $2, %%xmm7, %%xmm9 \n\t"\
        "palignr $4, %%xmm0, %%xmm3 \n\t"\
        "palignr $4, %%xmm7, %%xmm8 \n\t"\
        "palignr $6, %%xmm0, %%xmm2 \n\t"\
        "palignr $6, %%xmm7, %%xmm6 \n\t"\
        "paddw   %%xmm0 ,%%xmm11    \n\t"\
        "palignr $8, %%xmm0, %%xmm1 \n\t"\
        "palignr $8, %%xmm7, %%xmm0 \n\t"\
        "paddw   %%xmm12,%%xmm7     \n\t"\
        "paddw   %%xmm3, %%xmm2     \n\t"\
        "paddw   %%xmm8, %%xmm6     \n\t"\
        "paddw   %%xmm4, %%xmm1     \n\t"\
        "paddw   %%xmm9, %%xmm0     \n\t"\
        "psllw   $2,     %%xmm2     \n\t"\
        "psllw   $2,     %%xmm6     \n\t"\
        "psubw   %%xmm1, %%xmm2     \n\t"\
        "psubw   %%xmm0, %%xmm6     \n\t"\
        "paddw   %%xmm13,%%xmm11    \n\t"\
        "paddw   %%xmm13,%%xmm7     \n\t"\
        "pmullw  %%xmm14,%%xmm2     \n\t"\
        "pmullw  %%xmm14,%%xmm6     \n\t"\
        "lddqu   (%2),   %%xmm3     \n\t"\
        "paddw   %%xmm11,%%xmm2     \n\t"\
        "paddw   %%xmm7, %%xmm6     \n\t"\
        "psraw   $5,     %%xmm2     \n\t"\
        "psraw   $5,     %%xmm6     \n\t"\
        "packuswb %%xmm2,%%xmm6     \n\t"\
        "pavgb   %%xmm3, %%xmm6     \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0                 \n\t"\
        "add %5, %1                 \n\t"\
        "add %4, %2                 \n\t"\
        "decl %3                    \n\t"\
        "jg 1b                      \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64

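/* SSSE3 horizontal 6-tap filter: one unaligned 16-byte load per row,
 * with the five shifted copies of the source window generated via
 * palignr instead of five additional loads. */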
#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7        \n\t"\
        "movdqa %0, %%xmm6          \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "lddqu   -2(%0), %%xmm1     \n\t"\
            "movdqa  %%xmm1, %%xmm0     \n\t"\
            "punpckhbw %%xmm7, %%xmm1   \n\t"\
            "punpcklbw %%xmm7, %%xmm0   \n\t"\
            "movdqa  %%xmm1, %%xmm2     \n\t"\
            "movdqa  %%xmm1, %%xmm3     \n\t"\
            "movdqa  %%xmm1, %%xmm4     \n\t"\
            "movdqa  %%xmm1, %%xmm5     \n\t"\
            "palignr $2, %%xmm0, %%xmm4 \n\t"\
            "palignr $4, %%xmm0, %%xmm3 \n\t"\
            "palignr $6, %%xmm0, %%xmm2 \n\t"\
            "palignr $8, %%xmm0, %%xmm1 \n\t"\
            "palignr $10,%%xmm0, %%xmm5 \n\t"\
            "paddw   %%xmm5, %%xmm0     \n\t"\
            "paddw   %%xmm3, %%xmm2     \n\t"\
            "paddw   %%xmm4, %%xmm1     \n\t"\
            "psllw   $2,     %%xmm2     \n\t"\
            "movq    (%2),   %%xmm3     \n\t"\
            "psubw   %%xmm1, %%xmm2     \n\t"\
            "paddw   %5,     %%xmm0     \n\t"\
            "pmullw  %%xmm6, %%xmm2     \n\t"\
            "paddw   %%xmm0, %%xmm2     \n\t"\
            "psraw   $5,     %%xmm2     \n\t"\
            "packuswb %%xmm2, %%xmm2    \n\t"\
            "pavgb   %%xmm3, %%xmm2     \n\t"\
            OP(%%xmm2, (%1), %%xmm4, q)\
            "add %4, %0                 \n\t"\
            "add %4, %1                 \n\t"\
            "add %3, %2                 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7        \n\t"\
        "movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\
        "1:                         \n\t"\
        "lddqu   -2(%0), %%xmm1     \n\t"\
        "movdqa  %%xmm1, %%xmm0     \n\t"\
        "punpckhbw %%xmm7, %%xmm1   \n\t"\
        "punpcklbw %%xmm7, %%xmm0   \n\t"\
        "movdqa  %%xmm1, %%xmm2     \n\t"\
        "movdqa  %%xmm1, %%xmm3     \n\t"\
        "movdqa  %%xmm1, %%xmm4     \n\t"\
        "movdqa  %%xmm1, %%xmm5     \n\t"\
        "palignr $2, %%xmm0, %%xmm4 \n\t"\
        "palignr $4, %%xmm0, %%xmm3 \n\t"\
        "palignr $6, %%xmm0, %%xmm2 \n\t"\
        "palignr $8, %%xmm0, %%xmm1 \n\t"\
        "palignr $10,%%xmm0, %%xmm5 \n\t"\
        "paddw   %%xmm5, %%xmm0     \n\t"\
        "paddw   %%xmm3, %%xmm2     \n\t"\
        "paddw   %%xmm4, %%xmm1     \n\t"\
        "psllw   $2,     %%xmm2     \n\t"\
        "psubw   %%xmm1, %%xmm2     \n\t"\
        "paddw   "MANGLE(ff_pw_16)", %%xmm0\n\t"\
        "pmullw  %%xmm6, %%xmm2     \n\t"\
        "paddw   %%xmm0, %%xmm2     \n\t"\
        "psraw   $5,     %%xmm2     \n\t"\
        "packuswb %%xmm2, %%xmm2    \n\t"\
        OP(%%xmm2, (%1), %%xmm4, q)\
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

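/* SSE2 vertical 6-tap filter, 8 columns per call; the 16-wide case is
 * handled by two adjacent calls in the qpel16 wrapper below. */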
#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    src -= 2*srcStride;\
    \
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7        \n\t"\
        "movq (%0), %%xmm0          \n\t"\
        "add %2, %0                 \n\t"\
        "movq (%0), %%xmm1          \n\t"\
        "add %2, %0                 \n\t"\
        "movq (%0), %%xmm2          \n\t"\
        "add %2, %0                 \n\t"\
        "movq (%0), %%xmm3          \n\t"\
        "add %2, %0                 \n\t"\
        "movq (%0), %%xmm4          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%xmm7, %%xmm0   \n\t"\
        "punpcklbw %%xmm7, %%xmm1   \n\t"\
        "punpcklbw %%xmm7, %%xmm2   \n\t"\
        "punpcklbw %%xmm7, %%xmm3   \n\t"\
        "punpcklbw %%xmm7, %%xmm4   \n\t"\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
        QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
        QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
        QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
    if(h==16){\
        __asm__ volatile(\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
            QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
            QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
            QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
    }\
}\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}

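/* SSE2 version of the first (vertical) hv pass, 8 columns per iteration;
 * the 16-bit intermediates go to tmp with the same 48-byte row stride
 * as the MMX variant. */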
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
    int w = (size+8)>>3;
    src -= 2*srcStride+2;
    while(w--){
        __asm__ volatile(
            "pxor %%xmm7, %%xmm7        \n\t"
            "movq (%0), %%xmm0          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm1          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm2          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm3          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm4          \n\t"
            "add %2, %0                 \n\t"
            "punpcklbw %%xmm7, %%xmm0   \n\t"
            "punpcklbw %%xmm7, %%xmm1   \n\t"
            "punpcklbw %%xmm7, %%xmm2   \n\t"
            "punpcklbw %%xmm7, %%xmm3   \n\t"
            "punpcklbw %%xmm7, %%xmm4   \n\t"
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  0*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  1*48)
            QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  2*48)
            QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  3*48)
            QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3,  4*48)
            QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4,  5*48)
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  6*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  7*48)
            : "+a"(src)
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
            : "memory"
        );
        if(size==16){
            __asm__ volatile(
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  8*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  9*48)
                QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
                QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
                QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
                QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
                : "+a"(src)
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
                : "memory"
            );
        }
        tmp += 8;
        src += 8 - (size+5)*srcStride;
    }
}

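/* Second (horizontal) hv pass over the 16-bit intermediates, SSSE3 only:
 * the 16-wide branch keeps a whole row in registers via palignr. */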
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        __asm__ volatile(\
            "1:                         \n\t"\
            "movdqa 32(%0), %%xmm4      \n\t"\
            "movdqa 16(%0), %%xmm5      \n\t"\
            "movdqa   (%0), %%xmm7      \n\t"\
            "movdqa %%xmm4, %%xmm3      \n\t"\
            "movdqa %%xmm4, %%xmm2      \n\t"\
            "movdqa %%xmm4, %%xmm1      \n\t"\
            "movdqa %%xmm4, %%xmm0      \n\t"\
            "palignr $10, %%xmm5, %%xmm0\n\t"\
            "palignr  $8, %%xmm5, %%xmm1\n\t"\
            "palignr  $6, %%xmm5, %%xmm2\n\t"\
            "palignr  $4, %%xmm5, %%xmm3\n\t"\
            "palignr  $2, %%xmm5, %%xmm4\n\t"\
            "paddw  %%xmm5, %%xmm0      \n\t"\
            "paddw  %%xmm4, %%xmm1      \n\t"\
            "paddw  %%xmm3, %%xmm2      \n\t"\
            "movdqa %%xmm5, %%xmm6      \n\t"\
            "movdqa %%xmm5, %%xmm4      \n\t"\
            "movdqa %%xmm5, %%xmm3      \n\t"\
            "palignr  $8, %%xmm7, %%xmm4\n\t"\
            "palignr  $2, %%xmm7, %%xmm6\n\t"\
            "palignr $10, %%xmm7, %%xmm3\n\t"\
            "paddw  %%xmm6, %%xmm4      \n\t"\
            "movdqa %%xmm5, %%xmm6      \n\t"\
            "palignr  $6, %%xmm7, %%xmm5\n\t"\
            "palignr  $4, %%xmm7, %%xmm6\n\t"\
            "paddw  %%xmm7, %%xmm3      \n\t"\
            "paddw  %%xmm6, %%xmm5      \n\t"\
            \
            "psubw  %%xmm1, %%xmm0      \n\t"\
            "psubw  %%xmm4, %%xmm3      \n\t"\
            "psraw      $2, %%xmm0      \n\t"\
            "psraw      $2, %%xmm3      \n\t"\
            "psubw  %%xmm1, %%xmm0      \n\t"\
            "psubw  %%xmm4, %%xmm3      \n\t"\
            "paddw  %%xmm2, %%xmm0      \n\t"\
            "paddw  %%xmm5, %%xmm3      \n\t"\
            "psraw      $2, %%xmm0      \n\t"\
            "psraw      $2, %%xmm3      \n\t"\
            "paddw  %%xmm2, %%xmm0      \n\t"\
            "paddw  %%xmm5, %%xmm3      \n\t"\
            "psraw      $6, %%xmm0      \n\t"\
            "psraw      $6, %%xmm3      \n\t"\
            "packuswb %%xmm0, %%xmm3    \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add $48, %0                \n\t"\
            "add %3, %1                 \n\t"\
            "decl %2                    \n\t"\
            " jnz 1b                    \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }else{\
        __asm__ volatile(\
            "1:                         \n\t"\
            "movdqa 16(%0), %%xmm1      \n\t"\
            "movdqa   (%0), %%xmm0      \n\t"\
            "movdqa %%xmm1, %%xmm2      \n\t"\
            "movdqa %%xmm1, %%xmm3      \n\t"\
            "movdqa %%xmm1, %%xmm4      \n\t"\
            "movdqa %%xmm1, %%xmm5      \n\t"\
            "palignr $10, %%xmm0, %%xmm5\n\t"\
            "palignr  $8, %%xmm0, %%xmm4\n\t"\
            "palignr  $6, %%xmm0, %%xmm3\n\t"\
            "palignr  $4, %%xmm0, %%xmm2\n\t"\
            "palignr  $2, %%xmm0, %%xmm1\n\t"\
            "paddw  %%xmm5, %%xmm0      \n\t"\
            "paddw  %%xmm4, %%xmm1      \n\t"\
            "paddw  %%xmm3, %%xmm2      \n\t"\
            "psubw  %%xmm1, %%xmm0      \n\t"\
            "psraw      $2, %%xmm0      \n\t"\
            "psubw  %%xmm1, %%xmm0      \n\t"\
            "paddw  %%xmm2, %%xmm0      \n\t"\
            "psraw      $2, %%xmm0      \n\t"\
            "paddw  %%xmm2, %%xmm0      \n\t"\
            "psraw      $6, %%xmm0      \n\t"\
            "packuswb %%xmm0, %%xmm0    \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add $48, %0                \n\t"\
            "add %3, %1                 \n\t"\
            "decl %2                    \n\t"\
            " jnz 1b                    \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }\
}

#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

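/* Where a dedicated SSE2/SSSE3 routine would bring no gain (or none was
 * written), the corresponding MMX2 implementation is reused directly. */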
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2

#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

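/* The _mcXY_ suffix encodes the quarter-pel position: X is the horizontal
 * and Y the vertical fractional offset, each in units of 1/4 pixel
 * (mc00 is the full-pel copy, mc22 the 2D centre position). */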
static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\


#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_,       PUT_OP, sse2)
QPEL_H264_V_XMM(avg_,  AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_,       PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, sse2)
#if HAVE_SSSE3
QPEL_H264_H_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#if HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif

/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = {
    0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};

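/* H.264 chroma MC is a bilinear filter:
 *     dst = ((8-x)*(8-y)*A + x*(8-y)*B + (8-x)*y*C + x*y*D + 32) >> 6
 * The templates below are instantiated once per OP (put/avg) and per
 * instruction set; the rounding constants come from h264_rnd_reg. */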
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd  " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd    " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#if HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"
static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"
static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif

/***********************************/
/* weighted prediction */

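/* Scalar sketch of the unidirectional weighting implemented below
 * (explicit weighted P prediction, H.264 8.4.2.3.2):
 *
 *     dst[i] = av_clip_uint8(((dst[i]*weight + (1<<log2_denom>>1)) >> log2_denom) + offset);
 *
 * The asm folds "+ offset" and the rounding term into one constant (mm5)
 * and relies on paddsw/packuswb for saturation and clipping. */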
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    __asm__ volatile(
        "movd    %0, %%mm4        \n\t"
        "movd    %1, %%mm5        \n\t"
        "movd    %2, %%mm6        \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm4, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm1 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "psraw     %%mm6, %%mm1 \n\t"
                "packuswb  %%mm7, %%mm0 \n\t"
                "packuswb  %%mm7, %%mm1 \n\t"
                "movd      %%mm0, %0    \n\t"
                "movd      %%mm1, %1    \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

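/* Bidirectional weighting (B prediction, H.264 8.4.2.3.2):
 *
 *     dst[i] = av_clip_uint8(((dst[i]*weightd + src[i]*weights + 2^log2_denom)
 *                             >> (log2_denom+1)) + ((o0+o1+1) >> 1));
 *
 * with offset being the sum of the two per-reference offsets o0+o1. The
 * constant ((offset+1)|1) << log2_denom folds the rounding term and the
 * offset into the single addend loaded into mm5. */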
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    __asm__ volatile(
        "movd    %0, %%mm3        \n\t"
        "movd    %1, %%mm4        \n\t"
        "movd    %2, %%mm5        \n\t"
        "movd    %3, %%mm6        \n\t"
        "pshufw  $0, %%mm3, %%mm3 \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm3, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm1, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)