[SCM] FFmpeg packaging branch, master, updated. debian/0.5+svn20090420-2-17-gc8d8dad
Reinhard Tartler
siretart at tauware.de
Wed May 13 05:44:17 UTC 2009
ceros-guest at users.alioth.debian.org writes:
> The following commit has been merged in the master branch:
> commit 1768760e24c881dcf232cf9c45488fcf913c4806
> Author: Andres Mejia <mcitadel at gmail.com>
> Date: Tue May 12 23:35:16 2009 -0400
>
> Add comment to 900_doxyfile patch.
> Add fpic patches but don't enable them for quilt.
Again, two separate commits would have been better here.
> diff --git a/debian/patches/900_doxyfile b/debian/patches/900_doxyfile
> index b224e19..04a2b74 100644
> --- a/debian/patches/900_doxyfile
> +++ b/debian/patches/900_doxyfile
> @@ -1,7 +1,7 @@
> -Index: ffmpeg-0.svn20080626/Doxyfile
> -===================================================================
> ---- ffmpeg-0.svn20080626.orig/Doxyfile 2008-06-26 15:36:33.000000000 +0100
> -+++ ffmpeg-0.svn20080626/Doxyfile 2008-06-26 15:37:40.000000000 +0100
> +Exclude some directories we use for packaging.
> +==========================================================================
> +--- a/Doxyfile
> ++++ b/Doxyfile
> @@ -359,7 +359,7 @@
> # excluded from the INPUT source files. This way you can easily exclude a
> # subdirectory from a directory tree whose root is specified with the INPUT tag.
Terse, but OK.
> diff --git a/debian/patches/fpic-ftbfs-fix.patch b/debian/patches/fpic-ftbfs-fix.patch
> new file mode 100644
> index 0000000..70b654d
> --- /dev/null
> +++ b/debian/patches/fpic-ftbfs-fix.patch
> @@ -0,0 +1,261 @@
> +This patch fixes FTBFS issue when using -fPIC.
This misses links to the discussion of this patch. Has this been
discussed upstream? Have you benchmarked its effects? And most
importantly, what does the patch do? I see that it changes some lines,
but please explain in English what the intent of this patch actually is.
Moreover, this is a really huge patch and should be split into smaller
hunks so that they can be reviewed more easily.
> +==========================================================================
> +--- a/libavcodec/x86/dsputil_mmx.c
> ++++ b/libavcodec/x86/dsputil_mmx.c
> +@@ -695,14 +695,14 @@
> + "punpckhdq %%mm1, %%mm1 \n\t"
> + "movd %%mm1, %3 \n\t"
> +
> +- : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
> +- "=m" (*(uint32_t*)(dst + 1*dst_stride)),
> +- "=m" (*(uint32_t*)(dst + 2*dst_stride)),
> +- "=m" (*(uint32_t*)(dst + 3*dst_stride))
> +- : "m" (*(uint32_t*)(src + 0*src_stride)),
> +- "m" (*(uint32_t*)(src + 1*src_stride)),
> +- "m" (*(uint32_t*)(src + 2*src_stride)),
> +- "m" (*(uint32_t*)(src + 3*src_stride))
> ++ : "=r" (*(uint32_t*)(dst + 0*dst_stride)),
> ++ "=r" (*(uint32_t*)(dst + 1*dst_stride)),
> ++ "=r" (*(uint32_t*)(dst + 2*dst_stride)),
> ++ "=r" (*(uint32_t*)(dst + 3*dst_stride))
> ++ : "r" (*(uint32_t*)(src + 0*src_stride)),
> ++ "r" (*(uint32_t*)(src + 1*src_stride)),
> ++ "r" (*(uint32_t*)(src + 2*src_stride)),
> ++ "r" (*(uint32_t*)(src + 3*src_stride))
> + );
> + }
> +
> +--- a/libavcodec/x86/h264dsp_mmx.c
> ++++ b/libavcodec/x86/h264dsp_mmx.c
> +@@ -943,8 +943,8 @@
> + \
> + __asm__ volatile(\
> + "pxor %%mm7, %%mm7 \n\t"\
> +- "movq %5, %%mm4 \n\t"\
> +- "movq %6, %%mm5 \n\t"\
> ++ "movq ff_pw_5, %%mm4 \n\t"\
> ++ "movq ff_pw_16, %%mm5 \n\t"\
> + "1: \n\t"\
> + "movd -1(%0), %%mm1 \n\t"\
> + "movd (%0), %%mm2 \n\t"\
> +@@ -974,17 +974,15 @@
> + "decl %2 \n\t"\
> + " jnz 1b \n\t"\
> + : "+a"(src), "+c"(dst), "+g"(h)\
> +- : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
> + );\
> + }\
> + static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
> + int h=4;\
> + __asm__ volatile(\
> +- "pxor %%mm7, %%mm7 \n\t"\
> +- "movq %0, %%mm4 \n\t"\
> +- "movq %1, %%mm5 \n\t"\
> +- :: "m"(ff_pw_5), "m"(ff_pw_16)\
> ++ "pxor %mm7, %mm7 \n\t"\
> ++ "movq ff_pw_5, %mm4 \n\t"\
> ++ "movq ff_pw_16, %mm5 \n\t"\
> + );\
> + do{\
> + __asm__ volatile(\
> +@@ -1117,7 +1115,7 @@
> + int h=8;\
> + __asm__ volatile(\
> + "pxor %%mm7, %%mm7 \n\t"\
> +- "movq %5, %%mm6 \n\t"\
> ++ "movq ff_pw_5, %%mm6 \n\t"\
> + "1: \n\t"\
> + "movq (%0), %%mm0 \n\t"\
> + "movq 1(%0), %%mm2 \n\t"\
> +@@ -1151,7 +1149,7 @@
> + "punpcklbw %%mm7, %%mm5 \n\t"\
> + "paddw %%mm3, %%mm2 \n\t"\
> + "paddw %%mm5, %%mm4 \n\t"\
> +- "movq %6, %%mm5 \n\t"\
> ++ "movq ff_pw_16, %%mm5 \n\t"\
> + "paddw %%mm5, %%mm2 \n\t"\
> + "paddw %%mm5, %%mm4 \n\t"\
> + "paddw %%mm2, %%mm0 \n\t"\
> +@@ -1165,17 +1163,15 @@
> + "decl %2 \n\t"\
> + " jnz 1b \n\t"\
> + : "+a"(src), "+c"(dst), "+g"(h)\
> +- : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
> + );\
> + }\
> + \
> + static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
> + int h=8;\
> + __asm__ volatile(\
> +- "pxor %%mm7, %%mm7 \n\t"\
> +- "movq %0, %%mm6 \n\t"\
> +- :: "m"(ff_pw_5)\
> ++ "pxor %mm7, %mm7 \n\t"\
> ++ "movq ff_pw_5, %mm6 \n\t"\
> + );\
> + do{\
> + __asm__ volatile(\
> +@@ -1211,7 +1207,7 @@
> + "punpcklbw %%mm7, %%mm5 \n\t"\
> + "paddw %%mm3, %%mm2 \n\t"\
> + "paddw %%mm5, %%mm4 \n\t"\
> +- "movq %5, %%mm5 \n\t"\
> ++ "movq ff_pw_16, %%mm5 \n\t"\
> + "paddw %%mm5, %%mm2 \n\t"\
> + "paddw %%mm5, %%mm4 \n\t"\
> + "paddw %%mm2, %%mm0 \n\t"\
> +@@ -1226,9 +1222,7 @@
> + "add %4, %1 \n\t"\
> + "add %3, %2 \n\t"\
> + : "+a"(src), "+c"(dst), "+d"(src2)\
> +- : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
> +- "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
> + );\
> + }while(--h);\
> + }\
> +@@ -1494,8 +1488,8 @@
> + int h=16;\
> + __asm__ volatile(\
> + "pxor %%xmm15, %%xmm15 \n\t"\
> +- "movdqa %6, %%xmm14 \n\t"\
> +- "movdqa %7, %%xmm13 \n\t"\
> ++ "movdqa ff_pw_5, %%xmm14 \n\t"\
> ++ "movdqa ff_pw_16, %%xmm13 \n\t"\
> + "1: \n\t"\
> + "lddqu 3(%0), %%xmm1 \n\t"\
> + "lddqu -5(%0), %%xmm7 \n\t"\
> +@@ -1549,9 +1543,7 @@
> + "decl %3 \n\t"\
> + "jg 1b \n\t"\
> + : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
> +- : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
> +- "m"(ff_pw_5), "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
> + );\
> + }
> + #else // ARCH_X86_64
> +@@ -1571,9 +1563,8 @@
> + static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
> + int h=8;\
> + __asm__ volatile(\
> +- "pxor %%xmm7, %%xmm7 \n\t"\
> +- "movdqa %0, %%xmm6 \n\t"\
> +- :: "m"(ff_pw_5)\
> ++ "pxor %xmm7, %xmm7 \n\t"\
> ++ "movdqa ff_pw_5, %xmm6 \n\t"\
> + );\
> + do{\
> + __asm__ volatile(\
> +@@ -1596,7 +1587,7 @@
> + "psllw $2, %%xmm2 \n\t"\
> + "movq (%2), %%xmm3 \n\t"\
> + "psubw %%xmm1, %%xmm2 \n\t"\
> +- "paddw %5, %%xmm5 \n\t"\
> ++ "paddw ff_pw_16,%%xmm5 \n\t"\
> + "pmullw %%xmm6, %%xmm2 \n\t"\
> + "paddw %%xmm5, %%xmm2 \n\t"\
> + "psraw $5, %%xmm2 \n\t"\
> +@@ -1607,9 +1598,7 @@
> + "add %4, %1 \n\t"\
> + "add %3, %2 \n\t"\
> + : "+a"(src), "+c"(dst), "+d"(src2)\
> +- : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
> +- "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
> + );\
> + }while(--h);\
> + }\
> +@@ -1619,7 +1608,7 @@
> + int h=8;\
> + __asm__ volatile(\
> + "pxor %%xmm7, %%xmm7 \n\t"\
> +- "movdqa %5, %%xmm6 \n\t"\
> ++ "movdqa ff_pw_5, %%xmm6 \n\t"\
> + "1: \n\t"\
> + "lddqu -5(%0), %%xmm1 \n\t"\
> + "movdqa %%xmm1, %%xmm0 \n\t"\
> +@@ -1639,7 +1628,7 @@
> + "paddw %%xmm4, %%xmm1 \n\t"\
> + "psllw $2, %%xmm2 \n\t"\
> + "psubw %%xmm1, %%xmm2 \n\t"\
> +- "paddw %6, %%xmm5 \n\t"\
> ++ "paddw ff_pw_16, %%xmm5 \n\t"\
> + "pmullw %%xmm6, %%xmm2 \n\t"\
> + "paddw %%xmm5, %%xmm2 \n\t"\
> + "psraw $5, %%xmm2 \n\t"\
> +@@ -1650,9 +1639,7 @@
> + "decl %2 \n\t"\
> + " jnz 1b \n\t"\
> + : "+a"(src), "+c"(dst), "+g"(h)\
> +- : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride),\
> +- "m"(ff_pw_5), "m"(ff_pw_16)\
> +- : "memory"\
> ++ : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
> + );\
> + }\
> + static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
> +--- a/libavcodec/x86/flacdsp_mmx.c
> ++++ b/libavcodec/x86/flacdsp_mmx.c
> +@@ -89,12 +89,12 @@
> + "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t"
> + "movsd "MANGLE(ff_pd_1)", %%xmm2 \n\t"
> + "1: \n\t"
> +- "movapd (%4,%0), %%xmm3 \n\t"
> +- "movupd -8(%5,%0), %%xmm4 \n\t"
> +- "movapd (%5,%0), %%xmm5 \n\t"
> ++ "movapd (%2,%0), %%xmm3 \n\t"
> ++ "movupd -8(%3,%0), %%xmm4 \n\t"
> ++ "movapd (%3,%0), %%xmm5 \n\t"
> + "mulpd %%xmm3, %%xmm4 \n\t"
> + "mulpd %%xmm3, %%xmm5 \n\t"
> +- "mulpd -16(%5,%0), %%xmm3 \n\t"
> ++ "mulpd -16(%3,%0), %%xmm3 \n\t"
> + "addpd %%xmm4, %%xmm1 \n\t"
> + "addpd %%xmm5, %%xmm0 \n\t"
> + "addpd %%xmm3, %%xmm2 \n\t"
> +@@ -107,9 +107,9 @@
> + "addsd %%xmm4, %%xmm1 \n\t"
> + "addsd %%xmm5, %%xmm2 \n\t"
> + "movsd %%xmm0, %1 \n\t"
> +- "movsd %%xmm1, %2 \n\t"
> +- "movsd %%xmm2, %3 \n\t"
> +- :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1]), "=m"(autoc[j+2])
> ++ "movsd %%xmm1, 8%1 \n\t"
> ++ "movsd %%xmm2, 16%1 \n\t"
> ++ :"+&r"(i), "=m"(autoc[j])
> + :"r"(data1+len), "r"(data1+len-j)
> + );
> + } else {
> +@@ -117,10 +117,10 @@
> + "movsd "MANGLE(ff_pd_1)", %%xmm0 \n\t"
> + "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t"
> + "1: \n\t"
> +- "movapd (%3,%0), %%xmm3 \n\t"
> +- "movupd -8(%4,%0), %%xmm4 \n\t"
> ++ "movapd (%2,%0), %%xmm3 \n\t"
> ++ "movupd -8(%3,%0), %%xmm4 \n\t"
> + "mulpd %%xmm3, %%xmm4 \n\t"
> +- "mulpd (%4,%0), %%xmm3 \n\t"
> ++ "mulpd (%3,%0), %%xmm3 \n\t"
> + "addpd %%xmm4, %%xmm1 \n\t"
> + "addpd %%xmm3, %%xmm0 \n\t"
> + "add $16, %0 \n\t"
> +@@ -130,8 +130,8 @@
> + "addsd %%xmm3, %%xmm0 \n\t"
> + "addsd %%xmm4, %%xmm1 \n\t"
> + "movsd %%xmm0, %1 \n\t"
> +- "movsd %%xmm1, %2 \n\t"
> +- :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
> ++ "movsd %%xmm1, 8%1 \n\t"
> ++ :"+&r"(i), "=m"(autoc[j])
> + :"r"(data1+len), "r"(data1+len-j)
> + );
> + }
> diff --git a/debian/patches/fpic-libpostproc-fix.patch b/debian/patches/fpic-libpostproc-fix.patch
> new file mode 100644
> index 0000000..7fa9d91
> --- /dev/null
> +++ b/debian/patches/fpic-libpostproc-fix.patch
> @@ -0,0 +1,314 @@
> +This patch resolves all non-PIC issues from the libpostproc library.
> +==========================================================================
> +--- a/libpostproc/postprocess_template.c
> ++++ b/libpostproc/postprocess_template.c
> +@@ -369,16 +369,16 @@
> + // FIXME rounding
> + __asm__ volatile(
> + "pxor %%mm7, %%mm7 \n\t" // 0
> +- "movq "MANGLE(b80)", %%mm6 \n\t" // MIN_SIGNED_BYTE
> ++ "movq %2, %%mm6 \n\t" // MIN_SIGNED_BYTE
> + "leal (%0, %1), %%"REG_a" \n\t"
> + "leal (%%"REG_a", %1, 4), %%"REG_c" \n\t"
> + // 0 1 2 3 4 5 6 7 8 9
> + // %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
> +- "movq "MANGLE(pQPb)", %%mm0 \n\t" // QP,..., QP
> ++ "movq %3, %%mm0 \n\t" // QP,..., QP
> + "movq %%mm0, %%mm1 \n\t" // QP,..., QP
> +- "paddusb "MANGLE(b02)", %%mm0 \n\t"
> ++ "paddusb %4, %%mm0 \n\t"
> + "psrlw $2, %%mm0 \n\t"
> +- "pand "MANGLE(b3F)", %%mm0 \n\t" // QP/4,..., QP/4
> ++ "pand %5, %%mm0 \n\t" // QP/4,..., QP/4
> + "paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
> + "movq (%0, %1, 4), %%mm2 \n\t" // line 4
> + "movq (%%"REG_c"), %%mm3 \n\t" // line 5
> +@@ -407,8 +407,8 @@
> +
> + "paddb %%mm6, %%mm5 \n\t"
> + "psrlw $2, %%mm5 \n\t"
> +- "pand "MANGLE(b3F)", %%mm5 \n\t"
> +- "psubb "MANGLE(b20)", %%mm5 \n\t" // (l5-l4)/8
> ++ "pand %5, %%mm5 \n\t"
> ++ "psubb %6, %%mm5 \n\t" // (l5-l4)/8
> +
> + "movq (%%"REG_a", %1, 2), %%mm2 \n\t"
> + "paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
> +@@ -423,7 +423,8 @@
> + "movq %%mm2, (%%"REG_c", %1) \n\t"
> +
> + :
> +- : "r" (src), "r" ((x86_reg)stride)
> ++ : "r" (src), "r" ((x86_reg)stride), "m"(b80), "m"(pQPb), "m"(b02),
> ++ "m"(b3f), "m"(b20)
> + : "%"REG_a, "%"REG_c
> + );
> + #else //HAVE_MMX2 || HAVE_AMD3DNOW
> +@@ -496,7 +497,7 @@
> + "paddusb %%mm0, %%mm0 \n\t"
> + "psubusb %%mm0, %%mm4 \n\t"
> + "pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
> +- "psubusb "MANGLE(b01)", %%mm3 \n\t"
> ++ "psubusb %3, %%mm3 \n\t"
> + "pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
> +
> + PAVGB(%%mm7, %%mm3) // d/2
> +@@ -545,7 +546,7 @@
> + "movq %%mm0, (%%"REG_c", %1, 2) \n\t" // line 7
> +
> + :
> +- : "r" (src), "r" ((x86_reg)stride), "m" (co->pQPb)
> ++ : "r" (src), "r" ((x86_reg)stride), "m" (co->pQPb), "m"(b01)
> + : "%"REG_a, "%"REG_c
> + );
> + #else //HAVE_MMX2 || HAVE_AMD3DNOW
> +@@ -675,17 +676,17 @@
> +
> + PMINUB(%%mm2, %%mm1, %%mm4) // MIN(|lenergy|,|renergy|)/8
> + "movq %2, %%mm4 \n\t" // QP //FIXME QP+1 ?
> +- "paddusb "MANGLE(b01)", %%mm4 \n\t"
> ++ "paddusb %5, %%mm4 \n\t"
> + "pcmpgtb %%mm3, %%mm4 \n\t" // |menergy|/8 < QP
> + "psubusb %%mm1, %%mm3 \n\t" // d=|menergy|/8-MIN(|lenergy|,|renergy|)/8
> + "pand %%mm4, %%mm3 \n\t"
> +
> + "movq %%mm3, %%mm1 \n\t"
> +-// "psubusb "MANGLE(b01)", %%mm3 \n\t"
> ++// "psubusb %5, %%mm3 \n\t"
> + PAVGB(%%mm7, %%mm3)
> + PAVGB(%%mm7, %%mm3)
> + "paddusb %%mm1, %%mm3 \n\t"
> +-// "paddusb "MANGLE(b01)", %%mm3 \n\t"
> ++// "paddusb %5, %%mm3 \n\t"
> +
> + "movq (%%"REG_a", %1, 2), %%mm6 \n\t" //l3
> + "movq (%0, %1, 4), %%mm5 \n\t" //l4
> +@@ -698,7 +699,7 @@
> + "pand %%mm0, %%mm3 \n\t"
> + PMINUB(%%mm5, %%mm3, %%mm0)
> +
> +- "psubusb "MANGLE(b01)", %%mm3 \n\t"
> ++ "psubusb %5, %%mm3 \n\t"
> + PAVGB(%%mm7, %%mm3)
> +
> + "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
> +@@ -730,7 +731,7 @@
> + "movq (%%"REG_a", %1), %%mm3 \n\t" // l2
> + "pxor %%mm6, %%mm2 \n\t" // -l5-1
> + "movq %%mm2, %%mm5 \n\t" // -l5-1
> +- "movq "MANGLE(b80)", %%mm4 \n\t" // 128
> ++ "movq %3, %%mm4 \n\t" // 128
> + "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
> + PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
> + PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
> +@@ -742,7 +743,7 @@
> + "pxor %%mm6, %%mm2 \n\t" // -l1-1
> + PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
> + PAVGB((%0), %%mm1) // (l0-l3+256)/2
> +- "movq "MANGLE(b80)", %%mm3 \n\t" // 128
> ++ "movq %3, %%mm3 \n\t" // 128
> + PAVGB(%%mm2, %%mm3) // ~(l2-l1)/4 + 128
> + PAVGB(%%mm1, %%mm3) // ~(l0-l3)/4 +(l2-l1)/8 + 128
> + PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
> +@@ -752,14 +753,14 @@
> + "movq (%%"REG_c", %1, 2), %%mm1 \n\t" // l7
> + "pxor %%mm6, %%mm1 \n\t" // -l7-1
> + PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
> +- "movq "MANGLE(b80)", %%mm2 \n\t" // 128
> ++ "movq %3, %%mm2 \n\t" // 128
> + PAVGB(%%mm5, %%mm2) // ~(l6-l5)/4 + 128
> + PAVGB(%%mm1, %%mm2) // ~(l4-l7)/4 +(l6-l5)/8 + 128
> + PAVGB(%%mm5, %%mm2) // ~(l4-l7)/8 +5(l6-l5)/16 + 128
> + // mm0=128-q, mm2=renergy/16 + 128, mm3=lenergy/16 + 128, mm4= menergy/16 + 128
> +
> +- "movq "MANGLE(b00)", %%mm1 \n\t" // 0
> +- "movq "MANGLE(b00)", %%mm5 \n\t" // 0
> ++ "movq %4, %%mm1 \n\t" // 0
> ++ "movq %4, %%mm5 \n\t" // 0
> + "psubb %%mm2, %%mm1 \n\t" // 128 - renergy/16
> + "psubb %%mm3, %%mm5 \n\t" // 128 - lenergy/16
> + PMAXUB(%%mm1, %%mm2) // 128 + |renergy/16|
> +@@ -768,7 +769,7 @@
> +
> + // mm0=128-q, mm3=128 + MIN(|lenergy|,|renergy|)/16, mm4= menergy/16 + 128
> +
> +- "movq "MANGLE(b00)", %%mm7 \n\t" // 0
> ++ "movq %4, %%mm7 \n\t" // 0
> + "movq %2, %%mm2 \n\t" // QP
> + PAVGB(%%mm6, %%mm2) // 128 + QP/2
> + "psubb %%mm6, %%mm2 \n\t"
> +@@ -782,13 +783,13 @@
> + // mm0=128-q, mm1= SIGN(menergy), mm2= |menergy|/16 < QP/2, mm4= d/16
> +
> + "movq %%mm4, %%mm3 \n\t" // d
> +- "psubusb "MANGLE(b01)", %%mm4 \n\t"
> ++ "psubusb %5, %%mm4 \n\t"
> + PAVGB(%%mm7, %%mm4) // d/32
> + PAVGB(%%mm7, %%mm4) // (d + 32)/64
> + "paddb %%mm3, %%mm4 \n\t" // 5d/64
> + "pand %%mm2, %%mm4 \n\t"
> +
> +- "movq "MANGLE(b80)", %%mm5 \n\t" // 128
> ++ "movq %3, %%mm5 \n\t" // 128
> + "psubb %%mm0, %%mm5 \n\t" // q
> + "paddsb %%mm6, %%mm5 \n\t" // fix bad rounding
> + "pcmpgtb %%mm5, %%mm7 \n\t" // SIGN(q)
> +@@ -810,7 +811,8 @@
> + "movq %%mm2, (%0, %1, 4) \n\t"
> +
> + :
> +- : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb)
> ++ : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb), "m"(b80),
> ++ "m"(b00), "m"(b01)
> + : "%"REG_a, "%"REG_c
> + );
> +
> +@@ -1045,10 +1047,10 @@
> + "psubusw %%mm1, %%mm5 \n\t" // ld
> +
> +
> +- "movq "MANGLE(w05)", %%mm2 \n\t" // 5
> ++ "movq %3, %%mm2 \n\t" // 5
> + "pmullw %%mm2, %%mm4 \n\t"
> + "pmullw %%mm2, %%mm5 \n\t"
> +- "movq "MANGLE(w20)", %%mm2 \n\t" // 32
> ++ "movq %4, %%mm2 \n\t" // 32
> + "paddw %%mm2, %%mm4 \n\t"
> + "paddw %%mm2, %%mm5 \n\t"
> + "psrlw $6, %%mm4 \n\t"
> +@@ -1098,7 +1100,7 @@
> + "movq %%mm0, (%0, %1) \n\t"
> +
> + : "+r" (src)
> +- : "r" ((x86_reg)stride), "m" (c->pQPb)
> ++ : "r" ((x86_reg)stride), "m" (c->pQPb), "m"(w05), "m"(w20)
> + : "%"REG_a, "%"REG_c
> + );
> + #else //HAVE_MMX2 || HAVE_AMD3DNOW
> +@@ -1237,7 +1239,7 @@
> + "movq %%mm6, %%mm0 \n\t" // max
> + "psubb %%mm7, %%mm6 \n\t" // max - min
> + "movd %%mm6, %%ecx \n\t"
> +- "cmpb "MANGLE(deringThreshold)", %%cl \n\t"
> ++ "cmpb %4, %%cl \n\t"
> + " jb 1f \n\t"
> + "lea -24(%%"REG_SP"), %%"REG_c" \n\t"
> + "and "ALIGN_MASK", %%"REG_c" \n\t"
> +@@ -1264,9 +1266,9 @@
> + "psubusb %%mm7, %%mm0 \n\t"
> + "psubusb %%mm7, %%mm2 \n\t"
> + "psubusb %%mm7, %%mm3 \n\t"
> +- "pcmpeqb "MANGLE(b00)", %%mm0 \n\t" // L10 > a ? 0 : -1
> +- "pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L20 > a ? 0 : -1
> +- "pcmpeqb "MANGLE(b00)", %%mm3 \n\t" // L00 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm0 \n\t" // L10 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm2 \n\t" // L20 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm3 \n\t" // L00 > a ? 0 : -1
> + "paddb %%mm2, %%mm0 \n\t"
> + "paddb %%mm3, %%mm0 \n\t"
> +
> +@@ -1287,9 +1289,9 @@
> + "psubusb %%mm7, %%mm2 \n\t"
> + "psubusb %%mm7, %%mm4 \n\t"
> + "psubusb %%mm7, %%mm5 \n\t"
> +- "pcmpeqb "MANGLE(b00)", %%mm2 \n\t" // L11 > a ? 0 : -1
> +- "pcmpeqb "MANGLE(b00)", %%mm4 \n\t" // L21 > a ? 0 : -1
> +- "pcmpeqb "MANGLE(b00)", %%mm5 \n\t" // L01 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm2 \n\t" // L11 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm4 \n\t" // L21 > a ? 0 : -1
> ++ "pcmpeqb %5, %%mm5 \n\t" // L01 > a ? 0 : -1
> + "paddb %%mm4, %%mm2 \n\t"
> + "paddb %%mm5, %%mm2 \n\t"
> + // 0, 2, 3, 1
> +@@ -1314,7 +1316,7 @@
> + "psubusb " #lx ", " #t1 " \n\t"\
> + "psubusb " #lx ", " #t0 " \n\t"\
> + "psubusb " #lx ", " #sx " \n\t"\
> +- "movq "MANGLE(b00)", " #lx " \n\t"\
> ++ "movq %5, " #lx " \n\t"\
> + "pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
> + "pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
> + "pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
> +@@ -1330,8 +1332,8 @@
> + PMINUB(t1, pplx, t0)\
> + "paddb " #sx ", " #ppsx " \n\t"\
> + "paddb " #psx ", " #ppsx " \n\t"\
> +- "#paddb "MANGLE(b02)", " #ppsx " \n\t"\
> +- "pand "MANGLE(b08)", " #ppsx " \n\t"\
> ++ "#paddb %6, " #ppsx " \n\t"\
> ++ "pand %7, " #ppsx " \n\t"\
> + "pcmpeqb " #lx ", " #ppsx " \n\t"\
> + "pand " #ppsx ", " #pplx " \n\t"\
> + "pandn " #dst ", " #ppsx " \n\t"\
> +@@ -1367,7 +1369,8 @@
> + DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
> +
> + "1: \n\t"
> +- : : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb), "m"(c->pQPb2)
> ++ : : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb), "m"(c->pQPb2),
> ++ "m"(deringThreshold), "m"(b00), "m"(b02), "m"(b08)
> + : "%"REG_a, "%"REG_d, "%"REG_c
> + );
> + #else //HAVE_MMX2 || HAVE_AMD3DNOW
> +@@ -2227,7 +2230,7 @@
> + #else //L1_DIFF
> + #if defined (FAST_L2_DIFF)
> + "pcmpeqb %%mm7, %%mm7 \n\t"
> +- "movq "MANGLE(b80)", %%mm6 \n\t"
> ++ "movq %4, %%mm6 \n\t"
> + "pxor %%mm0, %%mm0 \n\t"
> + #define REAL_L2_DIFF_CORE(a, b)\
> + "movq " #a ", %%mm5 \n\t"\
> +@@ -2476,7 +2479,8 @@
> +
> + "4: \n\t"
> +
> +- :: "r" (src), "r" (tempBlurred), "r"((x86_reg)stride), "m" (tempBlurredPast)
> ++ :: "r" (src), "r" (tempBlurred), "r"((x86_reg)stride),
> ++ "m" (tempBlurredPast), "m"(b80)
> + : "%"REG_a, "%"REG_d, "%"REG_c, "memory"
> + );
> + #else //HAVE_MMX2 || HAVE_AMD3DNOW
> +@@ -2730,8 +2734,8 @@
> + "movq %%mm6, %%mm1 \n\t"
> + "psllw $2, %%mm0 \n\t"
> + "psllw $2, %%mm1 \n\t"
> +- "paddw "MANGLE(w04)", %%mm0 \n\t"
> +- "paddw "MANGLE(w04)", %%mm1 \n\t"
> ++ "paddw %5, %%mm0 \n\t"
> ++ "paddw %5, %%mm1 \n\t"
> +
> + #define NEXT\
> + "movq (%0), %%mm2 \n\t"\
> +@@ -2820,7 +2824,7 @@
> + "mov %4, %0 \n\t" //FIXME
> +
> + : "+&r"(src)
> +- : "r" ((x86_reg)step), "m" (c->pQPb), "r"(sums), "g"(src)
> ++ : "r" ((x86_reg)step), "m" (c->pQPb), "r"(sums), "g"(src), "m"(w04)
> + );
> +
> + src+= step; // src points to begin of the 8x8 Block
> +@@ -3037,10 +3041,10 @@
> + "psubusw %%mm1, %%mm5 \n\t" // ld
> +
> +
> +- "movq "MANGLE(w05)", %%mm2 \n\t" // 5
> ++ "movq %4, %%mm2 \n\t" // 5
> + "pmullw %%mm2, %%mm4 \n\t"
> + "pmullw %%mm2, %%mm5 \n\t"
> +- "movq "MANGLE(w20)", %%mm2 \n\t" // 32
> ++ "movq %5, %%mm2 \n\t" // 32
> + "paddw %%mm2, %%mm4 \n\t"
> + "paddw %%mm2, %%mm5 \n\t"
> + "psrlw $6, %%mm4 \n\t"
> +@@ -3092,7 +3096,8 @@
> + "movq %%mm0, (%0, %1) \n\t"
> +
> + : "+r" (temp_src)
> +- : "r" ((x86_reg)step), "m" (c->pQPb), "m"(eq_mask)
> ++ : "r" ((x86_reg)step), "m" (c->pQPb), "m"(eq_mask), "m"(w05),
> ++ "m"(w20)
> + : "%"REG_a, "%"REG_c
> + );
> + }
> diff --git a/debian/patches/fpic-libswscale-fix.patch b/debian/patches/fpic-libswscale-fix.patch
> new file mode 100644
> index 0000000..1144176
> --- /dev/null
> +++ b/debian/patches/fpic-libswscale-fix.patch
> @@ -0,0 +1,577 @@
> +This patch is currently being worked on to resolve all non-PIC issues with
> +the libswscale library.
> +==========================================================================
> +--- a/libswscale/rgb2rgb_template.c
> ++++ b/libswscale/rgb2rgb_template.c
> +@@ -1424,9 +1424,12 @@
> + __asm__ volatile (
> + "test %%"REG_a", %%"REG_a" \n\t"
> + "jns 2f \n\t"
> +- "movq "MANGLE(mask24r)", %%mm5 \n\t"
> +- "movq "MANGLE(mask24g)", %%mm6 \n\t"
> +- "movq "MANGLE(mask24b)", %%mm7 \n\t"
> ++ "movq %0, %%mm5 \n\t"
> ++ "movq %1, %%mm6 \n\t"
> ++ "movq %2, %%mm7 \n\t"
> ++ : : "m"(mask24r), "m"(mask24g), "m"(mask24b)
> ++ );
> ++ __asm__ volatile (
> + ASMALIGN(4)
> + "1: \n\t"
> + PREFETCH" 32(%1, %%"REG_a") \n\t"
> +@@ -2147,8 +2150,8 @@
> + {
> + __asm__ volatile(
> + "mov %2, %%"REG_a" \n\t"
> +- "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
> +- "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
> ++ "movq %3, %%mm6 \n\t"
> ++ "movq %4, %%mm5 \n\t"
> + "pxor %%mm7, %%mm7 \n\t"
> + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
> + ASMALIGN(4)
> +@@ -2206,12 +2209,13 @@
> + "psraw $7, %%mm4 \n\t"
> +
> + "packuswb %%mm4, %%mm0 \n\t"
> +- "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
> ++ "paddusb %5, %%mm0 \n\t"
> +
> + MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
> + "add $8, %%"REG_a" \n\t"
> + " js 1b \n\t"
> +- : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
> ++ : : "r" (src+width*3), "r" (ydst+width), "g" (-width),
> ++ "m"(ff_bgr2YCoeff), "m"(ff_w1111), "m"(ff_bgr2YOffset)
> + : "%"REG_a, "%"REG_d
> + );
> + ydst += lumStride;
> +@@ -2220,8 +2224,8 @@
> + src -= srcStride*2;
> + __asm__ volatile(
> + "mov %4, %%"REG_a" \n\t"
> +- "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
> +- "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
> ++ "movq %5, %%mm5 \n\t"
> ++ "movq %6, %%mm6 \n\t"
> + "pxor %%mm7, %%mm7 \n\t"
> + "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
> + "add %%"REG_d", %%"REG_d" \n\t"
> +@@ -2270,8 +2274,8 @@
> + "psrlw $2, %%mm0 \n\t"
> + "psrlw $2, %%mm2 \n\t"
> + #endif
> +- "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
> +- "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
> ++ "movq %7, %%mm1 \n\t"
> ++ "movq %7, %%mm3 \n\t"
> +
> + "pmaddwd %%mm0, %%mm1 \n\t"
> + "pmaddwd %%mm2, %%mm3 \n\t"
> +@@ -2328,12 +2332,12 @@
> + "paddw %%mm1, %%mm5 \n\t"
> + "paddw %%mm3, %%mm2 \n\t"
> + "paddw %%mm5, %%mm2 \n\t"
> +- "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
> ++ "movq %5, %%mm5 \n\t"
> + "psrlw $2, %%mm4 \n\t"
> + "psrlw $2, %%mm2 \n\t"
> + #endif
> +- "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
> +- "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
> ++ "movq %7, %%mm1 \n\t"
> ++ "movq %7, %%mm3 \n\t"
> +
> + "pmaddwd %%mm4, %%mm1 \n\t"
> + "pmaddwd %%mm2, %%mm3 \n\t"
> +@@ -2357,13 +2361,16 @@
> + "punpckldq %%mm4, %%mm0 \n\t"
> + "punpckhdq %%mm4, %%mm1 \n\t"
> + "packsswb %%mm1, %%mm0 \n\t"
> +- "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
> ++ "paddb %8, %%mm0 \n\t"
> + "movd %%mm0, (%2, %%"REG_a") \n\t"
> + "punpckhdq %%mm0, %%mm0 \n\t"
> + "movd %%mm0, (%3, %%"REG_a") \n\t"
> + "add $4, %%"REG_a" \n\t"
> + " js 1b \n\t"
> +- : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
> ++ : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6),
> ++ "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth),
> ++ "m"(ff_w1111), "m"(ff_bgr2UCoeff), "m"(ff_bgr2VCoeff),
> ++ "m"(ff_bgr2UVOffset)
> + : "%"REG_a, "%"REG_d
> + );
> +
> +--- a/libswscale/rgb2rgb.c
> ++++ b/libswscale/rgb2rgb.c
> +@@ -123,6 +123,18 @@
> + DECLARE_ASM_CONST(8, uint64_t, red_15mask) = 0x00007c0000007c00ULL;
> + DECLARE_ASM_CONST(8, uint64_t, green_15mask) = 0x000003e0000003e0ULL;
> + DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL;
> ++
> ++// Some constants from swscale.c that are used here
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_bgr2YCoeff;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_bgr2UCoeff;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_bgr2VCoeff;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_bgr2YOffset;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_bgr2UVOffset;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_w1111;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24A;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24B;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24C;
> ++
> + #endif /* ARCH_X86 */
> +
> + #define RGB2YUV_SHIFT 8
> +--- a/libswscale/swscale_template.c
> ++++ b/libswscale/swscale_template.c
> +@@ -669,9 +669,9 @@
> + #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
> +
> + #define REAL_WRITERGB16(dst, dstw, index) \
> +- "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
> +- "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
> +- "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
> ++ "pand %6, %%mm2 \n\t" /* B */\
> ++ "pand %7, %%mm4 \n\t" /* G */\
> ++ "pand %6, %%mm5 \n\t" /* R */\
> + "psrlq $3, %%mm2 \n\t"\
> + \
> + "movq %%mm2, %%mm1 \n\t"\
> +@@ -695,11 +695,18 @@
> + "cmp "#dstw", "#index" \n\t"\
> + " jb 1b \n\t"
> + #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
> ++#define WRITERGB16_END \
> ++ :: "r" (&c->redDither), \
> ++ "m" (dummy), "m" (dummy), "m" (dummy),\
> ++ "r" (dest), "m" (dstW), "m" (bF8), \
> ++ "m" (bFC) \
> ++ : "%"REG_a, "%"REG_d, "%"REG_S \
> ++ );
> +
> + #define REAL_WRITERGB15(dst, dstw, index) \
> +- "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
> +- "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
> +- "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
> ++ "pand %6, %%mm2 \n\t" /* B */\
> ++ "pand %6, %%mm4 \n\t" /* G */\
> ++ "pand %6, %%mm5 \n\t" /* R */\
> + "psrlq $3, %%mm2 \n\t"\
> + "psrlq $1, %%mm5 \n\t"\
> + \
> +@@ -724,6 +731,12 @@
> + "cmp "#dstw", "#index" \n\t"\
> + " jb 1b \n\t"
> + #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
> ++#define WRITERGB15_END \
> ++ :: "r" (&c->redDither), \
> ++ "m" (dummy), "m" (dummy), "m" (dummy),\
> ++ "r" (dest), "m" (dstW), "m" (bF8) \
> ++ : "%"REG_a, "%"REG_d, "%"REG_S \
> ++ );
> +
> + #define WRITEBGR24OLD(dst, dstw, index) \
> + /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
> +@@ -1063,7 +1076,7 @@
> + #endif
> +
> + WRITERGB15(%4, %5, %%REGa)
> +- YSCALEYUV2PACKEDX_END
> ++ WRITERGB15_END
> + return;
> + case PIX_FMT_RGB565:
> + YSCALEYUV2PACKEDX_ACCURATE
> +@@ -1077,7 +1090,7 @@
> + #endif
> +
> + WRITERGB16(%4, %5, %%REGa)
> +- YSCALEYUV2PACKEDX_END
> ++ WRITERGB16_END
> + return;
> + case PIX_FMT_YUYV422:
> + YSCALEYUV2PACKEDX_ACCURATE
> +@@ -1127,7 +1140,7 @@
> + #endif
> +
> + WRITERGB15(%4, %5, %%REGa)
> +- YSCALEYUV2PACKEDX_END
> ++ WRITERGB15_END
> + return;
> + case PIX_FMT_RGB565:
> + YSCALEYUV2PACKEDX
> +@@ -1141,7 +1154,7 @@
> + #endif
> +
> + WRITERGB16(%4, %5, %%REGa)
> +- YSCALEYUV2PACKEDX_END
> ++ WRITERGB16_END
> + return;
> + case PIX_FMT_YUYV422:
> + YSCALEYUV2PACKEDX
> +@@ -1238,7 +1251,7 @@
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> +
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8)
> + );
> + return;
> + case PIX_FMT_RGB565:
> +@@ -1259,7 +1272,7 @@
> + "pop %%"REG_BP" \n\t"
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8), "m" (bFC)
> + );
> + return;
> + case PIX_FMT_YUYV422:
> +@@ -1354,7 +1367,7 @@
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> +
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8)
> + );
> + return;
> + case PIX_FMT_RGB565:
> +@@ -1376,7 +1389,7 @@
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> +
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8), "m" (bFC)
> + );
> + return;
> + case PIX_FMT_YUYV422:
> +@@ -1447,7 +1460,7 @@
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> +
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8)
> + );
> + return;
> + case PIX_FMT_RGB565:
> +@@ -1469,7 +1482,7 @@
> + "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
> +
> + :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
> +- "a" (&c->redDither)
> ++ "a" (&c->redDither), "m" (bF8), "m" (bFC)
> + );
> + return;
> + case PIX_FMT_YUYV422:
> +@@ -1504,7 +1517,7 @@
> + {
> + #if HAVE_MMX
> + __asm__ volatile(
> +- "movq "MANGLE(bm01010101)", %%mm2 \n\t"
> ++ "movq %3, %%mm2 \n\t"
> + "mov %0, %%"REG_a" \n\t"
> + "1: \n\t"
> + "movq (%1, %%"REG_a",2), %%mm0 \n\t"
> +@@ -1515,7 +1528,7 @@
> + "movq %%mm0, (%2, %%"REG_a") \n\t"
> + "add $8, %%"REG_a" \n\t"
> + " js 1b \n\t"
> +- : : "g" (-width), "r" (src+width*2), "r" (dst+width)
> ++ : : "g" (-width), "r" (src+width*2), "r" (dst+width), "m"(bm01010101)
> + : "%"REG_a
> + );
> + #else
> +@@ -1529,7 +1542,7 @@
> + {
> + #if HAVE_MMX
> + __asm__ volatile(
> +- "movq "MANGLE(bm01010101)", %%mm4 \n\t"
> ++ "movq %4, %%mm4 \n\t"
> + "mov %0, %%"REG_a" \n\t"
> + "1: \n\t"
> + "movq (%1, %%"REG_a",4), %%mm0 \n\t"
> +@@ -1546,7 +1559,8 @@
> + "movd %%mm1, (%2, %%"REG_a") \n\t"
> + "add $4, %%"REG_a" \n\t"
> + " js 1b \n\t"
> +- : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
> ++ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width),
> ++ "m"(bm01010101)
> + : "%"REG_a
> + );
> + #else
> +@@ -1590,7 +1604,7 @@
> + {
> + #if HAVE_MMX
> + __asm__ volatile(
> +- "movq "MANGLE(bm01010101)", %%mm4 \n\t"
> ++ "movq %4, %%mm4 \n\t"
> + "mov %0, %%"REG_a" \n\t"
> + "1: \n\t"
> + "movq (%1, %%"REG_a",4), %%mm0 \n\t"
> +@@ -1607,7 +1621,8 @@
> + "movd %%mm1, (%2, %%"REG_a") \n\t"
> + "add $4, %%"REG_a" \n\t"
> + " js 1b \n\t"
> +- : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
> ++ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width),
> ++ "m"(bm01010101)
> + : "%"REG_a
> + );
> + #else
> +@@ -1688,20 +1703,20 @@
> +
> + if(srcFormat == PIX_FMT_BGR24){
> + __asm__ volatile(
> +- "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
> +- "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
> +- :
> ++ "movq %0, %%mm5 \n\t"
> ++ "movq %1, %%mm6 \n\t"
> ++ : : "m"(ff_bgr24toY1Coeff), "m"(ff_bgr24toY2Coeff)
> + );
> + }else{
> + __asm__ volatile(
> +- "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
> +- "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
> +- :
> ++ "movq %0, %%mm5 \n\t"
> ++ "movq %1, %%mm6 \n\t"
> ++ : : "m"(ff_rgb24toY1Coeff), "m"(ff_rgb24toY2Coeff)
> + );
> + }
> +
> + __asm__ volatile(
> +- "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
> ++ "movq %3, %%mm4 \n\t"
> + "mov %2, %%"REG_a" \n\t"
> + "pxor %%mm7, %%mm7 \n\t"
> + "1: \n\t"
> +@@ -1731,7 +1746,7 @@
> + "add $4, %%"REG_a" \n\t"
> + " js 1b \n\t"
> + : "+r" (src)
> +- : "r" (dst+width), "g" (-width)
> ++ : "r" (dst+width), "g" (-width), "m"(ff_bgr24toYOffset)
> + : "%"REG_a
> + );
> + }
> +@@ -1771,7 +1786,7 @@
> + "paddd %%mm3, %%mm1 \n\t"
> + "paddd %%mm5, %%mm4 \n\t"
> +
> +- "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
> ++ "movq %4, %%mm3 \n\t"
> + "paddd %%mm3, %%mm0 \n\t"
> + "paddd %%mm3, %%mm2 \n\t"
> + "paddd %%mm3, %%mm1 \n\t"
> +@@ -1789,7 +1804,8 @@
> + "add $4, %%"REG_a" \n\t"
> + " js 1b \n\t"
> + : "+r" (src)
> +- : "r" (dstU+width), "r" (dstV+width), "g" (-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
> ++ : "r" (dstU+width), "r" (dstV+width), "g" (-width),
> ++ "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0]), "m"(ff_bgr24toUVOffset)
> + : "%"REG_a
> + );
> + }
> +--- a/libswscale/yuv2rgb_template.c
> ++++ b/libswscale/yuv2rgb_template.c
> +@@ -74,7 +74,7 @@
> + \
> + /* convert the luma part */\
> + "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
> +- "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
> ++ "pand %9, %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
> + \
> + "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
> + \
> +@@ -158,21 +158,63 @@
> + PREFETCH" 64(%2) \n\t" \
> + */ \
> +
> +-#define YUV2RGB_ENDLOOP(depth) \
> ++#define YUV2RGB16_ENDLOOP(depth) \
> + "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \
> + "add $4, %0 \n\t" \
> + " js 1b \n\t" \
> + \
> + : "+r" (index), "+r" (image) \
> +- : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index) \
> ++ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), \
> ++ "m"(mmx_redmask), "m"(mmx_grnmask), "m"(dummy), "m"(mmx_00ffw) \
> + ); \
> + } \
> + __asm__ volatile (EMMS); \
> + return srcSliceH; \
> +
> ++#define YUV2RGB15_ENDLOOP(depth) \
> ++ "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \
> ++ "add $4, %0 \n\t" \
> ++ " js 1b \n\t" \
> ++\
> ++ : "+r" (index), "+r" (image) \
> ++ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), \
> ++ "m"(mmx_redmask), "m"(dummy), "m"(dummy), "m"(mmx_00ffw) \
> ++ ); \
> ++ } \
> ++ __asm__ volatile (EMMS); \
> ++ return srcSliceH; \
> ++
> ++#define YUV2RGB24_ENDLOOP(depth) \
> ++ "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \
> ++ "add $4, %0 \n\t" \
> ++ " js 1b \n\t" \
> ++\
> ++ : "+r" (index), "+r" (image) \
> ++ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), \
> ++ "m"(ff_M24A), "m"(ff_M24C), "m"(ff_M24B), "m"(mmx_00ffw) \
> ++ ); \
> ++ } \
> ++ __asm__ volatile (EMMS); \
> ++ return srcSliceH; \
> ++
> ++#define YUV2RGB32_ENDLOOP(depth) \
> ++ "add $"AV_STRINGIFY(depth*8)", %1 \n\t" \
> ++ "add $4, %0 \n\t" \
> ++ " js 1b \n\t" \
> ++\
> ++ : "+r" (index), "+r" (image) \
> ++ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), \
> ++ "m"(dummy), "m"(dummy), "m"(dummy), "m"(mmx_00ffw) \
> ++ ); \
> ++ } \
> ++ __asm__ volatile (EMMS); \
> ++ return srcSliceH; \
> ++
> ++
> + static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
> + int srcSliceH, uint8_t* dst[], int dstStride[]){
> + int y, h_size;
> ++ long dummy=0;
> +
> + YUV422_UNSHIFT
> + YUV2RGB_LOOP(2)
> +@@ -190,9 +232,9 @@
> + "paddusb "RED_DITHER"(%4), %%mm1;"
> + #endif
> + /* mask unneeded bits off */
> +- "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
> +- "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
> +- "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
> ++ "pand %6, %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
> ++ "pand %7, %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
> ++ "pand %6, %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
> +
> + "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
> + "pxor %%mm4, %%mm4;" /* zero mm4 */
> +@@ -222,12 +264,13 @@
> +
> + MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
> +
> +- YUV2RGB_ENDLOOP(2)
> ++ YUV2RGB16_ENDLOOP(2)
> + }
> +
> + static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
> + int srcSliceH, uint8_t* dst[], int dstStride[]){
> + int y, h_size;
> ++ long dummy=0;
> +
> + YUV422_UNSHIFT
> + YUV2RGB_LOOP(2)
> +@@ -246,9 +289,9 @@
> + #endif
> +
> + /* mask unneeded bits off */
> +- "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
> +- "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
> +- "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
> ++ "pand %6, %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
> ++ "pand %6, %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
> ++ "pand %6, %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
> +
> + "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
> + "psrlw $1, %%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
> +@@ -279,12 +322,13 @@
> +
> + MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
> +
> +- YUV2RGB_ENDLOOP(2)
> ++ YUV2RGB15_ENDLOOP(2)
> + }
> +
> + static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
> + int srcSliceH, uint8_t* dst[], int dstStride[]){
> + int y, h_size;
> ++ long dummy=0;
> +
> + YUV422_UNSHIFT
> + YUV2RGB_LOOP(3)
> +@@ -293,8 +337,8 @@
> + YUV2RGB
> + /* mm0=B, %%mm2=G, %%mm1=R */
> + #if HAVE_MMX2
> +- "movq "MANGLE(ff_M24A)", %%mm4 \n\t"
> +- "movq "MANGLE(ff_M24C)", %%mm7 \n\t"
> ++ "movq %6, %%mm4 \n\t"
> ++ "movq %7, %%mm7 \n\t"
> + "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
> + "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
> + "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
> +@@ -313,7 +357,7 @@
> + "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
> + "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
> +
> +- "pand "MANGLE(ff_M24B)", %%mm5 \n\t" /* B5 B4 B3 */
> ++ "pand %8, %%mm5 \n\t" /* B5 B4 B3 */
> + "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
> + "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
> +
> +@@ -328,7 +372,7 @@
> +
> + "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
> + "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
> +- "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */
> ++ "pand %8, %%mm6 \n\t" /* R7 R6 R5 */
> + "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
> + \
> + "por %%mm5, %%mm3 \n\t"
> +@@ -393,7 +437,7 @@
> + "pxor %%mm4, %%mm4 \n\t"
> + #endif
> +
> +- YUV2RGB_ENDLOOP(3)
> ++ YUV2RGB24_ENDLOOP(3)
> + }
> +
> + #define RGB_PLANAR2PACKED32 \
> +@@ -440,6 +484,7 @@
> + static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
> + int srcSliceH, uint8_t* dst[], int dstStride[]){
> + int y, h_size;
> ++ long dummy=0;
> +
> + YUV422_UNSHIFT
> + YUV2RGB_LOOP(4)
> +@@ -449,5 +494,5 @@
> + "pcmpeqd %%mm3, %%mm3;" /* fill mm3 */
> + RGB_PLANAR2PACKED32
> +
> +- YUV2RGB_ENDLOOP(4)
> ++ YUV2RGB32_ENDLOOP(4)
> + }
> +--- a/libswscale/yuv2rgb.c
> ++++ b/libswscale/yuv2rgb.c
> +@@ -47,6 +47,10 @@
> + DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL;
> + DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL;
> +
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24A;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24B;
> ++extern const uint64_t __attribute__((visibility("hidden"))) ff_M24C;
> ++
> + //MMX versions
> + #undef RENAME
> + #undef HAVE_MMX2
> diff --git a/debian/patches/i386-fpic-workaround.patch b/debian/patches/i386-fpic-workaround.patch
> new file mode 100644
> index 0000000..30ea952
> --- /dev/null
> +++ b/debian/patches/i386-fpic-workaround.patch
> @@ -0,0 +1,21 @@
> +Workaround to get ffmpeg packages built on i386 using -fPIC.
> +This disables -fPIC for the problematic files.
> +==========================================================================
> +--- a/Makefile
> ++++ b/Makefile
> +@@ -83,6 +83,15 @@
> +
> + VHOOKCFLAGS += $(filter-out -mdynamic-no-pic,$(CFLAGS))
> +
> ++# Disable use of -fPIC on problematic files for i386
> ++ifeq (,$(findstring i386 i486 i586 i686,$(shell uname -m)))
> ++I386CFLAGS += $(filter-out -fPIC -DPIC,$(CFLAGS))
> ++libavcodec/x86/dsputil_mmx.o: libavcodec/x86/dsputil_mmx.c
> ++ $(CC) $(I386CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
> ++libavcodec/x86/flacdsp_mmx.o: libavcodec/x86/flacdsp_mmx.c
> ++ $(CC) $(I386CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
> ++endif
> ++
> + BASEHOOKS = fish null watermark
> + ALLHOOKS = $(BASEHOOKS) drawtext imlib2 ppm
> + ALLHOOKS_SRCS = $(addprefix vhook/, $(addsuffix .c, $(ALLHOOKS)))
> diff --git a/debian/patches/series b/debian/patches/series
> index 2e7230a..afe47da 100644
> --- a/debian/patches/series
> +++ b/debian/patches/series
> @@ -1 +1,4 @@
> 900_doxyfile
> +# fpic-ftbfs-fix.patch
> +# fpic-libpostproc-fix.patch
> +# fpic-libswscale-fix.patch
--
Gruesse/greetings,
Reinhard Tartler, KeyID 945348A4
More information about the pkg-multimedia-maintainers
mailing list