[Pkg-openssl-changes] r769 - in openssl/branches/wheezy/debian: . patches

Kurt Roeckx kroeckx at moszumanska.debian.org
Tue Mar 1 18:32:33 UTC 2016


Author: kroeckx
Date: 2016-03-01 18:32:33 +0000 (Tue, 01 Mar 2016)
New Revision: 769

Added:
   openssl/branches/wheezy/debian/patches/CVE-2015-7575.patch
   openssl/branches/wheezy/debian/patches/CVE-2016-0702.patch
   openssl/branches/wheezy/debian/patches/CVE-2016-0705.patch
   openssl/branches/wheezy/debian/patches/CVE-2016-0797.patch
   openssl/branches/wheezy/debian/patches/CVE-2016-0798.patch
   openssl/branches/wheezy/debian/patches/CVE-2016-0799.patch
   openssl/branches/wheezy/debian/patches/Disable-EXPORT-and-LOW-ciphers.patch
Modified:
   openssl/branches/wheezy/debian/changelog
   openssl/branches/wheezy/debian/libssl1.0.0.symbols
   openssl/branches/wheezy/debian/patches/series
   openssl/branches/wheezy/debian/patches/version-script.patch
Log:
Changes for wheezy


Modified: openssl/branches/wheezy/debian/changelog
===================================================================
--- openssl/branches/wheezy/debian/changelog	2016-03-01 18:32:07 UTC (rev 768)
+++ openssl/branches/wheezy/debian/changelog	2016-03-01 18:32:33 UTC (rev 769)
@@ -1,3 +1,25 @@
+openssl (1.0.1e-2+deb7u20) wheezy-security; urgency=medium
+
+  * Fix CVE-2016-0797
+  * Fix CVE-2016-0798
+  * Fix CVE-2016-0799
+  * Fix CVE-2016-0702
+  * Fix CVE-2016-0705
+  * Disable EXPORT and LOW ciphers: The DROWN attack (CVE-2016-0800)
+    makes use of those, and the SLOTH attack (CVE-2015-7575) can make use
+    of them too.
+
+ -- Kurt Roeckx <kurt at roeckx.be>  Sun, 28 Feb 2016 23:36:32 +0100
+
+openssl (1.0.1e-2+deb7u19) wheezy-security; urgency=high
+
+  * Non-maintainer upload by the Security Team.
+  * Add CVE-2015-7575.patch patch.
+    CVE-2015-7575: SLOTH: Security Losses from Obsolete and Truncated
+    Transcript Hashes.
+
+ -- Salvatore Bonaccorso <carnil at debian.org>  Thu, 07 Jan 2016 21:10:00 +0100
+
 openssl (1.0.1e-2+deb7u18) wheezy-security; urgency=medium
 
   * Fix CVE-2015-3194

Modified: openssl/branches/wheezy/debian/libssl1.0.0.symbols
===================================================================
--- openssl/branches/wheezy/debian/libssl1.0.0.symbols	2016-03-01 18:32:07 UTC (rev 768)
+++ openssl/branches/wheezy/debian/libssl1.0.0.symbols	2016-03-01 18:32:33 UTC (rev 769)
@@ -2,7 +2,9 @@
  *@OPENSSL_1.0.0 1.0.0
  *@OPENSSL_1.0.1 1.0.1
  *@OPENSSL_1.0.1d 1.0.1d
+ *@OPENSSL_1.0.1s 1.0.1e-2+deb7u20
 libssl.so.1.0.0 libssl1.0.0 #MINVER#
  *@OPENSSL_1.0.0 1.0.0
  *@OPENSSL_1.0.1 1.0.1
  *@OPENSSL_1.0.1d 1.0.1d
+ *@OPENSSL_1.0.1s 1.0.1e-2+deb7u20

Added: openssl/branches/wheezy/debian/patches/CVE-2015-7575.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2015-7575.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2015-7575.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,59 @@
+From 5e1ff664f95ab4c9176b3e86b5111e5777bad61a Mon Sep 17 00:00:00 2001
+From: "Dr. Stephen Henson" <steve at openssl.org>
+Date: Tue, 15 Oct 2013 14:15:54 +0100
+Subject: [PATCH] Don't use RSA+MD5 with TLS 1.2
+
+Since the TLS 1.2 supported signature algorithms extension is less
+sophisticated in OpenSSL 1.0.1, this has to be done in two stages.
+
+RSA+MD5 is removed from supported signature algorithms extension:
+any compliant implementation should never use RSA+MD5 as a result.
+
+To cover the case of a broken implementation using RSA+MD5 anyway,
+disable lookup of the MD5 algorithm in TLS 1.2.
+---
+ ssl/t1_lib.c | 16 ----------------
+ 1 file changed, 16 deletions(-)
+
+diff --git a/ssl/t1_lib.c b/ssl/t1_lib.c
+index f93216d..33afdeb 100644
+--- a/ssl/t1_lib.c
++++ b/ssl/t1_lib.c
+@@ -342,19 +342,11 @@ static unsigned char tls12_sigalgs[] = {
+ #ifndef OPENSSL_NO_SHA
+ 	tlsext_sigalg(TLSEXT_hash_sha1)
+ #endif
+-#ifndef OPENSSL_NO_MD5
+-	tlsext_sigalg_rsa(TLSEXT_hash_md5)
+-#endif
+ };
+ 
+ int tls12_get_req_sig_algs(SSL *s, unsigned char *p)
+ 	{
+ 	size_t slen = sizeof(tls12_sigalgs);
+-#ifdef OPENSSL_FIPS
+-	/* If FIPS mode don't include MD5 which is last */
+-	if (FIPS_mode())
+-		slen -= 2;
+-#endif
+ 	if (p)
+ 		memcpy(p, tls12_sigalgs, slen);
+ 	return (int)slen;
+@@ -2452,14 +2444,6 @@ const EVP_MD *tls12_get_hash(unsigned char hash_alg)
+ 	{
+ 	switch(hash_alg)
+ 		{
+-#ifndef OPENSSL_NO_MD5
+-		case TLSEXT_hash_md5:
+-#ifdef OPENSSL_FIPS
+-		if (FIPS_mode())
+-			return NULL;
+-#endif
+-		return EVP_md5();
+-#endif
+ #ifndef OPENSSL_NO_SHA
+ 		case TLSEXT_hash_sha1:
+ 		return EVP_sha1();
+-- 
+2.1.4
+
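For orientation, a minimal standalone sketch (not part of the patch above) of what its second stage achieves: once MD5 is dropped from the hash lookup, an RSA+MD5 signature offer no longer resolves to a digest, so the handshake fails instead of silently falling back to MD5. The TLSEXT_hash_* values follow the TLS 1.2 registry; lookup_hash() is an illustration, not the real tls12_get_hash().

#include <stdio.h>

#define TLSEXT_hash_md5    1   /* removed from the lookup by the patch */
#define TLSEXT_hash_sha1   2
#define TLSEXT_hash_sha256 4

/* Mirrors the shape of the patched tls12_get_hash(): MD5 is simply absent. */
static const char *lookup_hash(unsigned char hash_alg)
{
    switch (hash_alg) {
    case TLSEXT_hash_sha1:
        return "SHA-1";
    case TLSEXT_hash_sha256:
        return "SHA-256";
    default:
        return NULL;            /* includes TLSEXT_hash_md5 */
    }
}

int main(void)
{
    printf("md5:    %s\n", lookup_hash(TLSEXT_hash_md5)    ? "accepted" : "rejected");
    printf("sha256: %s\n", lookup_hash(TLSEXT_hash_sha256) ? "accepted" : "rejected");
    return 0;
}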

Added: openssl/branches/wheezy/debian/patches/CVE-2016-0702.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2016-0702.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2016-0702.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,1103 @@
+Index: openssl-1.0.1e/crypto/bn/asm/x86_64-mont5.pl
+===================================================================
+--- openssl-1.0.1e.orig/crypto/bn/asm/x86_64-mont5.pl
++++ openssl-1.0.1e/crypto/bn/asm/x86_64-mont5.pl
+@@ -66,60 +66,113 @@ bn_mul_mont_gather5:
+ .align	16
+ .Lmul_enter:
+ 	mov	${num}d,${num}d
+-	mov	`($win64?56:8)`(%rsp),%r10d	# load 7th argument
++	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
++	lea	.Linc(%rip),%r10
+ 	push	%rbx
+ 	push	%rbp
+ 	push	%r12
+ 	push	%r13
+ 	push	%r14
+ 	push	%r15
+-___
+-$code.=<<___ if ($win64);
+-	lea	-0x28(%rsp),%rsp
+-	movaps	%xmm6,(%rsp)
+-	movaps	%xmm7,0x10(%rsp)
++
+ .Lmul_alloca:
+-___
+-$code.=<<___;
+ 	mov	%rsp,%rax
+ 	lea	2($num),%r11
+ 	neg	%r11
+-	lea	(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2))
++	lea	-264(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2)+256+8)
+ 	and	\$-1024,%rsp		# minimize TLB usage
+ 
+ 	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
+ .Lmul_body:
+-	mov	$bp,%r12		# reassign $bp
++	lea	128($bp),%r12		# reassign $bp (+size optimization)
+ ___
+ 		$bp="%r12";
+ 		$STRIDE=2**5*8;		# 5 is "window size"
+ 		$N=$STRIDE/4;		# should match cache line size
+ $code.=<<___;
+-	mov	%r10,%r11
+-	shr	\$`log($N/8)/log(2)`,%r10
+-	and	\$`$N/8-1`,%r11
+-	not	%r10
+-	lea	.Lmagic_masks(%rip),%rax
+-	and	\$`2**5/($N/8)-1`,%r10	# 5 is "window size"
+-	lea	96($bp,%r11,8),$bp	# pointer within 1st cache line
+-	movq	0(%rax,%r10,8),%xmm4	# set of masks denoting which
+-	movq	8(%rax,%r10,8),%xmm5	# cache line contains element
+-	movq	16(%rax,%r10,8),%xmm6	# denoted by 7th argument
+-	movq	24(%rax,%r10,8),%xmm7
+-
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
++	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
++	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
++	lea	24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
++	and	\$-16,%r10
++
++	pshufd	\$0,%xmm5,%xmm5		# broadcast index
++	movdqa	%xmm1,%xmm4
++	movdqa	%xmm1,%xmm2
++___
++########################################################################
++# calculate mask by comparing 0..31 to index and save result to stack
++#
++$code.=<<___;
++	paddd	%xmm0,%xmm1
++	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
++	.byte	0x67
++	movdqa	%xmm4,%xmm3
++___
++for($k=0;$k<$STRIDE/16-4;$k+=4) {
++$code.=<<___;
++	paddd	%xmm1,%xmm2
++	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
++	movdqa	%xmm0,`16*($k+0)+112`(%r10)
++	movdqa	%xmm4,%xmm0
++
++	paddd	%xmm2,%xmm3
++	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
++	movdqa	%xmm1,`16*($k+1)+112`(%r10)
++	movdqa	%xmm4,%xmm1
++
++	paddd	%xmm3,%xmm0
++	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
++	movdqa	%xmm2,`16*($k+2)+112`(%r10)
++	movdqa	%xmm4,%xmm2
++
++	paddd	%xmm0,%xmm1
++	pcmpeqd	%xmm5,%xmm0
++	movdqa	%xmm3,`16*($k+3)+112`(%r10)
++	movdqa	%xmm4,%xmm3
++___
++}
++$code.=<<___;				# last iteration can be optimized
++	paddd	%xmm1,%xmm2
++	pcmpeqd	%xmm5,%xmm1
++	movdqa	%xmm0,`16*($k+0)+112`(%r10)
++
++	paddd	%xmm2,%xmm3
++	.byte	0x67
++	pcmpeqd	%xmm5,%xmm2
++	movdqa	%xmm1,`16*($k+1)+112`(%r10)
++
++	pcmpeqd	%xmm5,%xmm3
++	movdqa	%xmm2,`16*($k+2)+112`(%r10)
++	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
++
++	pand	`16*($k+1)-128`($bp),%xmm1
++	pand	`16*($k+2)-128`($bp),%xmm2
++	movdqa	%xmm3,`16*($k+3)+112`(%r10)
++	pand	`16*($k+3)-128`($bp),%xmm3
+ 	por	%xmm2,%xmm0
++	por	%xmm3,%xmm1
++___
++for($k=0;$k<$STRIDE/16-4;$k+=4) {
++$code.=<<___;
++	movdqa	`16*($k+0)-128`($bp),%xmm4
++	movdqa	`16*($k+1)-128`($bp),%xmm5
++	movdqa	`16*($k+2)-128`($bp),%xmm2
++	pand	`16*($k+0)+112`(%r10),%xmm4
++	movdqa	`16*($k+3)-128`($bp),%xmm3
++	pand	`16*($k+1)+112`(%r10),%xmm5
++	por	%xmm4,%xmm0
++	pand	`16*($k+2)+112`(%r10),%xmm2
++	por	%xmm5,%xmm1
++	pand	`16*($k+3)+112`(%r10),%xmm3
++	por	%xmm2,%xmm0
++	por	%xmm3,%xmm1
++___
++}
++$code.=<<___;
++	por	%xmm1,%xmm0
++	pshufd	\$0x4e,%xmm0,%xmm1
++	por	%xmm1,%xmm0
+ 	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	movq	%xmm0,$m0		# m0=bp[0]
+ 
+ 	mov	($n0),$n0		# pull n0[0] value
+@@ -128,29 +181,14 @@ $code.=<<___;
+ 	xor	$i,$i			# i=0
+ 	xor	$j,$j			# j=0
+ 
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+-
+ 	mov	$n0,$m1
+ 	mulq	$m0			# ap[0]*bp[0]
+ 	mov	%rax,$lo0
+ 	mov	($np),%rax
+ 
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
+-
+ 	imulq	$lo0,$m1		# "tp[0]"*n0
+ 	mov	%rdx,$hi0
+ 
+-	por	%xmm2,%xmm0
+-	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	mulq	$m1			# np[0]*m1
+ 	add	%rax,$lo0		# discarded
+ 	mov	8($ap),%rax
+@@ -183,8 +221,6 @@ $code.=<<___;
+ 	cmp	$num,$j
+ 	jne	.L1st
+ 
+-	movq	%xmm0,$m0		# bp[1]
+-
+ 	add	%rax,$hi1
+ 	mov	($ap),%rax		# ap[0]
+ 	adc	\$0,%rdx
+@@ -204,33 +240,46 @@ $code.=<<___;
+ 	jmp	.Louter
+ .align	16
+ .Louter:
++	lea	24+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
++	and	\$-16,%rdx
++	pxor	%xmm4,%xmm4
++	pxor	%xmm5,%xmm5
++___
++for($k=0;$k<$STRIDE/16;$k+=4) {
++$code.=<<___;
++	movdqa	`16*($k+0)-128`($bp),%xmm0
++	movdqa	`16*($k+1)-128`($bp),%xmm1
++	movdqa	`16*($k+2)-128`($bp),%xmm2
++	movdqa	`16*($k+3)-128`($bp),%xmm3
++	pand	`16*($k+0)-128`(%rdx),%xmm0
++	pand	`16*($k+1)-128`(%rdx),%xmm1
++	por	%xmm0,%xmm4
++	pand	`16*($k+2)-128`(%rdx),%xmm2
++	por	%xmm1,%xmm5
++	pand	`16*($k+3)-128`(%rdx),%xmm3
++	por	%xmm2,%xmm4
++	por	%xmm3,%xmm5
++___
++}
++$code.=<<___;
++	por	%xmm5,%xmm4
++	pshufd	\$0x4e,%xmm4,%xmm0
++	por	%xmm4,%xmm0
++	lea	$STRIDE($bp),$bp
++	movq	%xmm0,$m0		# m0=bp[i]
++
+ 	xor	$j,$j			# j=0
+ 	mov	$n0,$m1
+ 	mov	(%rsp),$lo0
+ 
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+-
+ 	mulq	$m0			# ap[0]*bp[i]
+ 	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
+ 	mov	($np),%rax
+ 	adc	\$0,%rdx
+ 
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
+-
+ 	imulq	$lo0,$m1		# tp[0]*n0
+ 	mov	%rdx,$hi0
+ 
+-	por	%xmm2,%xmm0
+-	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	mulq	$m1			# np[0]*m1
+ 	add	%rax,$lo0		# discarded
+ 	mov	8($ap),%rax
+@@ -266,8 +315,6 @@ $code.=<<___;
+ 	cmp	$num,$j
+ 	jne	.Linner
+ 
+-	movq	%xmm0,$m0		# bp[i+1]
+-
+ 	add	%rax,$hi1
+ 	mov	($ap),%rax		# ap[0]
+ 	adc	\$0,%rdx
+@@ -321,13 +368,7 @@ $code.=<<___;
+ 
+ 	mov	8(%rsp,$num,8),%rsi	# restore %rsp
+ 	mov	\$1,%rax
+-___
+-$code.=<<___ if ($win64);
+-	movaps	(%rsi),%xmm6
+-	movaps	0x10(%rsi),%xmm7
+-	lea	0x28(%rsi),%rsi
+-___
+-$code.=<<___;
++
+ 	mov	(%rsi),%r15
+ 	mov	8(%rsi),%r14
+ 	mov	16(%rsi),%r13
+@@ -348,91 +389,130 @@ $code.=<<___;
+ bn_mul4x_mont_gather5:
+ .Lmul4x_enter:
+ 	mov	${num}d,${num}d
+-	mov	`($win64?56:8)`(%rsp),%r10d	# load 7th argument
++	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
++	lea	.Linc(%rip),%r10
+ 	push	%rbx
+ 	push	%rbp
+ 	push	%r12
+ 	push	%r13
+ 	push	%r14
+ 	push	%r15
+-___
+-$code.=<<___ if ($win64);
+-	lea	-0x28(%rsp),%rsp
+-	movaps	%xmm6,(%rsp)
+-	movaps	%xmm7,0x10(%rsp)
++
+ .Lmul4x_alloca:
+-___
+-$code.=<<___;
+ 	mov	%rsp,%rax
+ 	lea	4($num),%r11
+ 	neg	%r11
+-	lea	(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+4))
++	lea	-256(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+4)+256)
+ 	and	\$-1024,%rsp		# minimize TLB usage
+ 
+ 	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
+ .Lmul4x_body:
+ 	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
+-	mov	%rdx,%r12		# reassign $bp
++	lea	128(%rdx),%r12		# reassign $bp (+size optimization)
+ ___
+ 		$bp="%r12";
+ 		$STRIDE=2**5*8;		# 5 is "window size"
+ 		$N=$STRIDE/4;		# should match cache line size
+ $code.=<<___;
+-	mov	%r10,%r11
+-	shr	\$`log($N/8)/log(2)`,%r10
+-	and	\$`$N/8-1`,%r11
+-	not	%r10
+-	lea	.Lmagic_masks(%rip),%rax
+-	and	\$`2**5/($N/8)-1`,%r10	# 5 is "window size"
+-	lea	96($bp,%r11,8),$bp	# pointer within 1st cache line
+-	movq	0(%rax,%r10,8),%xmm4	# set of masks denoting which
+-	movq	8(%rax,%r10,8),%xmm5	# cache line contains element
+-	movq	16(%rax,%r10,8),%xmm6	# denoted by 7th argument
+-	movq	24(%rax,%r10,8),%xmm7
+-
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
++	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
++	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
++	lea	32-112(%rsp,$num,8),%r10# place the mask after tp[num+4] (+ICache optimization)
++
++	pshufd	\$0,%xmm5,%xmm5		# broadcast index
++	movdqa	%xmm1,%xmm4
++	.byte	0x67,0x67
++	movdqa	%xmm1,%xmm2
++___
++########################################################################
++# calculate mask by comparing 0..31 to index and save result to stack
++#
++$code.=<<___;
++	paddd	%xmm0,%xmm1
++	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
++	.byte	0x67
++	movdqa	%xmm4,%xmm3
++___
++for($k=0;$k<$STRIDE/16-4;$k+=4) {
++$code.=<<___;
++	paddd	%xmm1,%xmm2
++	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
++	movdqa	%xmm0,`16*($k+0)+112`(%r10)
++	movdqa	%xmm4,%xmm0
++
++	paddd	%xmm2,%xmm3
++	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
++	movdqa	%xmm1,`16*($k+1)+112`(%r10)
++	movdqa	%xmm4,%xmm1
++
++	paddd	%xmm3,%xmm0
++	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
++	movdqa	%xmm2,`16*($k+2)+112`(%r10)
++	movdqa	%xmm4,%xmm2
++
++	paddd	%xmm0,%xmm1
++	pcmpeqd	%xmm5,%xmm0
++	movdqa	%xmm3,`16*($k+3)+112`(%r10)
++	movdqa	%xmm4,%xmm3
++___
++}
++$code.=<<___;				# last iteration can be optimized
++	paddd	%xmm1,%xmm2
++	pcmpeqd	%xmm5,%xmm1
++	movdqa	%xmm0,`16*($k+0)+112`(%r10)
++
++	paddd	%xmm2,%xmm3
++	.byte	0x67
++	pcmpeqd	%xmm5,%xmm2
++	movdqa	%xmm1,`16*($k+1)+112`(%r10)
++
++	pcmpeqd	%xmm5,%xmm3
++	movdqa	%xmm2,`16*($k+2)+112`(%r10)
++	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
++
++	pand	`16*($k+1)-128`($bp),%xmm1
++	pand	`16*($k+2)-128`($bp),%xmm2
++	movdqa	%xmm3,`16*($k+3)+112`(%r10)
++	pand	`16*($k+3)-128`($bp),%xmm3
++	por	%xmm2,%xmm0
++	por	%xmm3,%xmm1
++___
++for($k=0;$k<$STRIDE/16-4;$k+=4) {
++$code.=<<___;
++	movdqa	`16*($k+0)-128`($bp),%xmm4
++	movdqa	`16*($k+1)-128`($bp),%xmm5
++	movdqa	`16*($k+2)-128`($bp),%xmm2
++	pand	`16*($k+0)+112`(%r10),%xmm4
++	movdqa	`16*($k+3)-128`($bp),%xmm3
++	pand	`16*($k+1)+112`(%r10),%xmm5
++	por	%xmm4,%xmm0
++	pand	`16*($k+2)+112`(%r10),%xmm2
++	por	%xmm5,%xmm1
++	pand	`16*($k+3)+112`(%r10),%xmm3
+ 	por	%xmm2,%xmm0
++	por	%xmm3,%xmm1
++___
++}
++$code.=<<___;
++	por	%xmm1,%xmm0
++	pshufd	\$0x4e,%xmm0,%xmm1
++	por	%xmm1,%xmm0
+ 	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	movq	%xmm0,$m0		# m0=bp[0]
++
+ 	mov	($n0),$n0		# pull n0[0] value
+ 	mov	($ap),%rax
+ 
+ 	xor	$i,$i			# i=0
+ 	xor	$j,$j			# j=0
+ 
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+-
+ 	mov	$n0,$m1
+ 	mulq	$m0			# ap[0]*bp[0]
+ 	mov	%rax,$A[0]
+ 	mov	($np),%rax
+ 
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
+-
+ 	imulq	$A[0],$m1		# "tp[0]"*n0
+ 	mov	%rdx,$A[1]
+ 
+-	por	%xmm2,%xmm0
+-	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	mulq	$m1			# np[0]*m1
+ 	add	%rax,$A[0]		# discarded
+ 	mov	8($ap),%rax
+@@ -550,8 +630,6 @@ $code.=<<___;
+ 	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
+ 	mov	%rdx,$N[0]
+ 
+-	movq	%xmm0,$m0		# bp[1]
+-
+ 	xor	$N[1],$N[1]
+ 	add	$A[0],$N[0]
+ 	adc	\$0,$N[1]
+@@ -561,12 +639,34 @@ $code.=<<___;
+ 	lea	1($i),$i		# i++
+ .align	4
+ .Louter4x:
++	lea	32+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
++	pxor	%xmm4,%xmm4
++	pxor	%xmm5,%xmm5
++___
++for($k=0;$k<$STRIDE/16;$k+=4) {
++$code.=<<___;
++	movdqa	`16*($k+0)-128`($bp),%xmm0
++	movdqa	`16*($k+1)-128`($bp),%xmm1
++	movdqa	`16*($k+2)-128`($bp),%xmm2
++	movdqa	`16*($k+3)-128`($bp),%xmm3
++	pand	`16*($k+0)-128`(%rdx),%xmm0
++	pand	`16*($k+1)-128`(%rdx),%xmm1
++	por	%xmm0,%xmm4
++	pand	`16*($k+2)-128`(%rdx),%xmm2
++	por	%xmm1,%xmm5
++	pand	`16*($k+3)-128`(%rdx),%xmm3
++	por	%xmm2,%xmm4
++	por	%xmm3,%xmm5
++___
++}
++$code.=<<___;
++	por	%xmm5,%xmm4
++	pshufd	\$0x4e,%xmm4,%xmm0
++	por	%xmm4,%xmm0
++	lea	$STRIDE($bp),$bp
++	movq	%xmm0,$m0		# m0=bp[i]
++
+ 	xor	$j,$j			# j=0
+-	movq	`0*$STRIDE/4-96`($bp),%xmm0
+-	movq	`1*$STRIDE/4-96`($bp),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($bp),%xmm2
+-	pand	%xmm5,%xmm1
+ 
+ 	mov	(%rsp),$A[0]
+ 	mov	$n0,$m1
+@@ -575,18 +675,9 @@ $code.=<<___;
+ 	mov	($np),%rax
+ 	adc	\$0,%rdx
+ 
+-	movq	`3*$STRIDE/4-96`($bp),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
+-
+ 	imulq	$A[0],$m1		# tp[0]*n0
+ 	mov	%rdx,$A[1]
+ 
+-	por	%xmm2,%xmm0
+-	lea	$STRIDE($bp),$bp
+-	por	%xmm3,%xmm0
+-
+ 	mulq	$m1			# np[0]*m1
+ 	add	%rax,$A[0]		# "$N[0]", discarded
+ 	mov	8($ap),%rax
+@@ -718,7 +809,6 @@ $code.=<<___;
+ 	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
+ 	mov	%rdx,$N[0]
+ 
+-	movq	%xmm0,$m0		# bp[i+1]
+ 	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
+ 
+ 	xor	$N[1],$N[1]
+@@ -809,13 +899,7 @@ ___
+ $code.=<<___;
+ 	mov	8(%rsp,$num,8),%rsi	# restore %rsp
+ 	mov	\$1,%rax
+-___
+-$code.=<<___ if ($win64);
+-	movaps	(%rsi),%xmm6
+-	movaps	0x10(%rsi),%xmm7
+-	lea	0x28(%rsi),%rsi
+-___
+-$code.=<<___;
++
+ 	mov	(%rsi),%r15
+ 	mov	8(%rsi),%r14
+ 	mov	16(%rsi),%r13
+@@ -830,8 +914,8 @@ ___
+ }}}
+ 
+ {
+-my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
+-				("%rdi","%rsi","%rdx","%rcx"); # Unix order
++my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9d") : # Win64 order
++				("%rdi","%rsi","%rdx","%ecx"); # Unix order
+ my $out=$inp;
+ my $STRIDE=2**5*8;
+ my $N=$STRIDE/4;
+@@ -859,53 +943,89 @@ bn_scatter5:
+ .type	bn_gather5,\@abi-omnipotent
+ .align	16
+ bn_gather5:
+-___
+-$code.=<<___ if ($win64);
+-.LSEH_begin_bn_gather5:
++.LSEH_begin_bn_gather5:                        # Win64 thing, but harmless in other cases
+ 	# I can't trust assembler to use specific encoding:-(
+-	.byte	0x48,0x83,0xec,0x28		#sub	\$0x28,%rsp
+-	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
+-	.byte	0x0f,0x29,0x7c,0x24,0x10	#movdqa	%xmm7,0x10(%rsp)
+-___
+-$code.=<<___;
+-	mov	$idx,%r11
+-	shr	\$`log($N/8)/log(2)`,$idx
+-	and	\$`$N/8-1`,%r11
+-	not	$idx
+-	lea	.Lmagic_masks(%rip),%rax
+-	and	\$`2**5/($N/8)-1`,$idx	# 5 is "window size"
+-	lea	96($tbl,%r11,8),$tbl	# pointer within 1st cache line
+-	movq	0(%rax,$idx,8),%xmm4	# set of masks denoting which
+-	movq	8(%rax,$idx,8),%xmm5	# cache line contains element
+-	movq	16(%rax,$idx,8),%xmm6	# denoted by 7th argument
+-	movq	24(%rax,$idx,8),%xmm7
++	.byte	0x4c,0x8d,0x14,0x24			# lea    (%rsp),%r10
++	.byte	0x48,0x81,0xec,0x08,0x01,0x00,0x00	# sub	$0x108,%rsp
++	lea	.Linc(%rip),%rax
++	and	\$-16,%rsp		# shouldn't be formally required
++
++	movd	$idx,%xmm5
++	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
++	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
++	lea	128($tbl),%r11		# size optimization
++	lea	128(%rsp),%rax		# size optimization
++
++	pshufd	\$0,%xmm5,%xmm5		# broadcast $idx
++	movdqa	%xmm1,%xmm4
++	movdqa	%xmm1,%xmm2
++___
++########################################################################
++# calculate mask by comparing 0..31 to $idx and save result to stack
++#
++for($i=0;$i<$STRIDE/16;$i+=4) {
++$code.=<<___;
++	paddd	%xmm0,%xmm1
++	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
++___
++$code.=<<___	if ($i);
++	movdqa	%xmm3,`16*($i-1)-128`(%rax)
++___
++$code.=<<___;
++	movdqa	%xmm4,%xmm3
++
++	paddd	%xmm1,%xmm2
++	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
++	movdqa	%xmm0,`16*($i+0)-128`(%rax)
++	movdqa	%xmm4,%xmm0
++
++	paddd	%xmm2,%xmm3
++	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
++	movdqa	%xmm1,`16*($i+1)-128`(%rax)
++	movdqa	%xmm4,%xmm1
++
++	paddd	%xmm3,%xmm0
++	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
++	movdqa	%xmm2,`16*($i+2)-128`(%rax)
++	movdqa	%xmm4,%xmm2
++___
++}
++$code.=<<___;
++	movdqa	%xmm3,`16*($i-1)-128`(%rax)
+ 	jmp	.Lgather
+-.align	16
+-.Lgather:
+-	movq	`0*$STRIDE/4-96`($tbl),%xmm0
+-	movq	`1*$STRIDE/4-96`($tbl),%xmm1
+-	pand	%xmm4,%xmm0
+-	movq	`2*$STRIDE/4-96`($tbl),%xmm2
+-	pand	%xmm5,%xmm1
+-	movq	`3*$STRIDE/4-96`($tbl),%xmm3
+-	pand	%xmm6,%xmm2
+-	por	%xmm1,%xmm0
+-	pand	%xmm7,%xmm3
+-	por	%xmm2,%xmm0
+-	lea	$STRIDE($tbl),$tbl
+-	por	%xmm3,%xmm0
+ 
++.align	32
++.Lgather:
++	pxor	%xmm4,%xmm4
++	pxor	%xmm5,%xmm5
++___
++for($i=0;$i<$STRIDE/16;$i+=4) {
++$code.=<<___;
++	movdqa	`16*($i+0)-128`(%r11),%xmm0
++	movdqa	`16*($i+1)-128`(%r11),%xmm1
++	movdqa	`16*($i+2)-128`(%r11),%xmm2
++	pand	`16*($i+0)-128`(%rax),%xmm0
++	movdqa	`16*($i+3)-128`(%r11),%xmm3
++	pand	`16*($i+1)-128`(%rax),%xmm1
++	por	%xmm0,%xmm4
++	pand	`16*($i+2)-128`(%rax),%xmm2
++	por	%xmm1,%xmm5
++	pand	`16*($i+3)-128`(%rax),%xmm3
++	por	%xmm2,%xmm4
++	por	%xmm3,%xmm5
++___
++}
++$code.=<<___;
++	por	%xmm5,%xmm4
++	lea	$STRIDE(%r11),%r11
++	pshufd	\$0x4e,%xmm4,%xmm0
++	por	%xmm4,%xmm0
+ 	movq	%xmm0,($out)		# m0=bp[0]
+ 	lea	8($out),$out
+ 	sub	\$1,$num
+ 	jnz	.Lgather
+-___
+-$code.=<<___ if ($win64);
+-	movaps	%xmm6,(%rsp)
+-	movaps	%xmm7,0x10(%rsp)
+-	lea	0x28(%rsp),%rsp
+-___
+-$code.=<<___;
++
++	lea	(%r10),%rsp
+ 	ret
+ .LSEH_end_bn_gather5:
+ .size	bn_gather5,.-bn_gather5
+@@ -913,9 +1033,9 @@ ___
+ }
+ $code.=<<___;
+ .align	64
+-.Lmagic_masks:
+-	.long	0,0, 0,0, 0,0, -1,-1
+-	.long	0,0, 0,0, 0,0,  0,0
++.Linc:
++	.long	0,0, 1,1
++	.long	2,2, 2,2
+ .asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+ ___
+ 
+@@ -954,7 +1074,7 @@ mul_handler:
+ 	cmp	%r10,%rbx		# context->Rip<end of prologue label
+ 	jb	.Lcommon_seh_tail
+ 
+-	lea	`40+48`(%rax),%rax
++	lea	48(%rax),%rax
+ 
+ 	mov	4(%r11),%r10d		# HandlerData[1]
+ 	lea	(%rsi,%r10),%r10	# end of alloca label
+@@ -971,9 +1091,7 @@ mul_handler:
+ 	mov	192($context),%r10	# pull $num
+ 	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
+ 
+-	movaps	(%rax),%xmm0
+-	movaps	16(%rax),%xmm1
+-	lea	`40+48`(%rax),%rax
++	lea	48(%rax),%rax
+ 
+ 	mov	-8(%rax),%rbx
+ 	mov	-16(%rax),%rbp
+@@ -987,8 +1105,6 @@ mul_handler:
+ 	mov	%r13,224($context)	# restore context->R13
+ 	mov	%r14,232($context)	# restore context->R14
+ 	mov	%r15,240($context)	# restore context->R15
+-	movups	%xmm0,512($context)	# restore context->Xmm6
+-	movups	%xmm1,528($context)	# restore context->Xmm7
+ 
+ .Lcommon_seh_tail:
+ 	mov	8(%rax),%rdi
+@@ -1057,10 +1173,9 @@ mul_handler:
+ 	.rva	.Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
+ .align	8
+ .LSEH_info_bn_gather5:
+-        .byte   0x01,0x0d,0x05,0x00
+-        .byte   0x0d,0x78,0x01,0x00	#movaps	0x10(rsp),xmm7
+-        .byte   0x08,0x68,0x00,0x00	#movaps	(rsp),xmm6
+-        .byte   0x04,0x42,0x00,0x00	#sub	rsp,0x28
++	.byte	0x01,0x0b,0x03,0x0a
++	.byte	0x0b,0x01,0x21,0x00	# sub	rsp,0x108
++	.byte	0x04,0xa3,0x00,0x00	# lea	r10,(rsp), set_frame r10
+ .align	8
+ ___
+ }
+Index: openssl-1.0.1e/crypto/bn/bn_exp.c
+===================================================================
+--- openssl-1.0.1e.orig/crypto/bn/bn_exp.c
++++ openssl-1.0.1e/crypto/bn/bn_exp.c
+@@ -111,6 +111,7 @@
+ 
+ 
+ #include "cryptlib.h"
++#include "constant_time_locl.h"
+ #include "bn_lcl.h"
+ 
+ #include <stdlib.h>
+@@ -534,31 +535,67 @@ err:
+  * as cache lines are concerned.  The following functions are used to transfer a BIGNUM
+  * from/to that table. */
+ 
+-static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int width)
++static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int window)
+ 	{
+-	size_t i, j;
++        int i, j;
++        int width = 1 << window;
++        BN_ULONG *table = (BN_ULONG *)buf;
+ 
+ 	if (top > b->top)
+ 		top = b->top; /* this works because 'buf' is explicitly zeroed */
+-	for (i = 0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
+-		{
+-		buf[j] = ((unsigned char*)b->d)[i];
+-		}
++        for (i = 0, j = idx; i < top; i++, j += width) {
++            table[j] = b->d[i];
++        }
+ 
+ 	return 1;
+ 	}
+ 
+-static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int width)
++static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int window)
+ 	{
+-	size_t i, j;
++        int i, j;
++        int width = 1 << window;
++        volatile BN_ULONG *table = (volatile BN_ULONG *)buf;
+ 
+ 	if (bn_wexpand(b, top) == NULL)
+ 		return 0;
+ 
+-	for (i=0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
+-		{
+-		((unsigned char*)b->d)[i] = buf[j];
+-		}
++        if (window <= 3) {
++            for (i = 0; i < top; i++, table += width) {
++                BN_ULONG acc = 0;
++
++                for (j = 0; j < width; j++) {
++                    acc |= table[j] &
++                           ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
++               }
++
++                b->d[i] = acc;
++            }
++        } else {
++            int xstride = 1 << (window - 2);
++            BN_ULONG y0, y1, y2, y3;
++
++            i = idx >> (window - 2);        /* equivalent of idx / xstride */
++            idx &= xstride - 1;             /* equivalent of idx % xstride */
++
++            y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1);
++            y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1);
++            y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1);
++            y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1);
++
++            for (i = 0; i < top; i++, table += width) {
++                BN_ULONG acc = 0;
++
++                for (j = 0; j < xstride; j++) {
++                    acc |= ( (table[j + 0 * xstride] & y0) |
++                             (table[j + 1 * xstride] & y1) |
++                             (table[j + 2 * xstride] & y2) |
++                             (table[j + 3 * xstride] & y3) )
++                           & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
++                }
++
++                b->d[i] = acc;
++            }
++        }
+ 
+ 	b->top = top;
+ 	bn_correct_top(b);
+@@ -767,8 +804,8 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+     else
+ #endif
+ 	{
+-	if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err;
+-	if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am,  top, powerbuf, 1, numPowers)) goto err;
++	if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window)) goto err;
++	if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am,  top, powerbuf, 1, window)) goto err;
+ 
+ 	/* If the window size is greater than 1, then calculate
+ 	 * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
+@@ -778,20 +815,20 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+ 	if (window > 1)
+ 		{
+ 		if (!BN_mod_mul_montgomery(&tmp,&am,&am,mont,ctx))	goto err;
+-		if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, numPowers)) goto err;
++		if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, window)) goto err;
+ 		for (i=3; i<numPowers; i++)
+ 			{
+ 			/* Calculate a^i = a^(i-1) * a */
+ 			if (!BN_mod_mul_montgomery(&tmp,&am,&tmp,mont,ctx))
+ 				goto err;
+-			if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, numPowers)) goto err;
++			if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, window)) goto err;
+ 			}
+ 		}
+ 
+ 	bits--;
+ 	for (wvalue=0, i=bits%window; i>=0; i--,bits--)
+ 		wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+-	if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp,top,powerbuf,wvalue,numPowers)) goto err;
++	if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp,top,powerbuf,wvalue,window)) goto err;
+  
+ 	/* Scan the exponent one window at a time starting from the most
+ 	 * significant bits.
+@@ -808,7 +845,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr
+   			}
+  		
+ 		/* Fetch the appropriate pre-computed value from the pre-buf */
+-		if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, numPowers)) goto err;
++		if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, window)) goto err;
+ 
+  		/* Multiply the result into the intermediate result */
+  		if (!BN_mod_mul_montgomery(&tmp,&tmp,&am,mont,ctx)) goto err;
+Index: openssl-1.0.1e/crypto/perlasm/x86_64-xlate.pl
+===================================================================
+--- openssl-1.0.1e.orig/crypto/perlasm/x86_64-xlate.pl
++++ openssl-1.0.1e/crypto/perlasm/x86_64-xlate.pl
+@@ -121,7 +121,7 @@ my %globals;
+ 		$self->{sz} = "";
+ 	    } elsif ($self->{op} =~ /^v/) { # VEX
+ 		$self->{sz} = "";
+-	    } elsif ($self->{op} =~ /movq/ && $line =~ /%xmm/) {
++	    } elsif ($self->{op} =~ /mov[dq]/ && $line =~ /%xmm/) {
+ 		$self->{sz} = "";
+ 	    } elsif ($self->{op} =~ /([a-z]{3,})([qlwb])$/) {
+ 		$self->{op} = $1;
+Index: openssl-1.0.1e/crypto/constant_time_locl.h
+===================================================================
+--- /dev/null
++++ openssl-1.0.1e/crypto/constant_time_locl.h
+@@ -0,0 +1,206 @@
++/* crypto/constant_time_locl.h */
++/*
++ * Utilities for constant-time cryptography.
++ *
++ * Author: Emilia Kasper (emilia at openssl.org)
++ * Based on previous work by Bodo Moeller, Emilia Kasper, Adam Langley
++ * (Google).
++ * ====================================================================
++ * Copyright (c) 2014 The OpenSSL Project.  All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. All advertising materials mentioning features or use of this software
++ *    must display the following acknowledgement:
++ *    "This product includes cryptographic software written by
++ *     Eric Young (eay at cryptsoft.com)"
++ *    The word 'cryptographic' can be left out if the rouines from the library
++ *    being used are not cryptographic related :-).
++ * 4. If you include any Windows specific code (or a derivative thereof) from
++ *    the apps directory (application code) you must include an acknowledgement:
++ *    "This product includes software written by Tim Hudson (tjh at cryptsoft.com)"
++ *
++ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * The licence and distribution terms for any publically available version or
++ * derivative of this code cannot be changed.  i.e. this code cannot simply be
++ * copied and put under another distribution licence
++ * [including the GNU Public Licence.]
++ */
++
++#ifndef HEADER_CONSTANT_TIME_LOCL_H
++#define HEADER_CONSTANT_TIME_LOCL_H
++
++#include "e_os.h"  /* For 'inline' */
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++ * The boolean methods return a bitmask of all ones (0xff...f) for true
++ * and 0 for false. This is useful for choosing a value based on the result
++ * of a conditional in constant time. For example,
++ *
++ * if (a < b) {
++ *   c = a;
++ * } else {
++ *   c = b;
++ * }
++ *
++ * can be written as
++ *
++ * unsigned int lt = constant_time_lt(a, b);
++ * c = constant_time_select(lt, a, b);
++ */
++
++/*
++ * Returns the given value with the MSB copied to all the other
++ * bits. Uses the fact that arithmetic shift shifts-in the sign bit.
++ * However, this is not ensured by the C standard so you may need to
++ * replace this with something else on odd CPUs.
++ */
++static inline unsigned int constant_time_msb(unsigned int a);
++
++/*
++ * Returns 0xff..f if a < b and 0 otherwise.
++ */
++static inline unsigned int constant_time_lt(unsigned int a, unsigned int b);
++/* Convenience method for getting an 8-bit mask. */
++static inline unsigned char constant_time_lt_8(unsigned int a, unsigned int b);
++
++/*
++ * Returns 0xff..f if a >= b and 0 otherwise.
++ */
++static inline unsigned int constant_time_ge(unsigned int a, unsigned int b);
++/* Convenience method for getting an 8-bit mask. */
++static inline unsigned char constant_time_ge_8(unsigned int a, unsigned int b);
++
++/*
++ * Returns 0xff..f if a == 0 and 0 otherwise.
++ */
++static inline unsigned int constant_time_is_zero(unsigned int a);
++/* Convenience method for getting an 8-bit mask. */
++static inline unsigned char constant_time_is_zero_8(unsigned int a);
++
++
++/*
++ * Returns 0xff..f if a == b and 0 otherwise.
++ */
++static inline unsigned int constant_time_eq(unsigned int a, unsigned int b);
++/* Convenience method for getting an 8-bit mask. */
++static inline unsigned char constant_time_eq_8(unsigned int a, unsigned int b);
++/* Signed integers. */
++static inline unsigned int constant_time_eq_int(int a, int b);
++/* Convenience method for getting an 8-bit mask. */
++static inline unsigned char constant_time_eq_int_8(int a, int b);
++
++
++/*
++ * Returns (mask & a) | (~mask & b).
++ *
++ * When |mask| is all 1s or all 0s (as returned by the methods above),
++ * the select methods return either |a| (if |mask| is nonzero) or |b|
++ * (if |mask| is zero).
++ */
++static inline unsigned int constant_time_select(unsigned int mask,
++	unsigned int a, unsigned int b);
++/* Convenience method for unsigned chars. */
++static inline unsigned char constant_time_select_8(unsigned char mask,
++	unsigned char a, unsigned char b);
++/* Convenience method for signed integers. */
++static inline int constant_time_select_int(unsigned int mask, int a, int b);
++
++static inline unsigned int constant_time_msb(unsigned int a)
++	{
++	return 0-(a >> (sizeof(a) * 8 - 1));
++	}
++
++static inline unsigned int constant_time_lt(unsigned int a, unsigned int b)
++	{
++	return constant_time_msb(a^((a^b)|((a-b)^b)));
++	}
++
++static inline unsigned char constant_time_lt_8(unsigned int a, unsigned int b)
++	{
++	return (unsigned char)(constant_time_lt(a, b));
++	}
++
++static inline unsigned int constant_time_ge(unsigned int a, unsigned int b)
++	{
++	return ~constant_time_lt(a, b);
++	}
++
++static inline unsigned char constant_time_ge_8(unsigned int a, unsigned int b)
++	{
++	return (unsigned char)(constant_time_ge(a, b));
++	}
++
++static inline unsigned int constant_time_is_zero(unsigned int a)
++	{
++	return constant_time_msb(~a & (a - 1));
++	}
++
++static inline unsigned char constant_time_is_zero_8(unsigned int a)
++	{
++	return (unsigned char)(constant_time_is_zero(a));
++	}
++
++static inline unsigned int constant_time_eq(unsigned int a, unsigned int b)
++	{
++	return constant_time_is_zero(a ^ b);
++	}
++
++static inline unsigned char constant_time_eq_8(unsigned int a, unsigned int b)
++	{
++	return (unsigned char)(constant_time_eq(a, b));
++	}
++
++static inline unsigned int constant_time_eq_int(int a, int b)
++	{
++	return constant_time_eq((unsigned)(a), (unsigned)(b));
++	}
++
++static inline unsigned char constant_time_eq_int_8(int a, int b)
++	{
++	return constant_time_eq_8((unsigned)(a), (unsigned)(b));
++	}
++
++static inline unsigned int constant_time_select(unsigned int mask,
++	unsigned int a, unsigned int b)
++	{
++	return (mask & a) | (~mask & b);
++	}
++
++static inline unsigned char constant_time_select_8(unsigned char mask,
++	unsigned char a, unsigned char b)
++	{
++	return (unsigned char)(constant_time_select(mask, a, b));
++	}
++
++static inline int constant_time_select_int(unsigned int mask, int a, int b)
++	{
++	return (int)(constant_time_select(mask, (unsigned)(a), (unsigned)(b)));
++	}
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif  /* HEADER_CONSTANT_TIME_LOCL_H */
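The core idiom behind both halves of this patch (the x86_64-mont5.pl rewrite and the MOD_EXP_CTIME_COPY_FROM_PREBUF change) is the same: never index the precomputation table directly with a secret value; instead scan the whole table and mask away every entry except the wanted one, so the memory access pattern is independent of the secret. A self-contained sketch of that idiom in plain C, with assumed names (ct_eq_mask, ct_table_lookup) rather than the OpenSSL internals; the mask construction follows constant_time_is_zero()/constant_time_eq_int() from the header added above.

#include <stdio.h>

typedef unsigned long word_t;   /* stand-in for BN_ULONG on LP64 */

/* All-ones mask when a == b, zero otherwise (same idea as
 * constant_time_eq_int() above). */
static word_t ct_eq_mask(unsigned int a, unsigned int b)
{
    word_t x = a ^ b;
    x = ~x & (x - 1);                            /* MSB set iff x == 0 */
    return (word_t)0 - (x >> (sizeof(x) * 8 - 1));
}

/* Read table[idx] while touching every entry, so the access pattern
 * (and hence the cache footprint) does not depend on idx. */
static word_t ct_table_lookup(const word_t *table, int width, int idx)
{
    word_t acc = 0;
    int j;

    for (j = 0; j < width; j++)
        acc |= table[j] & ct_eq_mask((unsigned int)j, (unsigned int)idx);
    return acc;
}

int main(void)
{
    word_t tbl[8] = {10, 11, 12, 13, 14, 15, 16, 17};

    printf("%lu\n", ct_table_lookup(tbl, 8, 5));   /* prints 15 */
    return 0;
}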

Added: openssl/branches/wheezy/debian/patches/CVE-2016-0705.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2016-0705.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2016-0705.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,66 @@
+From 6c88c71b4e4825c7bc0489306d062d017634eb88 Mon Sep 17 00:00:00 2001
+From: "Dr. Stephen Henson" <steve at openssl.org>
+Date: Thu, 18 Feb 2016 12:47:23 +0000
+Subject: [PATCH] Fix double free in DSA private key parsing.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix double free bug when parsing malformed DSA private keys.
+
+Thanks to Adam Langley (Google/BoringSSL) for discovering this bug using
+libFuzzer.
+
+CVE-2016-0705
+
+Reviewed-by: Emilia Käsper <emilia at openssl.org>
+---
+ crypto/dsa/dsa_ameth.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+Index: openssl-1.0.1k/crypto/dsa/dsa_ameth.c
+===================================================================
+--- openssl-1.0.1k.orig/crypto/dsa/dsa_ameth.c
++++ openssl-1.0.1k/crypto/dsa/dsa_ameth.c
+@@ -201,6 +201,8 @@ static int dsa_priv_decode(EVP_PKEY *pke
+ 	STACK_OF(ASN1_TYPE) *ndsa = NULL;
+ 	DSA *dsa = NULL;
+ 
++    int ret = 0;
++
+ 	if (!PKCS8_pkey_get0(NULL, &p, &pklen, &palg, p8))
+ 		return 0;
+ 	X509_ALGOR_get0(NULL, &ptype, &pval, palg);
+@@ -281,23 +283,21 @@ static int dsa_priv_decode(EVP_PKEY *pke
+ 		}
+ 
+ 	EVP_PKEY_assign_DSA(pkey, dsa);
+-	BN_CTX_free (ctx);
+-	if(ndsa)
+-		sk_ASN1_TYPE_pop_free(ndsa, ASN1_TYPE_free);
+-	else
+-		ASN1_INTEGER_free(privkey);
+ 
+-	return 1;
++	ret = 1;
++        goto done;
+ 
+ 	decerr:
+ 	DSAerr(DSA_F_DSA_PRIV_DECODE, EVP_R_DECODE_ERROR);
+ 	dsaerr:
+-	BN_CTX_free (ctx);
+-	if (privkey)
+-		ASN1_INTEGER_free(privkey);
+-	sk_ASN1_TYPE_pop_free(ndsa, ASN1_TYPE_free);
+ 	DSA_free(dsa);
+-	return 0;
++        done:
++	BN_CTX_free (ctx);
++        if (ndsa)
++            sk_ASN1_TYPE_pop_free(ndsa, ASN1_TYPE_free);
++        else
++            ASN1_INTEGER_free(privkey);
++	return ret;
+ 	}
+ 
+ static int dsa_priv_encode(PKCS8_PRIV_KEY_INFO *p8, const EVP_PKEY *pkey)
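The essence of the fix above is structural: the old code duplicated its cleanup logic across the success and error paths, and a malformed key could lead to the same object being released twice; the patched function funnels every exit through one done: label so each resource is freed exactly once. A generic sketch of that single-cleanup-path idiom, using plain libc and hypothetical names, not the OpenSSL code:

#include <stdlib.h>
#include <string.h>

/* Hypothetical resources standing in for ctx/ndsa/privkey/dsa above. */
struct parsed { char *a; char *b; };

/* Returns a heap object on success, NULL on failure; every exit path
 * goes through one cleanup label, so nothing is freed twice and
 * nothing leaks on the error paths. strdup() is POSIX. */
static struct parsed *parse(const char *input)
{
    struct parsed *ret = NULL;
    char *a = NULL, *b = NULL;

    if (input == NULL)
        goto done;
    if ((a = strdup(input)) == NULL)
        goto done;
    if ((b = strdup(input)) == NULL)
        goto done;
    if ((ret = malloc(sizeof(*ret))) == NULL)
        goto done;
    ret->a = a;
    ret->b = b;
    a = b = NULL;               /* ownership transferred to ret */

 done:
    free(a);                    /* no-ops once ownership has moved */
    free(b);
    return ret;
}

int main(void)
{
    struct parsed *p = parse("example");

    if (p) { free(p->a); free(p->b); free(p); }
    return 0;
}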

Added: openssl/branches/wheezy/debian/patches/CVE-2016-0797.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2016-0797.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2016-0797.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,98 @@
+From 8f8d7d2796ca710184453ba4a300ad7d54d7f1a1 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt at openssl.org>
+Date: Mon, 22 Feb 2016 10:27:18 +0000
+Subject: [PATCH] Fix BN_hex2bn/BN_dec2bn NULL ptr/heap corruption
+
+---
+ crypto/bn/bn.h       | 14 ++++++++++++--
+ crypto/bn/bn_print.c | 13 +++++++++----
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+
+Index: openssl-1.0.1k/crypto/bn/bn.h
+===================================================================
+--- openssl-1.0.1k.orig/crypto/bn/bn.h
++++ openssl-1.0.1k/crypto/bn/bn.h
+@@ -125,6 +125,7 @@
+ #ifndef HEADER_BN_H
+ #define HEADER_BN_H
+ 
++#include <limits.h>
+ #include <openssl/e_os2.h>
+ #ifndef OPENSSL_NO_FP_API
+ #include <stdio.h> /* FILE */
+@@ -696,8 +697,17 @@ const BIGNUM *BN_get0_nist_prime_521(voi
+ 
+ /* library internal functions */
+ 
+-#define bn_expand(a,bits) ((((((bits+BN_BITS2-1))/BN_BITS2)) <= (a)->dmax)?\
+-	(a):bn_expand2((a),(bits+BN_BITS2-1)/BN_BITS2))
++# define bn_expand(a,bits) \
++    ( \
++        bits > (INT_MAX - BN_BITS2 + 1) ? \
++            NULL \
++        : \
++            (((bits+BN_BITS2-1)/BN_BITS2) <= (a)->dmax) ? \
++                (a) \
++            : \
++                bn_expand2((a),(bits+BN_BITS2-1)/BN_BITS2) \
++    )
++
+ #define bn_wexpand(a,words) (((words) <= (a)->dmax)?(a):bn_expand2((a),(words)))
+ BIGNUM *bn_expand2(BIGNUM *a, int words);
+ #ifndef OPENSSL_NO_DEPRECATED
+Index: openssl-1.0.1k/crypto/bn/bn_print.c
+===================================================================
+--- openssl-1.0.1k.orig/crypto/bn/bn_print.c
++++ openssl-1.0.1k/crypto/bn/bn_print.c
+@@ -58,6 +58,7 @@
+ 
+ #include <stdio.h>
+ #include <ctype.h>
++#include <limits.h>
+ #include "cryptlib.h"
+ #include <openssl/buffer.h>
+ #include "bn_lcl.h"
+@@ -180,8 +181,10 @@ int BN_hex2bn(BIGNUM **bn, const char *a
+ 
+ 	if (*a == '-') { neg=1; a++; }
+ 
+-	for (i=0; isxdigit((unsigned char) a[i]); i++)
+-		;
++        for (i = 0; i <= (INT_MAX/4) && isxdigit((unsigned char)a[i]); i++)
++            ;
++        if (i > INT_MAX/4)
++            goto err;
+ 
+ 	num=i+neg;
+ 	if (bn == NULL) return(num);
+@@ -197,7 +200,7 @@ int BN_hex2bn(BIGNUM **bn, const char *a
+ 		BN_zero(ret);
+ 		}
+ 
+-	/* i is the number of hex digests; */
++        /* i is the number of hex digits */
+ 	if (bn_expand(ret,i*4) == NULL) goto err;
+ 
+ 	j=i; /* least significant 'hex' */
+@@ -246,8 +249,10 @@ int BN_dec2bn(BIGNUM **bn, const char *a
+ 	if ((a == NULL) || (*a == '\0')) return(0);
+ 	if (*a == '-') { neg=1; a++; }
+ 
+-	for (i=0; isdigit((unsigned char) a[i]); i++)
+-		;
++        for (i = 0; i <= (INT_MAX/4) && isdigit((unsigned char)a[i]); i++)
++            ;
++        if (i > INT_MAX/4)
++            goto err;
+ 
+ 	num=i+neg;
+ 	if (bn == NULL) return(num);
+@@ -264,7 +269,7 @@ int BN_dec2bn(BIGNUM **bn, const char *a
+ 		BN_zero(ret);
+ 		}
+ 
+-	/* i is the number of digests, a bit of an over expand; */
++        /* i is the number of digits, a bit of an over expand */
+ 	if (bn_expand(ret,i*4) == NULL) goto err;
+ 
+ 	j=BN_DEC_NUM-(i%BN_DEC_NUM);
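The new INT_MAX/4 guard in BN_hex2bn/BN_dec2bn exists because each hex digit contributes 4 bits to the result, so the subsequent bn_expand(ret, i*4) would overflow a signed int once i exceeds INT_MAX/4 (the decimal path reuses the same conservative bound). A trivial standalone illustration of that arithmetic, not OpenSSL code:

#include <limits.h>
#include <stdio.h>

/* A string of i hex digits needs i*4 bits in the BIGNUM; reject the
 * length before the multiplication can wrap, as the patch does. */
static int hex_digits_ok(size_t i)
{
    return i <= (size_t)(INT_MAX / 4);
}

int main(void)
{
    size_t huge = (size_t)INT_MAX / 4 + 1;

    printf("1000 digits ok? %d\n", hex_digits_ok(1000));   /* 1 */
    printf("huge input ok?  %d\n", hex_digits_ok(huge));   /* 0 */
    return 0;
}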

Added: openssl/branches/wheezy/debian/patches/CVE-2016-0798.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2016-0798.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2016-0798.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,263 @@
+From 259b664f950c2ba66fbf4b0fe5281327904ead21 Mon Sep 17 00:00:00 2001
+From: Emilia Kasper <emilia at openssl.org>
+Date: Wed, 24 Feb 2016 12:59:59 +0100
+Subject: [PATCH] CVE-2016-0798: avoid memory leak in SRP
+
+The SRP user database lookup method SRP_VBASE_get_by_user had confusing
+memory management semantics; the returned pointer was sometimes newly
+allocated, and sometimes owned by the callee. The calling code has no
+way of distinguishing these two cases.
+
+Specifically, SRP servers that configure a secret seed to hide valid
+login information are vulnerable to a memory leak: an attacker
+connecting with an invalid username can cause a memory leak of around
+300 bytes per connection.
+
+Servers that do not configure SRP, or configure SRP but do not configure
+a seed are not vulnerable.
+
+In Apache, the seed directive is known as SSLSRPUnknownUserSeed.
+
+To mitigate the memory leak, the seed handling in SRP_VBASE_get_by_user
+is now disabled even if the user has configured a seed.
+
+Applications are advised to migrate to SRP_VBASE_get1_by_user. However,
+note that OpenSSL makes no strong guarantees about the
+indistinguishability of valid and invalid logins. In particular,
+computations are currently not carried out in constant time.
+
+Reviewed-by: Rich Salz <rsalz at openssl.org>
+---
+ CHANGES              | 19 ++++++++++++++++++
+ apps/s_server.c      | 49 +++++++++++++++++++++++++++-----------------
+ crypto/srp/srp.h     | 10 +++++++++
+ crypto/srp/srp_vfy.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++-----
+ util/libeay.num      |  2 ++
+ 5 files changed, 114 insertions(+), 23 deletions(-)
+
+Index: openssl-1.0.1e/apps/s_server.c
+===================================================================
+--- openssl-1.0.1e.orig/apps/s_server.c
++++ openssl-1.0.1e/apps/s_server.c
+@@ -395,6 +395,8 @@ typedef struct srpsrvparm_st
+ static int MS_CALLBACK ssl_srp_server_param_cb(SSL *s, int *ad, void *arg)
+ 	{
+ 	srpsrvparm *p = (srpsrvparm *)arg;
++        int ret = SSL3_AL_FATAL;
++
+ 	if (p->login == NULL && p->user == NULL )
+ 		{
+ 		p->login = SSL_get_srp_username(s);
+@@ -405,19 +407,22 @@ static int MS_CALLBACK ssl_srp_server_pa
+ 	if (p->user == NULL)
+ 		{
+ 		BIO_printf(bio_err, "User %s doesn't exist\n", p->login);
+-		return SSL3_AL_FATAL;
++                goto err;
+ 		}
+ 	if (SSL_set_srp_server_param(s, p->user->N, p->user->g, p->user->s, p->user->v,
+ 				     p->user->info) < 0)
+ 		{
+ 		*ad = SSL_AD_INTERNAL_ERROR;
+-		return SSL3_AL_FATAL;
++                goto err;
+ 		}
+ 	BIO_printf(bio_err, "SRP parameters set: username = \"%s\" info=\"%s\" \n", p->login,p->user->info);
+-	/* need to check whether there are memory leaks */
++        ret = SSL_ERROR_NONE;
++
++err:
++        SRP_user_pwd_free(p->user);
+ 	p->user = NULL;
+ 	p->login = NULL;
+-	return SSL_ERROR_NONE;
++	return ret;
+ 	}
+ 
+ #endif
+@@ -2254,7 +2259,8 @@ static int sv_body(char *hostname, int s
+ 				while (SSL_get_error(con,k) == SSL_ERROR_WANT_X509_LOOKUP)
+ 					{
+ 					BIO_printf(bio_s_out,"LOOKUP renego during write\n");
+-					srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
++                                        SRP_user_pwd_free(srp_callback_parm.user);
++					srp_callback_parm.user = SRP_VBASE_get1_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
+ 					if (srp_callback_parm.user) 
+ 						BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
+ 					else 
+@@ -2313,7 +2319,8 @@ again:
+ 				while (SSL_get_error(con,i) == SSL_ERROR_WANT_X509_LOOKUP)
+ 					{
+ 					BIO_printf(bio_s_out,"LOOKUP renego during read\n");
+-					srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
++                                        SRP_user_pwd_free(srp_callback_parm.user);
++					srp_callback_parm.user = SRP_VBASE_get1_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
+ 					if (srp_callback_parm.user) 
+ 						BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
+ 					else 
+@@ -2402,7 +2409,8 @@ static int init_ssl_connection(SSL *con)
+ 	while (i <= 0 &&  SSL_get_error(con,i) == SSL_ERROR_WANT_X509_LOOKUP) 
+ 		{
+ 			BIO_printf(bio_s_out,"LOOKUP during accept %s\n",srp_callback_parm.login);
+-			srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
++                        SRP_user_pwd_free(srp_callback_parm.user);
++			srp_callback_parm.user = SRP_VBASE_get1_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
+ 			if (srp_callback_parm.user) 
+ 				BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
+ 			else 
+@@ -2644,7 +2652,8 @@ static int www_body(char *hostname, int
+ 			while (i <= 0 &&  SSL_get_error(con,i) == SSL_ERROR_WANT_X509_LOOKUP) 
+ 		{
+ 			BIO_printf(bio_s_out,"LOOKUP during accept %s\n",srp_callback_parm.login);
+-			srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
++                        SRP_user_pwd_free(srp_callback_parm.user);
++			srp_callback_parm.user = SRP_VBASE_get1_by_user(srp_callback_parm.vb, srp_callback_parm.login); 
+ 			if (srp_callback_parm.user) 
+ 				BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
+ 			else 
+Index: openssl-1.0.1e/crypto/srp/srp.h
+===================================================================
+--- openssl-1.0.1e.orig/crypto/srp/srp.h
++++ openssl-1.0.1e/crypto/srp/srp.h
+@@ -83,16 +83,21 @@ DECLARE_STACK_OF(SRP_gN_cache)
+ 
+ typedef struct SRP_user_pwd_st
+ 	{
++        /* Owned by us. */
+ 	char *id;
+ 	BIGNUM *s;
+ 	BIGNUM *v;
++        /* Not owned by us. */
+ 	const BIGNUM *g;
+ 	const BIGNUM *N;
++        /* Owned by us. */
+ 	char *info;
+ 	} SRP_user_pwd;
+ 
+ DECLARE_STACK_OF(SRP_user_pwd)
+ 
++void SRP_user_pwd_free(SRP_user_pwd *user_pwd);
++
+ typedef struct SRP_VBASE_st
+ 	{
+ 	STACK_OF(SRP_user_pwd) *users_pwd;
+@@ -118,6 +123,12 @@ SRP_VBASE *SRP_VBASE_new(char *seed_key)
+ int SRP_VBASE_free(SRP_VBASE *vb);
+ int SRP_VBASE_init(SRP_VBASE *vb, char * verifier_file);
+ SRP_user_pwd *SRP_VBASE_get_by_user(SRP_VBASE *vb, char *username);
++
++/* This method ignores the configured seed and fails for an unknown user. */
++SRP_user_pwd *SRP_VBASE_get_by_user(SRP_VBASE *vb, char *username);
++/* NOTE: unlike in SRP_VBASE_get_by_user, caller owns the returned pointer.*/
++SRP_user_pwd *SRP_VBASE_get1_by_user(SRP_VBASE *vb, char *username);
++
+ char *SRP_create_verifier(const char *user, const char *pass, char **salt,
+ 			  char **verifier, const char *N, const char *g);
+ int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt, BIGNUM **verifier, BIGNUM *N, BIGNUM *g);
+Index: openssl-1.0.1e/crypto/srp/srp_vfy.c
+===================================================================
+--- openssl-1.0.1e.orig/crypto/srp/srp_vfy.c
++++ openssl-1.0.1e/crypto/srp/srp_vfy.c
+@@ -179,7 +179,7 @@ static char *t_tob64(char *dst, const un
+ 	return olddst;
+ 	}
+ 
+-static void SRP_user_pwd_free(SRP_user_pwd *user_pwd)
++void SRP_user_pwd_free(SRP_user_pwd *user_pwd)
+ 	{
+ 	if (user_pwd == NULL) 
+ 		return;
+@@ -241,6 +241,24 @@ static int SRP_user_pwd_set_sv_BN(SRP_us
+ 	return (vinfo->s != NULL && vinfo->v != NULL) ;
+ 	}
+ 
++static SRP_user_pwd *srp_user_pwd_dup(SRP_user_pwd *src)
++{
++    SRP_user_pwd *ret;
++
++    if (src == NULL)
++        return NULL;
++    if ((ret = SRP_user_pwd_new()) == NULL)
++        return NULL;
++
++    SRP_user_pwd_set_gN(ret, src->g, src->N);
++    if (!SRP_user_pwd_set_ids(ret, src->id, src->info)
++        || !SRP_user_pwd_set_sv_BN(ret, BN_dup(src->s), BN_dup(src->v))) {
++            SRP_user_pwd_free(ret);
++            return NULL;
++    }
++    return ret;
++}
++
+ SRP_VBASE *SRP_VBASE_new(char *seed_key)
+ 	{
+ 	SRP_VBASE *vb = (SRP_VBASE *) OPENSSL_malloc(sizeof(SRP_VBASE));
+@@ -472,22 +490,51 @@ int SRP_VBASE_init(SRP_VBASE *vb, char *
+ 	}
+ 
+ 
+-SRP_user_pwd *SRP_VBASE_get_by_user(SRP_VBASE *vb, char *username)
++static SRP_user_pwd *find_user(SRP_VBASE *vb, char *username)
+ 	{
+ 	int i;
+ 	SRP_user_pwd *user;
+-	unsigned char digv[SHA_DIGEST_LENGTH];
+-	unsigned char digs[SHA_DIGEST_LENGTH];
+-	EVP_MD_CTX ctxt;
+ 
+ 	if (vb == NULL)
+ 		return NULL;
++
+ 	for(i = 0; i < sk_SRP_user_pwd_num(vb->users_pwd); i++)
+ 		{
+ 		user = sk_SRP_user_pwd_value(vb->users_pwd, i);
+ 		if (strcmp(user->id,username)==0)
+ 			return user;
+ 		}
++
++        return NULL;
++        }
++
++/*
++ * This method ignores the configured seed and fails for an unknown user.
++ * Ownership of the returned pointer is not released to the caller.
++ * In other words, caller must not free the result.
++ */
++SRP_user_pwd *SRP_VBASE_get_by_user(SRP_VBASE *vb, char *username)
++{
++    return find_user(vb, username);
++}
++
++/*
++ * Ownership of the returned pointer is released to the caller.
++ * In other words, caller must free the result once done.
++ */
++SRP_user_pwd *SRP_VBASE_get1_by_user(SRP_VBASE *vb, char *username)
++{
++        SRP_user_pwd *user;
++        unsigned char digv[SHA_DIGEST_LENGTH];
++        unsigned char digs[SHA_DIGEST_LENGTH];
++        EVP_MD_CTX ctxt;
++
++        if (vb == NULL)
++            return NULL;
++
++        if ((user = find_user(vb, username)) != NULL)
++            return srp_user_pwd_dup(user);
++
+ 	if ((vb->seed_key == NULL) ||
+ 		(vb->default_g == NULL) ||
+ 		(vb->default_N == NULL))
+Index: openssl-1.0.1e/util/libeay.num
+===================================================================
+--- openssl-1.0.1e.orig/util/libeay.num
++++ openssl-1.0.1e/util/libeay.num
+@@ -1806,6 +1806,8 @@ d2i_ASN1_SET_OF_PKCS12_SAFEBAG
+ ASN1_UTCTIME_get                        2350	NOEXIST::FUNCTION:
+ X509_REQ_digest                         2362	EXIST::FUNCTION:EVP
+ X509_CRL_digest                         2391	EXIST::FUNCTION:EVP
++SRP_VBASE_get1_by_user                  2393 	EXIST::FUNCTION:SRP
++SRP_user_pwd_free                       2394	EXIST::FUNCTION:SRP
+ d2i_ASN1_SET_OF_PKCS7                   2397	NOEXIST::FUNCTION:
+ EVP_CIPHER_CTX_set_key_length           2399	EXIST::FUNCTION:
+ EVP_CIPHER_CTX_ctrl                     2400	EXIST::FUNCTION:
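For callers, the practical outcome of this patch is the new get1 lookup plus an explicit free, exactly as s_server is converted above. A minimal usage sketch, assuming an OpenSSL build with SRP enabled that exports SRP_VBASE_get1_by_user/SRP_user_pwd_free (as this upload does); the have_user() helper and the command-line arguments are made up for illustration.

#include <stdio.h>
#include <openssl/srp.h>

/* Look up a user and release the result: with SRP_VBASE_get1_by_user()
 * the caller owns the returned SRP_user_pwd, which is what closes the
 * per-connection leak described above. */
static int have_user(SRP_VBASE *vb, char *username)
{
    SRP_user_pwd *user = SRP_VBASE_get1_by_user(vb, username);

    if (user == NULL)
        return 0;                   /* unknown user */

    /* ... hand user->N, user->g, user->s, user->v to the SSL callback ... */

    SRP_user_pwd_free(user);        /* caller frees */
    return 1;
}

int main(int argc, char **argv)
{
    SRP_VBASE *vb = SRP_VBASE_new(NULL);

    /* argv[1]: an SRP verifier file, argv[2]: the username to check. */
    if (vb == NULL || argc < 3 || SRP_VBASE_init(vb, argv[1]) != SRP_NO_ERROR)
        return 1;
    printf("%s: %s\n", argv[2], have_user(vb, argv[2]) ? "found" : "unknown");
    SRP_VBASE_free(vb);
    return 0;
}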

Added: openssl/branches/wheezy/debian/patches/CVE-2016-0799.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/CVE-2016-0799.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/CVE-2016-0799.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,449 @@
+From 578b956fe741bf8e84055547b1e83c28dd902c73 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt at openssl.org>
+Date: Thu, 25 Feb 2016 13:09:46 +0000
+Subject: [PATCH] Fix memory issues in BIO_*printf functions
+
+The internal |fmtstr| function used in processing a "%s" format string
+in the BIO_*printf functions could overflow while calculating the length
+of a string and cause an OOB read when printing very long strings.
+
+Additionally the internal |doapr_outch| function can attempt to write to
+an OOB memory location (at an offset from the NULL pointer) in the event of
+a memory allocation failure. In 1.0.2 and below this could be caused where
+the size of a buffer to be allocated is greater than INT_MAX. E.g. this
+could be in processing a very long "%s" format string. Memory leaks can also
+occur.
+
+These issues will only occur on certain platforms where sizeof(size_t) >
+sizeof(int). E.g. many 64 bit systems. The first issue may mask the second
+issue dependent on compiler behaviour.
+
+These problems could enable attacks where large amounts of untrusted data
+is passed to the BIO_*printf functions. If applications use these functions
+in this way then they could be vulnerable. OpenSSL itself uses these
+functions when printing out human-readable dumps of ASN.1 data. Therefore
+applications that print this data could be vulnerable if the data is from
+untrusted sources. OpenSSL command line applications could also be
+vulnerable where they print out ASN.1 data, or if untrusted data is passed
+as command line arguments.
+
+Libssl is not considered directly vulnerable. Additionally certificates etc
+received via remote connections via libssl are also unlikely to be able to
+trigger these issues because of message size limits enforced within libssl.
+
+CVE-2016-0799
+
+Issue reported by Guido Vranken.
+
+Reviewed-by: Andy Polyakov <appro at openssl.org>
+---
+ crypto/bio/b_print.c | 187 ++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 116 insertions(+), 71 deletions(-)
+
+Index: openssl-1.0.1k/crypto/bio/b_print.c
+===================================================================
+--- openssl-1.0.1k.orig/crypto/bio/b_print.c
++++ openssl-1.0.1k/crypto/bio/b_print.c
+@@ -125,14 +125,14 @@
+ #define LLONG long
+ #endif
+ 
+-static void fmtstr     (char **, char **, size_t *, size_t *,
++static int fmtstr     (char **, char **, size_t *, size_t *,
+ 			const char *, int, int, int);
+-static void fmtint     (char **, char **, size_t *, size_t *,
++static int fmtint     (char **, char **, size_t *, size_t *,
+ 			LLONG, int, int, int, int);
+-static void fmtfp      (char **, char **, size_t *, size_t *,
++static int fmtfp      (char **, char **, size_t *, size_t *,
+ 			LDOUBLE, int, int, int);
+-static void doapr_outch (char **, char **, size_t *, size_t *, int);
+-static void _dopr(char **sbuffer, char **buffer,
++static int doapr_outch (char **, char **, size_t *, size_t *, int);
++static int _dopr(char **sbuffer, char **buffer,
+ 		  size_t *maxlen, size_t *retlen, int *truncated,
+ 		  const char *format, va_list args);
+ 
+@@ -165,7 +165,7 @@ static void _dopr(char **sbuffer, char *
+ #define char_to_int(p) (p - '0')
+ #define OSSL_MAX(p,q) ((p >= q) ? p : q)
+ 
+-static void
++static int
+ _dopr(
+     char **sbuffer,
+     char **buffer,
+@@ -200,7 +200,8 @@ _dopr(
+             if (ch == '%')
+                 state = DP_S_FLAGS;
+             else
+-                doapr_outch(sbuffer,buffer, &currlen, maxlen, ch);
++                if(!doapr_outch(sbuffer, buffer, &currlen, maxlen, ch))
++                    return 0;
+             ch = *format++;
+             break;
+         case DP_S_FLAGS:
+@@ -306,8 +307,9 @@ _dopr(
+                     value = va_arg(args, int);
+                     break;
+                 }
+-                fmtint(sbuffer, buffer, &currlen, maxlen,
+-                       value, 10, min, max, flags);
++                if (!fmtint(sbuffer, buffer, &currlen, maxlen, value, 10, min,
++                            max, flags))
++                    return 0;
+                 break;
+             case 'X':
+                 flags |= DP_F_UP;
+@@ -332,17 +334,19 @@ _dopr(
+                         unsigned int);
+                     break;
+                 }
+-                fmtint(sbuffer, buffer, &currlen, maxlen, value,
+-                       ch == 'o' ? 8 : (ch == 'u' ? 10 : 16),
+-                       min, max, flags);
++                if (!fmtint(sbuffer, buffer, &currlen, maxlen, value,
++                            ch == 'o' ? 8 : (ch == 'u' ? 10 : 16),
++                            min, max, flags))
++                    return 0;
+                 break;
+             case 'f':
+                 if (cflags == DP_C_LDOUBLE)
+                     fvalue = va_arg(args, LDOUBLE);
+                 else
+                     fvalue = va_arg(args, double);
+-                fmtfp(sbuffer, buffer, &currlen, maxlen,
+-                      fvalue, min, max, flags);
++                if (!fmtfp(sbuffer, buffer, &currlen, maxlen, fvalue, min, max,
++                           flags))
++                    return 0;
+                 break;
+             case 'E':
+                 flags |= DP_F_UP;
+@@ -361,8 +365,9 @@ _dopr(
+                     fvalue = va_arg(args, double);
+                 break;
+             case 'c':
+-                doapr_outch(sbuffer, buffer, &currlen, maxlen,
+-                    va_arg(args, int));
++                if(!doapr_outch(sbuffer, buffer, &currlen, maxlen,
++                            va_arg(args, int)))
++                    return 0;
+                 break;
+             case 's':
+                 strvalue = va_arg(args, char *);
+@@ -372,13 +377,15 @@ _dopr(
+ 		    else
+ 			max = *maxlen;
+ 		}
+-                fmtstr(sbuffer, buffer, &currlen, maxlen, strvalue,
+-                       flags, min, max);
++                if (!fmtstr(sbuffer, buffer, &currlen, maxlen, strvalue,
++                            flags, min, max))
++                    return 0;
+                 break;
+             case 'p':
+                 value = (long)va_arg(args, void *);
+-                fmtint(sbuffer, buffer, &currlen, maxlen,
+-                    value, 16, min, max, flags|DP_F_NUM);
++                if (!fmtint(sbuffer, buffer, &currlen, maxlen,
++                            value, 16, min, max, flags | DP_F_NUM))
++                    return 0;
+                 break;
+             case 'n': /* XXX */
+                 if (cflags == DP_C_SHORT) {
+@@ -400,7 +407,8 @@ _dopr(
+                 }
+                 break;
+             case '%':
+-                doapr_outch(sbuffer, buffer, &currlen, maxlen, ch);
++                if(!doapr_outch(sbuffer, buffer, &currlen, maxlen, ch))
++                    return 0;
+                 break;
+             case 'w':
+                 /* not supported yet, treat as next char */
+@@ -424,12 +432,13 @@ _dopr(
+     *truncated = (currlen > *maxlen - 1);
+     if (*truncated)
+         currlen = *maxlen - 1;
+-    doapr_outch(sbuffer, buffer, &currlen, maxlen, '\0');
++    if(!doapr_outch(sbuffer, buffer, &currlen, maxlen, '\0'))
++        return 0;
+     *retlen = currlen - 1;
+-    return;
++    return 1;
+ }
+ 
+-static void
++static int
+ fmtstr(
+     char **sbuffer,
+     char **buffer,
+@@ -440,36 +449,44 @@ fmtstr(
+     int min,
+     int max)
+ {
+-    int padlen, strln;
++    int padlen;
++    size_t strln;
+     int cnt = 0;
+ 
+     if (value == 0)
+         value = "<NULL>";
+-    for (strln = 0; value[strln]; ++strln)
+-        ;
++
++    strln = strlen(value);
++    if (strln > INT_MAX)
++        strln = INT_MAX;
++
+     padlen = min - strln;
+-    if (padlen < 0)
++    if (min < 0 || padlen < 0)
+         padlen = 0;
+     if (flags & DP_F_MINUS)
+         padlen = -padlen;
+ 
+     while ((padlen > 0) && (cnt < max)) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         --padlen;
+         ++cnt;
+     }
+     while (*value && (cnt < max)) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, *value++);
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, *value++))
++            return 0;
+         ++cnt;
+     }
+     while ((padlen < 0) && (cnt < max)) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         ++padlen;
+         ++cnt;
+     }
++    return 1;
+ }
+ 
+-static void
++static int
+ fmtint(
+     char **sbuffer,
+     char **buffer,
+@@ -533,37 +550,44 @@ fmtint(
+ 
+     /* spaces */
+     while (spadlen > 0) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         --spadlen;
+     }
+ 
+     /* sign */
+     if (signvalue)
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue);
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue))
++            return 0;
+ 
+     /* prefix */
+     while (*prefix) {
+-	doapr_outch(sbuffer, buffer, currlen, maxlen, *prefix);
++        if(!doapr_outch(sbuffer, buffer, currlen, maxlen, *prefix))
++            return 0;
+ 	prefix++;
+     }
+ 
+     /* zeros */
+     if (zpadlen > 0) {
+         while (zpadlen > 0) {
+-            doapr_outch(sbuffer, buffer, currlen, maxlen, '0');
++            if(!doapr_outch(sbuffer, buffer, currlen, maxlen, '0'))
++                return 0;
+             --zpadlen;
+         }
+     }
+     /* digits */
+-    while (place > 0)
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, convert[--place]);
++    while (place > 0) {
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, convert[--place]))
++            return 0;
++    }
+ 
+     /* left justified spaces */
+     while (spadlen < 0) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         ++spadlen;
+     }
+-    return;
++    return 1;
+ }
+ 
+ static LDOUBLE
+@@ -597,7 +621,7 @@ roundv(LDOUBLE value)
+     return intpart;
+ }
+ 
+-static void
++static int
+ fmtfp(
+     char **sbuffer,
+     char **buffer,
+@@ -682,47 +706,60 @@ fmtfp(
+ 
+     if ((flags & DP_F_ZERO) && (padlen > 0)) {
+         if (signvalue) {
+-            doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue);
++            if (!doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue))
++                return 0;
+             --padlen;
+             signvalue = 0;
+         }
+         while (padlen > 0) {
+-            doapr_outch(sbuffer, buffer, currlen, maxlen, '0');
++            if (!doapr_outch(sbuffer, buffer, currlen, maxlen, '0'))
++                return 0;
+             --padlen;
+         }
+     }
+     while (padlen > 0) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         --padlen;
+     }
+-    if (signvalue)
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue);
++    if (signvalue && !doapr_outch(sbuffer, buffer, currlen, maxlen, signvalue))
++        return 0;
+ 
+-    while (iplace > 0)
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, iconvert[--iplace]);
++    while (iplace > 0) {
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, iconvert[--iplace]))
++            return 0;
++    }
+ 
+     /*
+      * Decimal point. This should probably use locale to find the correct
+      * char to print out.
+      */
+     if (max > 0 || (flags & DP_F_NUM)) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, '.');
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, '.'))
++            return 0;
+ 
+-        while (fplace > 0)
+-            doapr_outch(sbuffer, buffer, currlen, maxlen, fconvert[--fplace]);
++        while (fplace > 0) {
++            if (!doapr_outch(sbuffer, buffer, currlen, maxlen, fconvert[--fplace]))
++                return 0;
++        }
+     }
+     while (zpadlen > 0) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, '0');
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, '0'))
++            return 0;
+         --zpadlen;
+     }
+ 
+     while (padlen < 0) {
+-        doapr_outch(sbuffer, buffer, currlen, maxlen, ' ');
++        if (!doapr_outch(sbuffer, buffer, currlen, maxlen, ' '))
++            return 0;
+         ++padlen;
+     }
++    return 1;
+ }
+ 
+-static void
++#define BUFFER_INC  1024
++
++static int
+ doapr_outch(
+     char **sbuffer,
+     char **buffer,
+@@ -733,24 +770,27 @@ doapr_outch(
+     /* If we haven't at least one buffer, someone has doe a big booboo */
+     assert(*sbuffer != NULL || buffer != NULL);
+ 
+-    if (buffer) {
+-	while (*currlen >= *maxlen) {
+-	    if (*buffer == NULL) {
+-		if (*maxlen == 0)
+-		    *maxlen = 1024;
+-		*buffer = OPENSSL_malloc(*maxlen);
+-		if (*currlen > 0) {
+-		    assert(*sbuffer != NULL);
+-		    memcpy(*buffer, *sbuffer, *currlen);
+-		}
+-		*sbuffer = NULL;
+-	    } else {
+-		*maxlen += 1024;
+-		*buffer = OPENSSL_realloc(*buffer, *maxlen);
+-	    }
+-	}
+-	/* What to do if *buffer is NULL? */
+-	assert(*sbuffer != NULL || *buffer != NULL);
++    if (buffer && *currlen == *maxlen) {
++        if (*maxlen > INT_MAX - BUFFER_INC)
++            return 0;
++
++        *maxlen += BUFFER_INC;
++        if (*buffer == NULL) {
++            *buffer = OPENSSL_malloc(*maxlen);
++            if (*buffer == NULL)
++                return 0;
++            if (*currlen > 0) {
++                assert(*sbuffer != NULL);
++                memcpy(*buffer, *sbuffer, *currlen);
++            }
++            *sbuffer = NULL;
++        } else {
++            char *tmpbuf;
++            tmpbuf = OPENSSL_realloc(*buffer, *maxlen);
++            if (tmpbuf == NULL)
++                return 0;
++            *buffer = tmpbuf;
++        }
+     }
+ 
+     if (*currlen < *maxlen) {
+@@ -760,7 +800,7 @@ doapr_outch(
+ 	    (*buffer)[(*currlen)++] = (char)c;
+     }
+ 
+-    return;
++    return 1;
+ }
+ 
+ /***************************************************************************/
+@@ -792,8 +832,11 @@ int BIO_vprintf (BIO *bio, const char *f
+ 
+ 	dynbuf = NULL;
+ 	CRYPTO_push_info("doapr()");
+-	_dopr(&hugebufp, &dynbuf, &hugebufsize,
+-		&retlen, &ignored, format, args);
++	if (!_dopr(&hugebufp, &dynbuf, &hugebufsize,
++		&retlen, &ignored, format, args)) {
++            OPENSSL_free(dynbuf);
++            return -1;
++        }
+ 	if (dynbuf)
+ 		{
+ 		ret=BIO_write(bio, dynbuf, (int)retlen);
+@@ -829,7 +872,8 @@ int BIO_vsnprintf(char *buf, size_t n, c
+ 	size_t retlen;
+ 	int truncated;
+ 
+-	_dopr(&buf, NULL, &n, &retlen, &truncated, format, args);
++        if(!_dopr(&buf, NULL, &n, &retlen, &truncated, format, args))
++            return -1;
+ 
+ 	if (truncated)
+ 		/* In case of truncation, return -1 like traditional snprintf.
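
With this change the internal formatter and its helpers report failure instead
of returning void, so BIO_vprintf() and BIO_vsnprintf() now return -1 on
allocation failure or on an over-long "%s" argument, not only on truncation.
Callers that format untrusted data should therefore treat a negative return as
an error. A minimal sketch under that assumption; the helper name and buffer
size are illustrative.

    /* Sketch only: check BIO_*printf return values, which after this patch
     * also signal internal failure rather than just truncation. */
    #include <stdio.h>
    #include <openssl/bio.h>

    int print_label(const char *untrusted)
    {
        char buf[256];
        BIO *out = BIO_new_fp(stdout, BIO_NOCLOSE);
        int ok = 0;

        if (out == NULL)
            return 0;

        /* BIO_snprintf() returns -1 on truncation or internal error. */
        if (BIO_snprintf(buf, sizeof(buf), "label: %s", untrusted) >= 0
                && BIO_printf(out, "%s\n", buf) > 0)
            ok = 1;

        BIO_free(out);
        return ok;
    }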

Added: openssl/branches/wheezy/debian/patches/Disable-EXPORT-and-LOW-ciphers.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/Disable-EXPORT-and-LOW-ciphers.patch	                        (rev 0)
+++ openssl/branches/wheezy/debian/patches/Disable-EXPORT-and-LOW-ciphers.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -0,0 +1,624 @@
+Index: openssl-1.0.1e/doc/apps/ciphers.pod
+===================================================================
+--- openssl-1.0.1e.orig/doc/apps/ciphers.pod
++++ openssl-1.0.1e/doc/apps/ciphers.pod
+@@ -139,34 +139,46 @@ than 128 bits, and some cipher suites wi
+ 
+ =item B<LOW>
+ 
+-"low" encryption cipher suites, currently those using 64 or 56 bit encryption algorithms
+-but excluding export cipher suites.
++Low strength encryption cipher suites, currently those using 64 or 56 bit
++encryption algorithms but excluding export cipher suites.
++These are disabled in default builds.
+ 
+ =item B<EXP>, B<EXPORT>
+ 
+-export encryption algorithms. Including 40 and 56 bits algorithms.
++Export strength encryption algorithms, including 40- and 56-bit algorithms.
++These are disabled in default builds.
+ 
+ =item B<EXPORT40>
+ 
+-40 bit export encryption algorithms
++40-bit export encryption algorithms.
++These are disabled in default builds.
+ 
+ =item B<EXPORT56>
+ 
+-56 bit export encryption algorithms. In OpenSSL 0.9.8c and later the set of
++56-bit export encryption algorithms. In OpenSSL 0.9.8c and later the set of
+ 56 bit export ciphers is empty unless OpenSSL has been explicitly configured
+ with support for experimental ciphers.
++These are disabled in default builds.
+ 
+ =item B<eNULL>, B<NULL>
+ 
+-the "NULL" ciphers that is those offering no encryption. Because these offer no
+-encryption at all and are a security risk they are disabled unless explicitly
+-included.
++The "NULL" ciphers, that is those offering no encryption. Because these offer
++no encryption at all and are a security risk, they are not enabled via either
++the B<DEFAULT> or B<ALL> cipher strings.
++Be careful when building cipherlists out of lower-level primitives such as
++B<kRSA> or B<aECDSA>, as these do overlap with the B<eNULL> ciphers.
++When in doubt, include B<!eNULL> in your cipherlist.
+ 
+ =item B<aNULL>
+ 
+-the cipher suites offering no authentication. This is currently the anonymous
+-DH algorithms. These cipher suites are vulnerable to a "man in the middle"
+-attack and so their use is normally discouraged.
++The cipher suites offering no authentication. These are currently the
++anonymous DH and anonymous ECDH algorithms. These cipher suites are vulnerable
++to a "man in the middle" attack and so their use is normally discouraged.
++These are excluded from the B<DEFAULT> ciphers, but included in the B<ALL>
++ciphers.
++Be careful when building cipherlists out of lower-level primitives such as
++B<kDHE> or B<AES>, as these do overlap with the B<aNULL> ciphers.
++When in doubt, include B<!aNULL> in your cipherlist.
+ 
+ =item B<kRSA>, B<RSA>
+ 
+Index: openssl-1.0.1e/ssl/s3_lib.c
+===================================================================
+--- openssl-1.0.1e.orig/ssl/s3_lib.c
++++ openssl-1.0.1e/ssl/s3_lib.c
+@@ -202,22 +202,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	0,
+ 	},
+ 
+-/* Cipher 03 */
+-	{
+-	1,
+-	SSL3_TXT_RSA_RC4_40_MD5,
+-	SSL3_CK_RSA_RC4_40_MD5,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_RC4,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+ /* Cipher 04 */
+ 	{
+ 	1,
+@@ -250,22 +234,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	128,
+ 	},
+ 
+-/* Cipher 06 */
+-	{
+-	1,
+-	SSL3_TXT_RSA_RC2_40_MD5,
+-	SSL3_CK_RSA_RC2_40_MD5,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_RC2,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+ /* Cipher 07 */
+ #ifndef OPENSSL_NO_IDEA
+ 	{
+@@ -284,38 +252,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	},
+ #endif
+ 
+-/* Cipher 08 */
+-	{
+-	1,
+-	SSL3_TXT_RSA_DES_40_CBC_SHA,
+-	SSL3_CK_RSA_DES_40_CBC_SHA,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 09 */
+-	{
+-	1,
+-	SSL3_TXT_RSA_DES_64_CBC_SHA,
+-	SSL3_CK_RSA_DES_64_CBC_SHA,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 0A */
+ 	{
+ 	1,
+@@ -332,39 +268,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	168,
+ 	},
+ 
+-/* The DH ciphers */
+-/* Cipher 0B */
+-	{
+-	0,
+-	SSL3_TXT_DH_DSS_DES_40_CBC_SHA,
+-	SSL3_CK_DH_DSS_DES_40_CBC_SHA,
+-	SSL_kDHd,
+-	SSL_aDH,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 0C */
+-	{
+-	0, /* not implemented (non-ephemeral DH) */
+-	SSL3_TXT_DH_DSS_DES_64_CBC_SHA,
+-	SSL3_CK_DH_DSS_DES_64_CBC_SHA,
+-	SSL_kDHd,
+-	SSL_aDH,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 0D */
+ 	{
+ 	0, /* not implemented (non-ephemeral DH) */
+@@ -381,38 +284,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	168,
+ 	},
+ 
+-/* Cipher 0E */
+-	{
+-	0, /* not implemented (non-ephemeral DH) */
+-	SSL3_TXT_DH_RSA_DES_40_CBC_SHA,
+-	SSL3_CK_DH_RSA_DES_40_CBC_SHA,
+-	SSL_kDHr,
+-	SSL_aDH,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 0F */
+-	{
+-	0, /* not implemented (non-ephemeral DH) */
+-	SSL3_TXT_DH_RSA_DES_64_CBC_SHA,
+-	SSL3_CK_DH_RSA_DES_64_CBC_SHA,
+-	SSL_kDHr,
+-	SSL_aDH,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 10 */
+ 	{
+ 	0, /* not implemented (non-ephemeral DH) */
+@@ -430,38 +301,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	},
+ 
+ /* The Ephemeral DH ciphers */
+-/* Cipher 11 */
+-	{
+-	1,
+-	SSL3_TXT_EDH_DSS_DES_40_CBC_SHA,
+-	SSL3_CK_EDH_DSS_DES_40_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aDSS,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 12 */
+-	{
+-	1,
+-	SSL3_TXT_EDH_DSS_DES_64_CBC_SHA,
+-	SSL3_CK_EDH_DSS_DES_64_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aDSS,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 13 */
+ 	{
+ 	1,
+@@ -478,38 +317,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	168,
+ 	},
+ 
+-/* Cipher 14 */
+-	{
+-	1,
+-	SSL3_TXT_EDH_RSA_DES_40_CBC_SHA,
+-	SSL3_CK_EDH_RSA_DES_40_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aRSA,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 15 */
+-	{
+-	1,
+-	SSL3_TXT_EDH_RSA_DES_64_CBC_SHA,
+-	SSL3_CK_EDH_RSA_DES_64_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aRSA,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 16 */
+ 	{
+ 	1,
+@@ -526,22 +333,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	168,
+ 	},
+ 
+-/* Cipher 17 */
+-	{
+-	1,
+-	SSL3_TXT_ADH_RC4_40_MD5,
+-	SSL3_CK_ADH_RC4_40_MD5,
+-	SSL_kEDH,
+-	SSL_aNULL,
+-	SSL_RC4,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+ /* Cipher 18 */
+ 	{
+ 	1,
+@@ -558,38 +349,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	128,
+ 	},
+ 
+-/* Cipher 19 */
+-	{
+-	1,
+-	SSL3_TXT_ADH_DES_40_CBC_SHA,
+-	SSL3_CK_ADH_DES_40_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aNULL,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+-/* Cipher 1A */
+-	{
+-	1,
+-	SSL3_TXT_ADH_DES_64_CBC_SHA,
+-	SSL3_CK_ADH_DES_64_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aNULL,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 1B */
+ 	{
+ 	1,
+@@ -659,22 +418,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 
+ #ifndef OPENSSL_NO_KRB5
+ /* The Kerberos ciphers*/
+-/* Cipher 1E */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_DES_64_CBC_SHA,
+-	SSL3_CK_KRB5_DES_64_CBC_SHA,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 1F */
+ 	{
+ 	1,
+@@ -723,22 +466,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	128,
+ 	},
+ 
+-/* Cipher 22 */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_DES_64_CBC_MD5,
+-	SSL3_CK_KRB5_DES_64_CBC_MD5,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_DES,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_NOT_EXP|SSL_LOW,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+ /* Cipher 23 */
+ 	{
+ 	1,
+@@ -786,102 +513,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 	128,
+ 	128,
+ 	},
+-
+-/* Cipher 26 */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_DES_40_CBC_SHA,
+-	SSL3_CK_KRB5_DES_40_CBC_SHA,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 27 */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_RC2_40_CBC_SHA,
+-	SSL3_CK_KRB5_RC2_40_CBC_SHA,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_RC2,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+-/* Cipher 28 */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_RC4_40_SHA,
+-	SSL3_CK_KRB5_RC4_40_SHA,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_RC4,
+-	SSL_SHA1,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+-/* Cipher 29 */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_DES_40_CBC_MD5,
+-	SSL3_CK_KRB5_DES_40_CBC_MD5,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_DES,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	56,
+-	},
+-
+-/* Cipher 2A */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_RC2_40_CBC_MD5,
+-	SSL3_CK_KRB5_RC2_40_CBC_MD5,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_RC2,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+-
+-/* Cipher 2B */
+-	{
+-	1,
+-	SSL3_TXT_KRB5_RC4_40_MD5,
+-	SSL3_CK_KRB5_RC4_40_MD5,
+-	SSL_kKRB5,
+-	SSL_aKRB5,
+-	SSL_RC4,
+-	SSL_MD5,
+-	SSL_SSLV3,
+-	SSL_EXPORT|SSL_EXP40,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	40,
+-	128,
+-	},
+ #endif	/* OPENSSL_NO_KRB5 */
+ 
+ /* New AES ciphersuites */
+@@ -1270,104 +901,6 @@ OPENSSL_GLOBAL SSL_CIPHER ssl3_ciphers[]
+ 
+ #if TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES
+ 	/* New TLS Export CipherSuites from expired ID */
+-#if 0
+-	/* Cipher 60 */
+-	{
+-	1,
+-	TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_MD5,
+-	TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_MD5,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_RC4,
+-	SSL_MD5,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	128,
+-	},
+-
+-	/* Cipher 61 */
+-	{
+-	1,
+-	TLS1_TXT_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5,
+-	TLS1_CK_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_RC2,
+-	SSL_MD5,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	128,
+-	},
+-#endif
+-
+-	/* Cipher 62 */
+-	{
+-	1,
+-	TLS1_TXT_RSA_EXPORT1024_WITH_DES_CBC_SHA,
+-	TLS1_CK_RSA_EXPORT1024_WITH_DES_CBC_SHA,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+-	/* Cipher 63 */
+-	{
+-	1,
+-	TLS1_TXT_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA,
+-	TLS1_CK_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA,
+-	SSL_kEDH,
+-	SSL_aDSS,
+-	SSL_DES,
+-	SSL_SHA1,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	56,
+-	},
+-
+-	/* Cipher 64 */
+-	{
+-	1,
+-	TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_SHA,
+-	TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_SHA,
+-	SSL_kRSA,
+-	SSL_aRSA,
+-	SSL_RC4,
+-	SSL_SHA1,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	128,
+-	},
+-
+-	/* Cipher 65 */
+-	{
+-	1,
+-	TLS1_TXT_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA,
+-	TLS1_CK_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA,
+-	SSL_kEDH,
+-	SSL_aDSS,
+-	SSL_RC4,
+-	SSL_SHA1,
+-	SSL_TLSV1,
+-	SSL_EXPORT|SSL_EXP56,
+-	SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF,
+-	56,
+-	128,
+-	},
+-
+ 	/* Cipher 66 */
+ 	{
+ 	1,
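
Beyond removing the EXPORT and LOW cipher suites from the library, the updated
ciphers.pod text advises explicitly excluding eNULL and aNULL when cipher lists
are assembled from lower-level primitives. Applications that may also run
against unpatched builds can apply the same exclusions defensively in their own
cipher string; the sketch below is only an illustration and its cipher string
is an example, not something mandated by this patch.

    /* Sketch: application-level exclusion of the cipher classes this patch
     * disables, plus the eNULL/aNULL exclusions recommended in ciphers.pod. */
    #include <openssl/ssl.h>

    SSL_CTX *make_client_ctx(void)
    {
        SSL_CTX *ctx;

        SSL_library_init();
        SSL_load_error_strings();

        ctx = SSL_CTX_new(SSLv23_client_method());
        if (ctx == NULL)
            return NULL;

        /* Example policy string; adjust to local requirements. */
        if (!SSL_CTX_set_cipher_list(ctx,
                                     "HIGH:!aNULL:!eNULL:!EXPORT:!LOW:!MD5")) {
            SSL_CTX_free(ctx);
            return NULL;
        }
        return ctx;
    }

On a library with this patch applied, "openssl ciphers -v 'EXPORT:LOW'" should
no longer match any suites and will typically report an error for the empty
list instead.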

Modified: openssl/branches/wheezy/debian/patches/series
===================================================================
--- openssl/branches/wheezy/debian/patches/series	2016-03-01 18:32:07 UTC (rev 768)
+++ openssl/branches/wheezy/debian/patches/series	2016-03-01 18:32:33 UTC (rev 769)
@@ -108,4 +108,11 @@
 CVE-2015-3194.patch
 CVE-2015-3195.patch
 CVE-2015-3196.patch
+CVE-2015-7575.patch
+Disable-EXPORT-and-LOW-ciphers.patch
+CVE-2016-0797.patch
+CVE-2016-0798.patch
+CVE-2016-0799.patch
+CVE-2016-0702.patch
+CVE-2016-0705.patch
 

Modified: openssl/branches/wheezy/debian/patches/version-script.patch
===================================================================
--- openssl/branches/wheezy/debian/patches/version-script.patch	2016-03-01 18:32:07 UTC (rev 768)
+++ openssl/branches/wheezy/debian/patches/version-script.patch	2016-03-01 18:32:33 UTC (rev 769)
@@ -15,7 +15,7 @@
 ===================================================================
 --- /dev/null	1970-01-01 00:00:00.000000000 +0000
 +++ openssl-1.0.1d/openssl.ld	2013-02-06 19:44:25.000000000 +0100
-@@ -0,0 +1,4620 @@
+@@ -0,0 +1,4626 @@
 +OPENSSL_1.0.0 {
 +	global:
 +		BIO_f_ssl;
@@ -4636,6 +4636,12 @@
 +		CRYPTO_memcmp;
 +} OPENSSL_1.0.1;
 +
++OPENSSL_1.0.1s {
++       global:
++               SRP_VBASE_get1_by_user;
++               SRP_user_pwd_free;
++} OPENSSL_1.0.1d;
++
 Index: openssl-1.0.1d/engines/openssl.ld
 ===================================================================
 --- /dev/null	1970-01-01 00:00:00.000000000 +0000



