Please test and review libgcrypt11 1.4.5-2+squeeze2

Raphael Hertzog hertzog at debian.org
Thu Nov 20 15:02:51 UTC 2014


Hello,

I have prepared a new version of libgcrypt11 for squeeze-lts that fixes
CVE-2014-5270:

 libgcrypt11 (1.4.5-2+squeeze2) squeeze-lts; urgency=medium
 .
   * Non-maintainer upload by the Debian LTS team.
   * Add 37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch patch.
     Replace deliberate division by zero with _gcry_divide_by_zero.
   * Add 38_CVE-2014-5270.patch patch.
     CVE-2014-5270: side-channel attack on Elgamal encryption subkeys.
     Cryptanalysis attack as described by Genkin, Pipman and Tromer. See
     <http://www.cs.tau.ac.il/~tromer/handsoff/>
   * Both patches have been backported from the 1.5.0-5+deb7u2 wheezy
     security update.

I took both patches from the wheezy package; each required some small
adjustments to apply to the libgcrypt version in squeeze. The debdiff is
attached if you want to review it.

You can grab some .debs to test here:
dget https://people.debian.org/~hertzog/packages/libgcrypt11_1.4.5-2+squeeze2_amd64.changes
dget https://people.debian.org/~hertzog/packages/libgcrypt11_1.4.5-2+squeeze2_i386.changes

Please install the packages and let me know whether everything still works
fine. I'm not quite sure what a good test entails; I did some basic tests with
gpg2. libgcrypt11 has many reverse dependencies, but many of them are
libraries that can't easily be tested on their own.
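
If you want to exercise the changed code path directly, here is a rough
sketch of a standalone check (my own suggestion, not something shipped in
the upload) that calls gcry_mpi_powm, the function rewritten by patch 38.
Build it against the updated packages with: gcc test-powm.c -o test-powm -lgcrypt

/* test-powm.c -- hypothetical smoke test, not part of the upload. */
#include <stdio.h>
#include <gcrypt.h>

int main (void)
{
  gcry_mpi_t base, expo, mod, res;

  /* Minimal libgcrypt initialization without secure memory. */
  if (!gcry_check_version (NULL))
    {
      fprintf (stderr, "libgcrypt initialization failed\n");
      return 1;
    }
  gcry_control (GCRYCTL_DISABLE_SECMEM, 0);
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  base = gcry_mpi_set_ui (NULL, 4);
  expo = gcry_mpi_set_ui (NULL, 13);
  mod  = gcry_mpi_set_ui (NULL, 497);
  res  = gcry_mpi_new (0);

  /* Exercise the rewritten exponentiation: 4^13 mod 497 = 445. */
  gcry_mpi_powm (res, base, expo, mod);

  if (gcry_mpi_cmp_ui (res, 445) == 0)
    printf ("gcry_mpi_powm OK (4^13 mod 497 == 445)\n");
  else
    printf ("gcry_mpi_powm MISMATCH\n");

  gcry_mpi_release (base);
  gcry_mpi_release (expo);
  gcry_mpi_release (mod);
  gcry_mpi_release (res);
  return 0;
}

A correct result at least confirms the new left-to-right k-ary
exponentiation returns the same values as the old code for small inputs;
it obviously says nothing about the side-channel properties themselves.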

Thank you!
-- 
Raphaël Hertzog ◈ Debian Developer

Support Debian LTS: http://www.freexian.com/services/debian-lts.html
Learn to master Debian: http://debian-handbook.info/get/
-------------- next part --------------
diff -Nru libgcrypt11-1.4.5/debian/changelog libgcrypt11-1.4.5/debian/changelog
--- libgcrypt11-1.4.5/debian/changelog	2013-07-27 13:43:00.000000000 +0200
+++ libgcrypt11-1.4.5/debian/changelog	2014-11-20 14:50:00.000000000 +0100
@@ -1,3 +1,17 @@
+libgcrypt11 (1.4.5-2+squeeze2) squeeze-lts; urgency=medium
+
+  * Non-maintainer upload by the Debian LTS team.
+  * Add 37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch patch.
+    Replace deliberate division by zero with _gcry_divide_by_zero.
+  * Add 38_CVE-2014-5270.patch patch.
+    CVE-2014-5270: side-channel attack on Elgamal encryption subkeys.
+    Cryptanalysis attack as described by Genkin, Pipman and Tromer. See
+    <http://www.cs.tau.ac.il/~tromer/handsoff/>
+  * Both patches have been backported from the 1.5.0-5+deb7u2 wheezy
+    security update.
+
+ -- Raphaël Hertzog <hertzog at debian.org>  Thu, 20 Nov 2014 14:46:05 +0100
+
 libgcrypt11 (1.4.5-2+squeeze1) squeeze-security; urgency=high
 
   * Pull and unfuzzz code changes from 1.5.3 security fix release from
diff -Nru libgcrypt11-1.4.5/debian/patches/37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch libgcrypt11-1.4.5/debian/patches/37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch
--- libgcrypt11-1.4.5/debian/patches/37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch	1970-01-01 01:00:00.000000000 +0100
+++ libgcrypt11-1.4.5/debian/patches/37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch	2014-11-20 15:14:20.000000000 +0100
@@ -0,0 +1,84 @@
+From 6c3598f1f6a6f2548b60a31ce3c0dd9885558a4f Mon Sep 17 00:00:00 2001
+From: Xi Wang <xi.wang at gmail.com>
+Date: Tue, 14 Aug 2012 18:54:40 -0400
+Subject: [PATCH 1/2] Replace deliberate division by zero with
+ _gcry_divide_by_zero.
+
+* mpi/mpi-pow.c: Replace 1 / msize.
+* mpi/mpih-div.c: Replace 1 / dsize.
+* src/misc.c: Add _gcry_divide_by_zero.
+--
+
+1) Division by zero doesn't "provoke a signal" on architectures
+   like PowerPC.
+
+2) C compilers like clang will optimize away these divisions, even
+   though the code tries "to make the compiler not remove" them.
+
+This patch redirects these cases to _gcry_divide_by_zero.
+
+(cherry picked from commit 2c54c4da19d3a79e9f749740828026dd41f0521a)
+---
+ mpi/mpi-pow.c  | 2 +-
+ mpi/mpih-div.c | 5 ++---
+ src/g10lib.h   | 2 ++
+ src/misc.c     | 8 ++++++++
+ 4 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/mpi/mpi-pow.c
++++ b/mpi/mpi-pow.c
+@@ -78,7 +78,7 @@ gcry_mpi_powm (gcry_mpi_t res,
+   ep = expo->d;
+ 
+   if (!msize)
+-    msize = 1 / msize;	    /* Provoke a signal.  */
++    _gcry_divide_by_zero();
+ 
+   if (!esize) 
+     {
+--- a/mpi/mpih-div.c
++++ b/mpi/mpih-div.c
+@@ -212,9 +212,8 @@ _gcry_mpih_divrem( mpi_ptr_t qp, mpi_siz
+ 
+     switch(dsize) {
+       case 0:
+-	/* We are asked to divide by zero, so go ahead and do it!  (To make
+-	   the compiler not remove this statement, return the value.)  */
+-	return 1 / dsize;
++	_gcry_divide_by_zero();
++	break;
+ 
+       case 1:
+ 	{
+--- a/src/g10lib.h
++++ b/src/g10lib.h
+@@ -101,6 +101,8 @@ void _gcry_bug (const char *file, int li
+ void _gcry_assert_failed (const char *expr, const char *file, int line);
+ #endif
+ 
++void _gcry_divide_by_zero (void) JNLIB_GCC_A_NR;
++
+ const char *_gcry_gettext (const char *key) GCC_ATTR_FORMAT_ARG(1);
+ void _gcry_fatal_error(int rc, const char *text ) JNLIB_GCC_A_NR;
+ void _gcry_log( int level, const char *fmt, ... ) JNLIB_GCC_A_PRINTF(2,3);
+--- a/src/misc.c
++++ b/src/misc.c
+@@ -19,6 +19,7 @@
+  */
+ 
+ #include <config.h>
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -294,3 +295,10 @@ _gcry_burn_stack (int bytes)
+     if (bytes > 0)
+         _gcry_burn_stack (bytes);
+ }
++
++void
++_gcry_divide_by_zero (void)
++{
++    errno = EDOM;
++    _gcry_fatal_error (gpg_err_code_from_errno (errno), "divide by zero");
++}
diff -Nru libgcrypt11-1.4.5/debian/patches/38_CVE-2014-5270.patch libgcrypt11-1.4.5/debian/patches/38_CVE-2014-5270.patch
--- libgcrypt11-1.4.5/debian/patches/38_CVE-2014-5270.patch	1970-01-01 01:00:00.000000000 +0100
+++ libgcrypt11-1.4.5/debian/patches/38_CVE-2014-5270.patch	2014-11-20 15:02:15.000000000 +0100
@@ -0,0 +1,490 @@
+From 62e8e1283268f1d3b6d0cfb2fc4e7835bbcdaab6 Mon Sep 17 00:00:00 2001
+From: NIIBE Yutaka <gniibe at fsij.org>
+Date: Wed, 2 Oct 2013 09:27:09 +0900
+Subject: [PATCH 2/2] mpi: mpi-pow improvement.
+
+* mpi/mpi-pow.c (gcry_mpi_powm): New implementation of left-to-right
+k-ary exponentiation.
+--
+
+Signed-off-by: NIIBE Yutaka <gniibe at fsij.org>
+
+For the Yarom/Falkner flush+reload cache side-channel attack, we
+changed the code so that it always calls the multiplication routine
+(even if we can skip it to get result).  This results some performance
+regression.
+
+This change is for recovering performance with efficient algorithm.
+
+(cherry picked from commit 45aa6131e93fac89d46733b3436d960f35fb99b2)
+---
+ mpi/mpi-pow.c | 454 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 454 insertions(+)
+
+--- a/mpi/mpi-pow.c
++++ b/mpi/mpi-pow.c
+@@ -34,6 +34,14 @@
+ #include "longlong.h"
+ 
+ 
++/*
++ * When you need old implementation, please add compilation option
++ * -DUSE_ALGORITHM_SIMPLE_EXPONENTIATION
++ * or expose this line:
++#define USE_ALGORITHM_SIMPLE_EXPONENTIATION 1
++ */
++
++#if defined(USE_ALGORITHM_SIMPLE_EXPONENTIATION)
+ /****************
+  * RES = BASE ^ EXPO mod MOD
+  */
+@@ -338,4 +346,449 @@ gcry_mpi_powm (gcry_mpi_t res,
+   if (tspace)
+     _gcry_mpi_free_limb_space( tspace, 0 );
+ }
++#else
++/**
++ * Internal function to compute
++ *
++ *    X = R * S mod M
++ *
++ * and set the size of X at the pointer XSIZE_P.
++ * Use karatsuba structure at KARACTX_P.
++ *
++ * Condition:
++ *   RSIZE >= SSIZE
++ *   Enough space for X is allocated beforehand.
++ *
++ * For generic cases, we can/should use gcry_mpi_mulm.
++ * This function is use for specific internal case.
++ */
++static void
++mul_mod (mpi_ptr_t xp, mpi_size_t *xsize_p,
++         mpi_ptr_t rp, mpi_size_t rsize,
++         mpi_ptr_t sp, mpi_size_t ssize,
++         mpi_ptr_t mp, mpi_size_t msize,
++         struct karatsuba_ctx *karactx_p)
++{
++  if( ssize < KARATSUBA_THRESHOLD )
++    _gcry_mpih_mul ( xp, rp, rsize, sp, ssize );
++  else
++    _gcry_mpih_mul_karatsuba_case (xp, rp, rsize, sp, ssize, karactx_p);
++
++   if (rsize + ssize > msize)
++    {
++      _gcry_mpih_divrem (xp + msize, 0, xp, rsize + ssize, mp, msize);
++      *xsize_p = msize;
++    }
++   else
++     *xsize_p = rsize + ssize;
++}
+ 
++#define SIZE_B_2I3 ((1 << (5 - 1)) - 1)
++
++/****************
++ * RES = BASE ^ EXPO mod MOD
++ *
++ * To mitigate the Yarom/Falkner flush+reload cache side-channel
++ * attack on the RSA secret exponent, we don't use the square
++ * routine but multiplication.
++ *
++ * Reference:
++ *   Handbook of Applied Cryptography
++ *       Algorithm 14.83: Modified left-to-right k-ary exponentiation
++ */
++void
++gcry_mpi_powm (gcry_mpi_t res,
++               gcry_mpi_t base, gcry_mpi_t expo, gcry_mpi_t mod)
++{
++  /* Pointer to the limbs of the arguments, their size and signs. */
++  mpi_ptr_t  rp, ep, mp, bp;
++  mpi_size_t esize, msize, bsize, rsize;
++  int               msign, bsign, rsign;
++  /* Flags telling the secure allocation status of the arguments.  */
++  int        esec,  msec,  bsec;
++  /* Size of the result including space for temporary values.  */
++  mpi_size_t size;
++  /* Helper.  */
++  int mod_shift_cnt;
++  int negative_result;
++  mpi_ptr_t mp_marker = NULL;
++  mpi_ptr_t bp_marker = NULL;
++  mpi_ptr_t ep_marker = NULL;
++  mpi_ptr_t xp_marker = NULL;
++  unsigned int mp_nlimbs = 0;
++  unsigned int bp_nlimbs = 0;
++  unsigned int ep_nlimbs = 0;
++  unsigned int xp_nlimbs = 0;
++  mpi_ptr_t b_2i3[SIZE_B_2I3]; /* Pre-computed array: BASE^3, ^5, ^7, ... */
++  mpi_size_t b_2i3size[SIZE_B_2I3];
++  mpi_size_t W;
++  mpi_ptr_t base_u;
++  mpi_size_t base_u_size;
++
++  esize = expo->nlimbs;
++  msize = mod->nlimbs;
++  size = 2 * msize;
++  msign = mod->sign;
++
++  if (esize * BITS_PER_MPI_LIMB > 512)
++    W = 5;
++  else if (esize * BITS_PER_MPI_LIMB > 256)
++    W = 4;
++  else if (esize * BITS_PER_MPI_LIMB > 128)
++    W = 3;
++  else if (esize * BITS_PER_MPI_LIMB > 64)
++    W = 2;
++  else
++    W = 1;
++
++  esec = mpi_is_secure(expo);
++  msec = mpi_is_secure(mod);
++  bsec = mpi_is_secure(base);
++
++  rp = res->d;
++  ep = expo->d;
++
++  if (!msize)
++    _gcry_divide_by_zero();
++
++  if (!esize)
++    {
++      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending
++         on if MOD equals 1.  */
++      res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
++      if (res->nlimbs)
++        {
++          RESIZE_IF_NEEDED (res, 1);
++          rp = res->d;
++          rp[0] = 1;
++        }
++      res->sign = 0;
++      goto leave;
++    }
++
++  /* Normalize MOD (i.e. make its most significant bit set) as
++     required by mpn_divrem.  This will make the intermediate values
++     in the calculation slightly larger, but the correct result is
++     obtained after a final reduction using the original MOD value. */
++  mp_nlimbs = msec? msize:0;
++  mp = mp_marker = mpi_alloc_limb_space(msize, msec);
++  count_leading_zeros (mod_shift_cnt, mod->d[msize-1]);
++  if (mod_shift_cnt)
++    _gcry_mpih_lshift (mp, mod->d, msize, mod_shift_cnt);
++  else
++    MPN_COPY( mp, mod->d, msize );
++
++  bsize = base->nlimbs;
++  bsign = base->sign;
++  if (bsize > msize)
++    {
++      /* The base is larger than the module.  Reduce it.
++
++         Allocate (BSIZE + 1) with space for remainder and quotient.
++         (The quotient is (bsize - msize + 1) limbs.)  */
++      bp_nlimbs = bsec ? (bsize + 1):0;
++      bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
++      MPN_COPY ( bp, base->d, bsize );
++      /* We don't care about the quotient, store it above the
++       * remainder, at BP + MSIZE.  */
++      _gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize );
++      bsize = msize;
++      /* Canonicalize the base, since we are going to multiply with it
++         quite a few times.  */
++      MPN_NORMALIZE( bp, bsize );
++    }
++  else
++    bp = base->d;
++
++  if (!bsize)
++    {
++      res->nlimbs = 0;
++      res->sign = 0;
++      goto leave;
++    }
++
++
++  /* Make BASE, EXPO and MOD not overlap with RES.  */
++  if ( rp == bp )
++    {
++      /* RES and BASE are identical.  Allocate temp. space for BASE.  */
++      gcry_assert (!bp_marker);
++      bp_nlimbs = bsec? bsize:0;
++      bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
++      MPN_COPY(bp, rp, bsize);
++    }
++  if ( rp == ep )
++    {
++      /* RES and EXPO are identical.  Allocate temp. space for EXPO.  */
++      ep_nlimbs = esec? esize:0;
++      ep = ep_marker = mpi_alloc_limb_space( esize, esec );
++      MPN_COPY(ep, rp, esize);
++    }
++  if ( rp == mp )
++    {
++      /* RES and MOD are identical.  Allocate temporary space for MOD.*/
++      gcry_assert (!mp_marker);
++      mp_nlimbs = msec?msize:0;
++      mp = mp_marker = mpi_alloc_limb_space( msize, msec );
++      MPN_COPY(mp, rp, msize);
++    }
++
++  /* Copy base to the result.  */
++  if (res->alloced < size)
++    {
++      mpi_resize (res, size);
++      rp = res->d;
++    }
++
++  /* Main processing.  */
++  {
++    mpi_size_t i, j;
++    mpi_ptr_t xp;
++    mpi_size_t xsize;
++    int c;
++    mpi_limb_t e;
++    mpi_limb_t carry_limb;
++    struct karatsuba_ctx karactx;
++    mpi_ptr_t tp;
++
++    xp_nlimbs = msec? (2 * (msize + 1)):0;
++    xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );
++
++    memset( &karactx, 0, sizeof karactx );
++    negative_result = (ep[0] & 1) && bsign;
++
++    /* Precompute B_2I3[], BASE^(2 * i + 3), BASE^3, ^5, ^7, ... */
++    if (W > 1)                  /* X := BASE^2 */
++      mul_mod (xp, &xsize, bp, bsize, bp, bsize, mp, msize, &karactx);
++    for (i = 0; i < (1 << (W - 1)) - 1; i++)
++      {                         /* B_2I3[i] = BASE^(2 * i + 3) */
++        if (i == 0)
++          {
++            base_u = bp;
++            base_u_size = bsize;
++          }
++        else
++          {
++            base_u = b_2i3[i-1];
++            base_u_size = b_2i3size[i-1];
++          }
++
++        if (xsize >= base_u_size)
++          mul_mod (rp, &rsize, xp, xsize, base_u, base_u_size,
++                   mp, msize, &karactx);
++        else
++          mul_mod (rp, &rsize, base_u, base_u_size, xp, xsize,
++                   mp, msize, &karactx);
++        b_2i3[i] = mpi_alloc_limb_space (rsize, esec);
++        b_2i3size[i] = rsize;
++        MPN_COPY (b_2i3[i], rp, rsize);
++      }
++
++    i = esize - 1;
++
++    /* Main loop.
++
++       Make the result be pointed to alternately by XP and RP.  This
++       helps us avoid block copying, which would otherwise be
++       necessary with the overlap restrictions of
++       _gcry_mpih_divmod. With 50% probability the result after this
++       loop will be in the area originally pointed by RP (==RES->d),
++       and with 50% probability in the area originally pointed to by XP. */
++    rsign = 0;
++    if (W == 1)
++      {
++        rsize = bsize;
++      }
++    else
++      {
++        rsize = msize;
++        MPN_ZERO (rp, rsize);
++      }
++    MPN_COPY ( rp, bp, bsize );
++
++    e = ep[i];
++    count_leading_zeros (c, e);
++    e = (e << c) << 1;
++    c = BITS_PER_MPI_LIMB - 1 - c;
++
++    j = 0;
++
++    for (;;)
++      if (e == 0)
++        {
++          j += c;
++          i--;
++          if ( i < 0 )
++            {
++              c = 0;
++              break;
++            }
++
++          e = ep[i];
++          c = BITS_PER_MPI_LIMB;
++        }
++      else
++        {
++          int c0;
++          mpi_limb_t e0;
++
++          count_leading_zeros (c0, e);
++          e = (e << c0);
++          c -= c0;
++          j += c0;
++
++          if (c >= W)
++            {
++              e0 = (e >> (BITS_PER_MPI_LIMB - W));
++              e = (e << W);
++              c -= W;
++            }
++          else
++            {
++              i--;
++              if ( i < 0 )
++                {
++                  e = (e >> (BITS_PER_MPI_LIMB - c));
++                  break;
++                }
++
++              c0 = c;
++              e0 = (e >> (BITS_PER_MPI_LIMB - W))
++                | (ep[i] >> (BITS_PER_MPI_LIMB - W + c0));
++              e = (ep[i] << (W - c0));
++              c = BITS_PER_MPI_LIMB - W + c0;
++            }
++
++          count_trailing_zeros (c0, e0);
++          e0 = (e0 >> c0) >> 1;
++
++          for (j += W - c0; j; j--)
++            {
++              mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++              tp = rp; rp = xp; xp = tp;
++              rsize = xsize;
++            }
++
++          if (e0 == 0)
++            {
++              base_u = bp;
++              base_u_size = bsize;
++            }
++          else
++            {
++              base_u = b_2i3[e0 - 1];
++              base_u_size = b_2i3size[e0 -1];
++            }
++
++          mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
++                   mp, msize, &karactx);
++          tp = rp; rp = xp; xp = tp;
++          rsize = xsize;
++
++          j = c0;
++        }
++
++    if (c != 0)
++      {
++        j += c;
++        count_trailing_zeros (c, e);
++        e = (e >> c);
++        j -= c;
++      }
++
++    while (j--)
++      {
++        mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++        tp = rp; rp = xp; xp = tp;
++        rsize = xsize;
++      }
++
++    if (e != 0)
++      {
++        if ((e>>1) == 0)
++          {
++            base_u = bp;
++            base_u_size = bsize;
++          }
++        else
++          {
++            base_u = b_2i3[(e>>1) - 1];
++            base_u_size = b_2i3size[(e>>1) -1];
++          }
++
++        mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
++                 mp, msize, &karactx);
++        tp = rp; rp = xp; xp = tp;
++        rsize = xsize;
++
++        for (; c; c--)
++          {
++            mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++            tp = rp; rp = xp; xp = tp;
++            rsize = xsize;
++          }
++      }
++
++    /* We shifted MOD, the modulo reduction argument, left
++       MOD_SHIFT_CNT steps.  Adjust the result by reducing it with the
++       original MOD.
++
++       Also make sure the result is put in RES->d (where it already
++       might be, see above).  */
++    if ( mod_shift_cnt )
++      {
++        carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt);
++        rp = res->d;
++        if ( carry_limb )
++          {
++            rp[rsize] = carry_limb;
++            rsize++;
++          }
++      }
++    else if (res->d != rp)
++      {
++        MPN_COPY (res->d, rp, rsize);
++        rp = res->d;
++      }
++
++    if ( rsize >= msize )
++      {
++        _gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize);
++        rsize = msize;
++      }
++
++    /* Remove any leading zero words from the result.  */
++    if ( mod_shift_cnt )
++      _gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt);
++    MPN_NORMALIZE (rp, rsize);
++
++    _gcry_mpih_release_karatsuba_ctx (&karactx );
++    for (i = 0; i < (1 << (W - 1)) - 1; i++)
++      _gcry_mpi_free_limb_space( b_2i3[i], esec ? b_2i3size[i] : 0 );
++  }
++
++  /* Fixup for negative results.  */
++  if ( negative_result && rsize )
++    {
++      if ( mod_shift_cnt )
++        _gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt);
++      _gcry_mpih_sub( rp, mp, msize, rp, rsize);
++      rsize = msize;
++      rsign = msign;
++      MPN_NORMALIZE(rp, rsize);
++    }
++  gcry_assert (res->d == rp);
++  res->nlimbs = rsize;
++  res->sign = rsign;
++
++ leave:
++  if (mp_marker)
++    _gcry_mpi_free_limb_space( mp_marker, mp_nlimbs );
++  if (bp_marker)
++    _gcry_mpi_free_limb_space( bp_marker, bp_nlimbs );
++  if (ep_marker)
++    _gcry_mpi_free_limb_space( ep_marker, ep_nlimbs );
++  if (xp_marker)
++    _gcry_mpi_free_limb_space( xp_marker, xp_nlimbs );
++}
++#endif
diff -Nru libgcrypt11-1.4.5/debian/patches/series libgcrypt11-1.4.5/debian/patches/series
--- libgcrypt11-1.4.5/debian/patches/series	2013-07-26 14:17:40.000000000 +0200
+++ libgcrypt11-1.4.5/debian/patches/series	2014-11-20 14:45:56.000000000 +0100
@@ -2,3 +2,5 @@
 20_ftbfsmips.diff
 35_bug-in-mpi_powm-for-e-0.patch
 36_Mitigate-flush-reload-cache-attack-on-RSA.patch
+37_Replace-deliberate-division-by-zero-with-_gcry_divid.patch
+38_CVE-2014-5270.patch

