[med-svn] [Git][med-team/minimap2][master] 3 commits: Re-enable and adapt older simde patch to build properly on !x86 and !arm64

Nilesh Patra (@nilesh) gitlab at salsa.debian.org
Sat Mar 12 09:42:18 GMT 2022



Nilesh Patra pushed to branch master at Debian Med / minimap2


Commits:
204141e2 by Nilesh Patra at 2022-03-12T14:42:15+05:30
Re-enable and adapt older simde patch to build properly on !x86 and !arm64

- - - - -
cf8fc013 by Nilesh Patra at 2022-03-12T14:51:59+05:30
d/t/control: Add superficial import test for mappy

- - - - -
a68f8b6e by Nilesh Patra at 2022-03-12T14:51:59+05:30
Upload to unstable

- - - - -
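
The core of the first commit: instead of wrapping every x86 intrinsic in an
#ifdef __SSE2__ / __SSE4_1__ ladder with hand-written scalar fallbacks, the
patch defines SIMDE_ENABLE_NATIVE_ALIASES and includes the SIMDe headers, so
the unmodified _mm_* calls compile on any architecture SIMDe supports:
native instructions where the ISA exists, portable emulation elsewhere. A
minimal sketch of the pattern (illustrative only; the file and variable
names below are not from minimap2):

    /* simde_demo.c: hypothetical example, not part of the patch.
     * On Debian the headers come from libsimde-dev; this should build
     * on e.g. ppc64el or riscv64 just as on amd64:  cc simde_demo.c */
    #define SIMDE_ENABLE_NATIVE_ALIASES
    #include <simde/x86/sse4.1.h>

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        __m128i a = _mm_set1_epi8(-5);
        __m128i b = _mm_set1_epi8(3);
        /* _mm_max_epi8() needs SSE4.1 on real x86; SIMDe supplies a
         * portable implementation when the target lacks it. */
        __m128i m = _mm_max_epi8(a, b);
        int8_t out[16];
        _mm_storeu_si128((__m128i *)out, m);
        printf("%d\n", out[0]); /* prints 3 */
        return 0;
    }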


4 changed files:

- debian/changelog
- + debian/patches/reenable-simde.patch
- debian/patches/series
- debian/tests/control


Changes:

=====================================
debian/changelog
=====================================
@@ -1,3 +1,12 @@
+minimap2 (2.24+dfsg-2) unstable; urgency=medium
+
+  * Team Upload.
+  * Re-enable and adapt older simde patch to
+    build properly on !x86 and !arm64 (Closes: #1004455)
+  * d/t/control: Add superficial import test for mappy
+
+ -- Nilesh Patra <nilesh at debian.org>  Sat, 12 Mar 2022 14:43:04 +0530
+
 minimap2 (2.24+dfsg-1) unstable; urgency=medium
 
   * New upstream version


=====================================
debian/patches/reenable-simde.patch
=====================================
@@ -0,0 +1,488 @@
+--- a/ksw2_extz2_sse.c
++++ b/ksw2_extz2_sse.c
+@@ -2,31 +2,13 @@
+ #include <assert.h>
+ #include "ksw2.h"
+ 
+-#ifdef __SSE2__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse2.h>
+-#else
+-#include <emmintrin.h>
+-#endif
+-
+-#ifdef KSW_SSE2_ONLY
+-#undef __SSE4_1__
+-#endif
+-
+-#ifdef __SSE4_1__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse4.1.h>
+-#else
+-#include <smmintrin.h>
+-#endif
+-#endif
++#define SIMDE_ENABLE_NATIVE_ALIASES
++#include "simde/x86/sse4.1.h"
+ 
+-#ifdef KSW_CPU_DISPATCH
+-#ifdef __SSE4_1__
++#if defined(SIMDE_SSE4_1_NATIVE)
+ void ksw_extz2_sse41(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat, int8_t q, int8_t e, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+-#else
++#elif defined(SIMDE_SSE2_NATIVE)
+ void ksw_extz2_sse2(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat, int8_t q, int8_t e, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+-#endif
+ #else
+ void ksw_extz2_sse(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat, int8_t q, int8_t e, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+ #endif // ~KSW_CPU_DISPATCH
+@@ -137,13 +119,8 @@
+ 				st = _mm_loadu_si128((__m128i*)&qrr[t]);
+ 				mask = _mm_or_si128(_mm_cmpeq_epi8(sq, m1_), _mm_cmpeq_epi8(st, m1_));
+ 				tmp = _mm_cmpeq_epi8(sq, st);
+-#ifdef __SSE4_1__
+ 				tmp = _mm_blendv_epi8(sc_mis_, sc_mch_, tmp);
+ 				tmp = _mm_blendv_epi8(tmp,     sc_N_,   mask);
+-#else
+-				tmp = _mm_or_si128(_mm_andnot_si128(tmp,  sc_mis_), _mm_and_si128(tmp,  sc_mch_));
+-				tmp = _mm_or_si128(_mm_andnot_si128(mask, tmp),     _mm_and_si128(mask, sc_N_));
+-#endif
+ 				_mm_storeu_si128((__m128i*)((uint8_t*)s + t), tmp);
+ 			}
+ 		} else {
+@@ -159,22 +136,10 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i z, a, b, xt1, vt1, ut, tmp;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				z = _mm_max_epi8(z, a);                          // z = z > a? z : a (signed)
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8()
+-				z = _mm_and_si128(z, _mm_cmpgt_epi8(z, zero_));  // z = z > 0? z : 0;
+-				z = _mm_max_epu8(z, a);                          // z = max(z, a); this works because both are non-negative
+-#endif
+ 				__dp_code_block2;
+-#ifdef __SSE4_1__
+ 				_mm_store_si128(&x[t], _mm_max_epi8(a, zero_));
+ 				_mm_store_si128(&y[t], _mm_max_epi8(b, zero_));
+-#else
+-				tmp = _mm_cmpgt_epi8(a, zero_);
+-				_mm_store_si128(&x[t], _mm_and_si128(a, tmp));
+-				tmp = _mm_cmpgt_epi8(b, zero_);
+-				_mm_store_si128(&y[t], _mm_and_si128(b, tmp));
+-#endif
+ 			}
+ 		} else if (!(flag&KSW_EZ_RIGHT)) { // gap left-alignment
+ 			__m128i *pr = p + (size_t)r * n_col_ - st_;
+@@ -183,16 +148,9 @@
+ 				__m128i d, z, a, b, xt1, vt1, ut, tmp;
+ 				__dp_code_block1;
+ 				d = _mm_and_si128(_mm_cmpgt_epi8(a, z), flag1_); // d = a > z? 1 : 0
+-#ifdef __SSE4_1__
+ 				z = _mm_max_epi8(z, a);                          // z = z > a? z : a (signed)
+ 				tmp = _mm_cmpgt_epi8(b, z);
+ 				d = _mm_blendv_epi8(d, flag2_, tmp);             // d = b > z? 2 : d
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				z = _mm_and_si128(z, _mm_cmpgt_epi8(z, zero_));  // z = z > 0? z : 0;
+-				z = _mm_max_epu8(z, a);                          // z = max(z, a); this works because both are non-negative
+-				tmp = _mm_cmpgt_epi8(b, z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, flag2_)); // d = b > z? 2 : d; emulating blendv
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(a, zero_);
+ 				_mm_store_si128(&x[t], _mm_and_si128(tmp, a));
+@@ -209,16 +167,9 @@
+ 				__m128i d, z, a, b, xt1, vt1, ut, tmp;
+ 				__dp_code_block1;
+ 				d = _mm_andnot_si128(_mm_cmpgt_epi8(z, a), flag1_); // d = z > a? 0 : 1
+-#ifdef __SSE4_1__
+ 				z = _mm_max_epi8(z, a);                          // z = z > a? z : a (signed)
+ 				tmp = _mm_cmpgt_epi8(z, b);
+ 				d = _mm_blendv_epi8(flag2_, d, tmp);             // d = z > b? d : 2
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				z = _mm_and_si128(z, _mm_cmpgt_epi8(z, zero_));  // z = z > 0? z : 0;
+-				z = _mm_max_epu8(z, a);                          // z = max(z, a); this works because both are non-negative
+-				tmp = _mm_cmpgt_epi8(z, b);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, flag2_), _mm_and_si128(tmp, d)); // d = z > b? d : 2; emulating blendv
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(zero_, a);
+ 				_mm_store_si128(&x[t], _mm_andnot_si128(tmp, a));
+@@ -249,13 +200,8 @@
+ 					_mm_storeu_si128((__m128i*)&H[t], H1);
+ 					t_ = _mm_set1_epi32(t);
+ 					tmp = _mm_cmpgt_epi32(H1, max_H_);
+-#ifdef __SSE4_1__
+ 					max_H_ = _mm_blendv_epi8(max_H_, H1, tmp);
+ 					max_t_ = _mm_blendv_epi8(max_t_, t_, tmp);
+-#else
+-					max_H_ = _mm_or_si128(_mm_and_si128(tmp, H1), _mm_andnot_si128(tmp, max_H_));
+-					max_t_ = _mm_or_si128(_mm_and_si128(tmp, t_), _mm_andnot_si128(tmp, max_t_));
+-#endif
+ 				}
+ 				_mm_storeu_si128((__m128i*)HH, max_H_);
+ 				_mm_storeu_si128((__m128i*)tt, max_t_);
+@@ -310,4 +256,4 @@
+ 		kfree(km, mem2); kfree(km, off);
+ 	}
+ }
+-#endif // __SSE2__
++
+--- a/ksw2_extd2_sse.c
++++ b/ksw2_extd2_sse.c
+@@ -3,37 +3,19 @@
+ #include <assert.h>
+ #include "ksw2.h"
+ 
+-#ifdef __SSE2__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse2.h>
+-#else
+-#include <emmintrin.h>
+-#endif
+-
+-#ifdef KSW_SSE2_ONLY
+-#undef __SSE4_1__
+-#endif
+-
+-#ifdef __SSE4_1__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse4.1.h>
+-#else
+-#include <smmintrin.h>
+-#endif
+-#endif
++#define SIMDE_ENABLE_NATIVE_ALIASES
++#include "simde/x86/sse4.1.h"
+ 
+-#ifdef KSW_CPU_DISPATCH
+-#ifdef __SSE4_1__
++#if defined(SIMDE_SSE4_1_NATIVE)
+ void ksw_extd2_sse41(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+-#else
++#elif defined(SIMDE_SSE2_NATIVE)
+ void ksw_extd2_sse2(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+-#endif
+ #else
+ void ksw_extd2_sse(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+-#endif // ~KSW_CPU_DISPATCH
++#endif
+ {
+ #define __dp_code_block1 \
+ 	z = _mm_load_si128(&s[t]); \
+@@ -169,13 +151,8 @@
+ 				st = _mm_loadu_si128((__m128i*)&qrr[t]);
+ 				mask = _mm_or_si128(_mm_cmpeq_epi8(sq, m1_), _mm_cmpeq_epi8(st, m1_));
+ 				tmp = _mm_cmpeq_epi8(sq, st);
+-#ifdef __SSE4_1__
+ 				tmp = _mm_blendv_epi8(sc_mis_, sc_mch_, tmp);
+ 				tmp = _mm_blendv_epi8(tmp,     sc_N_,   mask);
+-#else
+-				tmp = _mm_or_si128(_mm_andnot_si128(tmp,  sc_mis_), _mm_and_si128(tmp,  sc_mch_));
+-				tmp = _mm_or_si128(_mm_andnot_si128(mask, tmp),     _mm_and_si128(mask, sc_N_));
+-#endif
+ 				_mm_storeu_si128((__m128i*)((int8_t*)s + t), tmp);
+ 			}
+ 		} else {
+@@ -192,7 +169,6 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				z = _mm_max_epi8(z, a);
+ 				z = _mm_max_epi8(z, b);
+ 				z = _mm_max_epi8(z, a2);
+@@ -203,27 +179,6 @@
+ 				_mm_store_si128(&y[t],  _mm_sub_epi8(_mm_max_epi8(b,  zero_), qe_));
+ 				_mm_store_si128(&x2[t], _mm_sub_epi8(_mm_max_epi8(a2, zero_), qe2_));
+ 				_mm_store_si128(&y2[t], _mm_sub_epi8(_mm_max_epi8(b2, zero_), qe2_));
+-#else
+-				tmp = _mm_cmpgt_epi8(a,  z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(b,  z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(a2, z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2));
+-				tmp = _mm_cmpgt_epi8(b2, z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b2));
+-				tmp = _mm_cmplt_epi8(sc_mch_, z);
+-				z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+-				__dp_code_block2;
+-				tmp = _mm_cmpgt_epi8(a, zero_);
+-				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_and_si128(tmp, a),  qe_));
+-				tmp = _mm_cmpgt_epi8(b, zero_);
+-				_mm_store_si128(&y[t],  _mm_sub_epi8(_mm_and_si128(tmp, b),  qe_));
+-				tmp = _mm_cmpgt_epi8(a2, zero_);
+-				_mm_store_si128(&x2[t], _mm_sub_epi8(_mm_and_si128(tmp, a2), qe2_));
+-				tmp = _mm_cmpgt_epi8(b2, zero_);
+-				_mm_store_si128(&y2[t], _mm_sub_epi8(_mm_and_si128(tmp, b2), qe2_));
+-#endif
+ 			}
+ 		} else if (!(flag&KSW_EZ_RIGHT)) { // gap left-alignment
+ 			__m128i *pr = p + (size_t)r * n_col_ - st_;
+@@ -231,7 +186,6 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i d, z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				d = _mm_and_si128(_mm_cmpgt_epi8(a, z), _mm_set1_epi8(1));       // d = a  > z? 1 : 0
+ 				z = _mm_max_epi8(z, a);
+ 				d = _mm_blendv_epi8(d, _mm_set1_epi8(2), _mm_cmpgt_epi8(b,  z)); // d = b  > z? 2 : d
+@@ -241,22 +195,6 @@
+ 				d = _mm_blendv_epi8(d, _mm_set1_epi8(4), _mm_cmpgt_epi8(b2, z)); // d = a2 > z? 3 : d
+ 				z = _mm_max_epi8(z, b2);
+ 				z = _mm_min_epi8(z, sc_mch_);
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				tmp = _mm_cmpgt_epi8(a,  z);
+-				d = _mm_and_si128(tmp, _mm_set1_epi8(1));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(b,  z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(2)));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(a2, z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(3)));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2));
+-				tmp = _mm_cmpgt_epi8(b2, z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(4)));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b2));
+-				tmp = _mm_cmplt_epi8(sc_mch_, z);
+-				z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(a, zero_);
+ 				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_and_si128(tmp, a),  qe_));
+@@ -278,7 +216,6 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i d, z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				d = _mm_andnot_si128(_mm_cmpgt_epi8(z, a), _mm_set1_epi8(1));    // d = z > a?  0 : 1
+ 				z = _mm_max_epi8(z, a);
+ 				d = _mm_blendv_epi8(_mm_set1_epi8(2), d, _mm_cmpgt_epi8(z, b));  // d = z > b?  d : 2
+@@ -288,22 +225,6 @@
+ 				d = _mm_blendv_epi8(_mm_set1_epi8(4), d, _mm_cmpgt_epi8(z, b2)); // d = z > b2? d : 4
+ 				z = _mm_max_epi8(z, b2);
+ 				z = _mm_min_epi8(z, sc_mch_);
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				tmp = _mm_cmpgt_epi8(z, a);
+-				d = _mm_andnot_si128(tmp, _mm_set1_epi8(1));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(z, b);
+-				d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(2)));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(z, a2);
+-				d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(3)));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a2));
+-				tmp = _mm_cmpgt_epi8(z, b2);
+-				d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(4)));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, b2));
+-				tmp = _mm_cmplt_epi8(sc_mch_, z);
+-				z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(zero_, a);
+ 				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_andnot_si128(tmp, a),  qe_));
+@@ -338,13 +259,8 @@
+ 					_mm_storeu_si128((__m128i*)&H[t], H1);
+ 					t_ = _mm_set1_epi32(t);
+ 					tmp = _mm_cmpgt_epi32(H1, max_H_);
+-#ifdef __SSE4_1__
+ 					max_H_ = _mm_blendv_epi8(max_H_, H1, tmp);
+ 					max_t_ = _mm_blendv_epi8(max_t_, t_, tmp);
+-#else
+-					max_H_ = _mm_or_si128(_mm_and_si128(tmp, H1), _mm_andnot_si128(tmp, max_H_));
+-					max_t_ = _mm_or_si128(_mm_and_si128(tmp, t_), _mm_andnot_si128(tmp, max_t_));
+-#endif
+ 				}
+ 				_mm_storeu_si128((__m128i*)HH, max_H_);
+ 				_mm_storeu_si128((__m128i*)tt, max_t_);
+@@ -399,4 +315,3 @@
+ 		kfree(km, mem2); kfree(km, off);
+ 	}
+ }
+-#endif // __SSE2__
+--- a/ksw2_exts2_sse.c
++++ b/ksw2_exts2_sse.c
+@@ -3,36 +3,19 @@
+ #include <assert.h>
+ #include "ksw2.h"
+ 
+-#ifdef __SSE2__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse2.h>
+-#else
+-#include <emmintrin.h>
+-#endif
+-#ifdef KSW_SSE2_ONLY
+-#undef __SSE4_1__
+-#endif
+-
+-#ifdef __SSE4_1__
+-#ifdef USE_SIMDE
+-#include <simde/x86/sse4.1.h>
+-#else
+-#include <smmintrin.h>
+-#endif
+-#endif
++#define SIMDE_ENABLE_NATIVE_ALIASES
++#include "simde/x86/sse4.1.h"
+ 
+-#ifdef KSW_CPU_DISPATCH
+-#ifdef __SSE4_1__
++#if defined(SIMDE_SSE4_1_NATIVE)
+ void ksw_exts2_sse41(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t noncan, int zdrop, int8_t junc_bonus, int flag, const uint8_t *junc, ksw_extz_t *ez)
+-#else
++#elif defined(SIMDE_SSE2_NATIVE)
+ void ksw_exts2_sse2(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t noncan, int zdrop, int8_t junc_bonus, int flag, const uint8_t *junc, ksw_extz_t *ez)
+-#endif
+ #else
+ void ksw_exts2_sse(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+ 				   int8_t q, int8_t e, int8_t q2, int8_t noncan, int zdrop, int8_t junc_bonus, int flag, const uint8_t *junc, ksw_extz_t *ez)
+-#endif // ~KSW_CPU_DISPATCH
++#endif
+ {
+ #define __dp_code_block1 \
+ 	z = _mm_load_si128(&s[t]); \
+@@ -201,13 +184,8 @@
+ 				st = _mm_loadu_si128((__m128i*)&qrr[t]);
+ 				mask = _mm_or_si128(_mm_cmpeq_epi8(sq, m1_), _mm_cmpeq_epi8(st, m1_));
+ 				tmp = _mm_cmpeq_epi8(sq, st);
+-#ifdef __SSE4_1__
+ 				tmp = _mm_blendv_epi8(sc_mis_, sc_mch_, tmp);
+ 				tmp = _mm_blendv_epi8(tmp,     sc_N_,   mask);
+-#else
+-				tmp = _mm_or_si128(_mm_andnot_si128(tmp,  sc_mis_), _mm_and_si128(tmp,  sc_mch_));
+-				tmp = _mm_or_si128(_mm_andnot_si128(mask, tmp),     _mm_and_si128(mask, sc_N_));
+-#endif
+ 				_mm_storeu_si128((__m128i*)((int8_t*)s + t), tmp);
+ 			}
+ 		} else {
+@@ -224,7 +202,6 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i z, a, b, a2, a2a, xt1, x2t1, vt1, ut, tmp;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				z = _mm_max_epi8(z, a);
+ 				z = _mm_max_epi8(z, b);
+ 				z = _mm_max_epi8(z, a2a);
+@@ -233,23 +210,6 @@
+ 				_mm_store_si128(&y[t],  _mm_sub_epi8(_mm_max_epi8(b,  zero_), qe_));
+ 				tmp = _mm_load_si128(&donor[t]);
+ 				_mm_store_si128(&x2[t], _mm_sub_epi8(_mm_max_epi8(a2, tmp), q2_));
+-#else
+-				tmp = _mm_cmpgt_epi8(a,  z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(b,  z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(a2a, z);
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2a));
+-				__dp_code_block2;
+-				tmp = _mm_cmpgt_epi8(a, zero_);
+-				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_and_si128(tmp, a),  qe_));
+-				tmp = _mm_cmpgt_epi8(b, zero_);
+-				_mm_store_si128(&y[t],  _mm_sub_epi8(_mm_and_si128(tmp, b),  qe_));
+-				tmp = _mm_load_si128(&donor[t]); // TODO: check if this is correct
+-				tmp = _mm_cmpgt_epi8(a2, tmp);
+-				tmp = _mm_or_si128(_mm_andnot_si128(tmp, tmp), _mm_and_si128(tmp, a2));
+-				_mm_store_si128(&x2[t], _mm_sub_epi8(tmp, q2_));
+-#endif
+ 			}
+ 		} else if (!(flag&KSW_EZ_RIGHT)) { // gap left-alignment
+ 			__m128i *pr = p + r * n_col_ - st_;
+@@ -257,24 +217,12 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i d, z, a, b, a2, a2a, xt1, x2t1, vt1, ut, tmp, tmp2;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				d = _mm_and_si128(_mm_cmpgt_epi8(a, z), _mm_set1_epi8(1));       // d = a  > z? 1 : 0
+ 				z = _mm_max_epi8(z, a);
+ 				d = _mm_blendv_epi8(d, _mm_set1_epi8(2), _mm_cmpgt_epi8(b,  z)); // d = b  > z? 2 : d
+ 				z = _mm_max_epi8(z, b);
+ 				d = _mm_blendv_epi8(d, _mm_set1_epi8(3), _mm_cmpgt_epi8(a2a, z)); // d = a2 > z? 3 : d
+ 				z = _mm_max_epi8(z, a2a);
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				tmp = _mm_cmpgt_epi8(a,  z);
+-				d = _mm_and_si128(tmp, _mm_set1_epi8(1));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(b,  z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(2)));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(a2a, z);
+-				d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(3)));
+-				z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2a));
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(a, zero_);
+ 				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_and_si128(tmp, a),  qe_));
+@@ -285,11 +233,7 @@
+ 
+ 				tmp2 = _mm_load_si128(&donor[t]);
+ 				tmp = _mm_cmpgt_epi8(a2, tmp2);
+-#ifdef __SSE4_1__
+ 				tmp2 = _mm_max_epi8(a2, tmp2);
+-#else
+-				tmp2 = _mm_or_si128(_mm_andnot_si128(tmp, tmp2), _mm_and_si128(tmp, a2));
+-#endif
+ 				_mm_store_si128(&x2[t], _mm_sub_epi8(tmp2, q2_));
+ 				d = _mm_or_si128(d, _mm_and_si128(tmp, _mm_set1_epi8(0x20)));
+ 				_mm_store_si128(&pr[t], d);
+@@ -300,24 +244,12 @@
+ 			for (t = st_; t <= en_; ++t) {
+ 				__m128i d, z, a, b, a2, a2a, xt1, x2t1, vt1, ut, tmp, tmp2;
+ 				__dp_code_block1;
+-#ifdef __SSE4_1__
+ 				d = _mm_andnot_si128(_mm_cmpgt_epi8(z, a), _mm_set1_epi8(1));    // d = z > a?  0 : 1
+ 				z = _mm_max_epi8(z, a);
+ 				d = _mm_blendv_epi8(_mm_set1_epi8(2), d, _mm_cmpgt_epi8(z, b));  // d = z > b?  d : 2
+ 				z = _mm_max_epi8(z, b);
+ 				d = _mm_blendv_epi8(_mm_set1_epi8(3), d, _mm_cmpgt_epi8(z, a2a)); // d = z > a2? d : 3
+ 				z = _mm_max_epi8(z, a2a);
+-#else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+-				tmp = _mm_cmpgt_epi8(z, a);
+-				d = _mm_andnot_si128(tmp, _mm_set1_epi8(1));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a));
+-				tmp = _mm_cmpgt_epi8(z, b);
+-				d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(2)));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, b));
+-				tmp = _mm_cmpgt_epi8(z, a2a);
+-				d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(3)));
+-				z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a2a));
+-#endif
+ 				__dp_code_block2;
+ 				tmp = _mm_cmpgt_epi8(zero_, a);
+ 				_mm_store_si128(&x[t],  _mm_sub_epi8(_mm_andnot_si128(tmp, a),  qe_));
+@@ -328,11 +260,7 @@
+ 
+ 				tmp2 = _mm_load_si128(&donor[t]);
+ 				tmp = _mm_cmpgt_epi8(tmp2, a2);
+-#ifdef __SSE4_1__
+ 				tmp2 = _mm_max_epi8(tmp2, a2);
+-#else
+-				tmp2 = _mm_or_si128(_mm_andnot_si128(tmp, a2), _mm_and_si128(tmp, tmp2));
+-#endif
+ 				_mm_store_si128(&x2[t], _mm_sub_epi8(tmp2, q2_));
+ 				d = _mm_or_si128(d, _mm_andnot_si128(tmp, _mm_set1_epi8(0x20))); // d = a > 0? 1<<5 : 0
+ 				_mm_store_si128(&pr[t], d);
+@@ -356,13 +284,8 @@
+ 					_mm_storeu_si128((__m128i*)&H[t], H1);
+ 					t_ = _mm_set1_epi32(t);
+ 					tmp = _mm_cmpgt_epi32(H1, max_H_);
+-#ifdef __SSE4_1__
+ 					max_H_ = _mm_blendv_epi8(max_H_, H1, tmp);
+ 					max_t_ = _mm_blendv_epi8(max_t_, t_, tmp);
+-#else
+-					max_H_ = _mm_or_si128(_mm_and_si128(tmp, H1), _mm_andnot_si128(tmp, max_H_));
+-					max_t_ = _mm_or_si128(_mm_and_si128(tmp, t_), _mm_andnot_si128(tmp, max_t_));
+-#endif
+ 				}
+ 				_mm_storeu_si128((__m128i*)HH, max_H_);
+ 				_mm_storeu_si128((__m128i*)tt, max_t_);
+@@ -413,4 +336,3 @@
+ 		kfree(km, mem2); kfree(km, off);
+ 	}
+ }
+-#endif // __SSE2__
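
A note on the SIMDE_SSE4_1_NATIVE / SIMDE_SSE2_NATIVE guards used above:
SIMDe defines these macros only when the compiler really targets that
instruction set, so on x86 the KSW_CPU_DISPATCH build keeps its per-ISA
symbols (ksw_*_sse41, ksw_*_sse2) while every other architecture compiles
the single generic ksw_*_sse entry point on top of SIMDe's emulation.
Condensed, the naming pattern is (hypothetical kernel name, assuming the
macros behave as the patch expects):

    #define SIMDE_ENABLE_NATIVE_ALIASES
    #include "simde/x86/sse4.1.h"

    #if defined(SIMDE_SSE4_1_NATIVE)
    void kernel_sse41(void)      /* real SSE4.1: fast dispatched path */
    #elif defined(SIMDE_SSE2_NATIVE)
    void kernel_sse2(void)       /* real SSE2 only: baseline x86 path */
    #else
    void kernel_sse(void)        /* other ISAs: SIMDe emulates _mm_* */
    #endif
    {
        __m128i z = _mm_setzero_si128();   /* works in all three cases */
        (void)z;
    }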


=====================================
debian/patches/series
=====================================
@@ -4,3 +4,4 @@ ar.patch
 link_mappy_to_libminimap.patch
 python-sse4-arch.patch
 simde
+reenable-simde.patch


=====================================
debian/tests/control
=====================================
@@ -1,3 +1,7 @@
 Tests: run-unit-test
 Depends: @
 Restrictions: allow-stderr
+
+Test-Command: set -e ; for py in $(py3versions -r 2>/dev/null) ; do cd "$AUTOPKGTEST_TMP" ; echo "Testing with $py:" ; $py -c "import mappy; print(mappy)" ; done
+Depends: @, python3-all
+Restrictions: allow-stderr, superficial



View it on GitLab: https://salsa.debian.org/med-team/minimap2/-/compare/9a9241a43ecf58deb41ad7ff56fa41d752877ff0...a68f8b6e5f0b79cb9dd6346059371ad3bb98fec2
