[med-svn] [Git][med-team/python-skbio][master] 3 commits: Cherry-pick upstream commit to fix dtypes and warnings
Graham Inggs (@ginggs)
gitlab at salsa.debian.org
Fri Dec 3 16:05:48 GMT 2021
Graham Inggs pushed to branch master at Debian Med / python-skbio
Commits:
b2fce2be by Graham Inggs at 2021-12-03T17:22:50+02:00
Cherry-pick upstream commit to fix dtypes and warnings
- - - - -
395bb024 by Graham Inggs at 2021-12-03T17:23:56+02:00
Drop skip_tests_failung_due_to_scipy.patch, no longer needed
- - - - -
78913d16 by Graham Inggs at 2021-12-03T17:24:55+02:00
Prepare for upload to unstable
- - - - -
4 changed files:
- debian/changelog
- + debian/patches/fix-dtype-precision-warnings.patch
- debian/patches/series
- − debian/patches/skip_tests_failung_due_to_scipy.patch
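Most of the new fix-dtype-precision-warnings.patch shown under Changes below replaces the NumPy scalar aliases np.int, np.float, np.bool and np.str with the Python builtins they alias; NumPy deprecated these names in 1.20 and warns on every use. A minimal sketch of that substitution pattern, assuming nothing beyond NumPy itself (the shapes and values here are illustrative, not taken from skbio):

import numpy as np

shape = (4, 4)

# old: np.zeros(shape, dtype=np.int)   -> DeprecationWarning on NumPy >= 1.20
traceback_matrix = np.zeros(shape, dtype=int)      # platform default integer, no warning

# old: distance_matrix.astype(np.float)
distance_matrix = np.zeros(shape).astype(float)    # float64, same values

# old: ' '.join(np.asarray(qual_scores, dtype=np.str))
qual_str = ' '.join(np.asarray([40, 40, 39], dtype=str))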
Changes:
=====================================
debian/changelog
=====================================
@@ -1,4 +1,6 @@
-python-skbio (0.5.6-6) UNRELEASED; urgency=medium
+python-skbio (0.5.6-6) unstable; urgency=medium
+
+ * Team upload
[ Andreas Tille ]
* Remove Tim Booth from Uploaders (Thanks to Tim for all his work)
@@ -6,8 +8,11 @@ python-skbio (0.5.6-6) UNRELEASED; urgency=medium
[ Graham Inggs ]
* Regenerate the .c files at build time by cython3
Closes: #1000864
+ * Cherry-pick upstream commit to fix dtypes and warnings
+ Closes: #992676, #999523
+ * Drop skip_tests_failung_due_to_scipy.patch, no longer needed
- -- Andreas Tille <tille at debian.org> Tue, 30 Nov 2021 14:04:16 +0100
+ -- Graham Inggs <ginggs at debian.org> Fri, 03 Dec 2021 15:24:18 +0000
python-skbio (0.5.6-5) unstable; urgency=medium
=====================================
debian/patches/fix-dtype-precision-warnings.patch
=====================================
@@ -0,0 +1,724 @@
+Description: fix dtype, precision, warnings, and more
+Origin: upstream, https://github.com/biocore/scikit-bio/commit/357c7fe847187bc540c4914c3ffd607d9432857d
+Author: Evan Bolyen <ebolyen at gmail.com>
+Last-Update: 2021-10-26
+
+--- a/doc/source/_templates/autosummary/class.rst
++++ b/doc/source/_templates/autosummary/class.rst
+@@ -16,7 +16,9 @@
+ '__repr__',
+ '__setattr__',
+ '__sizeof__',
+- '__subclasshook__'] and item.startswith('__')) %}
++ '__subclasshook__',
++ '__init_subclass__',
++ '__class_getitem__'] and item.startswith('__')) %}
+ {{ built_in_methods.append(item) or '' }}
+ {% endif %}
+ {% endfor %}
+--- a/doc/source/conf.py
++++ b/doc/source/conf.py
+@@ -69,16 +69,13 @@
+ return specials[display_name], '', summary, real_name
+ return display_name, sig, summary, real_name
+
+- skip = ['__init_subclass__']
+
+ items = []
+ for item in super(NewAuto, self).get_items(names):
+- if item[0] not in skip:
+- temp_item = fix_item(*item)
+- # Drop slot_wrappers (see above)
+- if temp_item is not None:
+- items.append(temp_item)
+-
++ temp_item = fix_item(*item)
++ # Drop slot_wrappers (see above)
++ if temp_item is not None:
++ items.append(temp_item)
+ return items
+
+ autosummary.Autosummary = NewAuto
+--- a/skbio/alignment/_indexing.py
++++ b/skbio/alignment/_indexing.py
+@@ -7,7 +7,7 @@
+ # ----------------------------------------------------------------------------
+
+ from abc import ABCMeta, abstractmethod
+-import collections
++import collections.abc
+
+ import numpy as np
+ import pandas as pd
+@@ -160,7 +160,7 @@
+ complete_key = False
+ partial_key = False
+ duplicated_key = False
+- if not isinstance(indexable, collections.Hashable):
++ if not isinstance(indexable, collections.abc.Hashable):
+ return False
+ if axis == 0 and self._has_fancy_index():
+ try:
+--- a/skbio/alignment/_pairwise.py
++++ b/skbio/alignment/_pairwise.py
+@@ -793,7 +793,7 @@
+ def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
+ shape = (aln2.shape.position+1, aln1.shape.position+1)
+ score_matrix = np.zeros(shape)
+- traceback_matrix = np.zeros(shape, dtype=np.int)
++ traceback_matrix = np.zeros(shape, dtype=int)
+ traceback_matrix += _traceback_encoding['uninitialized']
+ traceback_matrix[0, :] = _traceback_encoding['alignment-end']
+ traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
+@@ -803,7 +803,7 @@
+ def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
+ shape = (aln2.shape.position+1, aln1.shape.position+1)
+ score_matrix = np.zeros(shape)
+- traceback_matrix = np.zeros(shape, dtype=np.int)
++ traceback_matrix = np.zeros(shape, dtype=int)
+ traceback_matrix += _traceback_encoding['uninitialized']
+ traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
+
+@@ -826,7 +826,7 @@
+ aln1, aln2, gap_open_penalty, gap_extend_penalty):
+ shape = (aln2.shape.position+1, aln1.shape.position+1)
+ score_matrix = np.zeros(shape)
+- traceback_matrix = np.zeros(shape, dtype=np.int)
++ traceback_matrix = np.zeros(shape, dtype=int)
+ traceback_matrix += _traceback_encoding['uninitialized']
+ traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
+
+--- a/skbio/alignment/_tabular_msa.py
++++ b/skbio/alignment/_tabular_msa.py
+@@ -781,7 +781,7 @@
+ if minter is not None and index is not None:
+ raise ValueError(
+ "Cannot use both `minter` and `index` at the same time.")
+- self._seqs = pd.Series([])
++ self._seqs = pd.Series([], dtype=object)
+ self.extend(sequences, minter=minter, index=index,
+ reset_index=minter is None and index is None)
+
+@@ -1983,12 +1983,13 @@
+ step=1)
+
+ if len(self):
+- self._seqs = self._seqs.append(pd.Series(sequences, index=index))
++ self._seqs = self._seqs.append(pd.Series(sequences, index=index,
++ dtype=object))
+ else:
+ # Not using Series.append to avoid turning a RangeIndex supplied
+ # via `index` parameter into an Int64Index (this happens in pandas
+ # 0.18.0).
+- self._seqs = pd.Series(sequences, index=index)
++ self._seqs = pd.Series(sequences, index=index, dtype=object)
+
+ # When extending a TabularMSA without sequences, the number of
+ # positions in the TabularMSA may change from zero to non-zero. If
+--- a/skbio/alignment/tests/test_tabular_msa.py
++++ b/skbio/alignment/tests/test_tabular_msa.py
+@@ -6,7 +6,7 @@
+ # The full license is in the file COPYING.txt, distributed with this software.
+ # ----------------------------------------------------------------------------
+
+-import collections
++import collections.abc
+ import copy
+ import unittest
+ import functools
+@@ -1377,6 +1377,8 @@
+ 'd']},
+ index=[('b', 'x', 0)]))
+
++ @unittest.skipIf(tuple(map(int, pd.__version__.split('.'))) < (1, 2, 5),
++ "Old pandas will return empty frame")
+ def test_multiindex_complicated_axis_empty_selection(self):
+ a = RNA("UUAG", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+ b = RNA("UAAG", metadata={1: 0}, positional_metadata={1: [1, 2, 3, 4]})
+@@ -1386,13 +1388,12 @@
+ positional_metadata={'c': ['a', 'b', 'c', 'd']},
+ index=[('a', 'x', 0), ('a', 'x', 1), ('a', 'y', 2),
+ ('b', 'x', 0)])
+-
+- self.assertEqual(self.get(msa, (([False, True, False, True],
+- 'x', 2), Ellipsis)),
+- TabularMSA([], metadata={'x': 'y'},
+- # TODO: Change for #1198
+- positional_metadata=None,
+- index=[]))
++ # Pandas will KeyError when the intersection is empty
++ # change appears to have happened in:
++ # https://github.com/pandas-dev/pandas/pull/42245
++ # but this was not bisected to confirm
++ with self.assertRaises(KeyError):
++ self.get(msa, (([False, True, False, True], 'x', 2), Ellipsis))
+
+ def test_bool_index_scalar_bool_label(self):
+ a = DNA("ACGA", metadata={0: 0}, positional_metadata={0: [1, 2, 3, 4]})
+@@ -3642,7 +3643,7 @@
+
+ class TestHashable(unittest.TestCase):
+ def test_unhashable_type(self):
+- self.assertNotIsInstance(TabularMSA([]), collections.Hashable)
++ self.assertNotIsInstance(TabularMSA([]), collections.abc.Hashable)
+
+ def test_unhashable_object(self):
+ with self.assertRaisesRegex(TypeError, r'unhashable'):
+--- a/skbio/diversity/_util.py
++++ b/skbio/diversity/_util.py
+@@ -6,7 +6,7 @@
+ # The full license is in the file COPYING.txt, distributed with this software.
+ # ----------------------------------------------------------------------------
+
+-import collections
++import collections.abc
+
+ import numpy as np
+ import pandas as pd
+@@ -22,7 +22,10 @@
+
+ """
+ counts = np.asarray(counts)
+- if not np.all(np.isreal(counts)):
++ try:
++ if not np.all(np.isreal(counts)):
++ raise Exception
++ except Exception:
+ raise ValueError("Counts vector must contain real-valued entries.")
+ if counts.ndim != 1:
+ raise ValueError("Only 1-D vectors are supported.")
+@@ -47,7 +50,8 @@
+ return np.asarray(counts)
+ else:
+
+- if len(counts) == 0 or not isinstance(counts[0], collections.Iterable):
++ if len(counts) == 0 or not isinstance(counts[0],
++ collections.abc.Iterable):
+ counts = [counts]
+ counts = np.asarray(counts)
+ if counts.ndim > 2:
+--- a/skbio/io/format/fasta.py
++++ b/skbio/io/format/fasta.py
+@@ -778,7 +778,7 @@
+ fh.write('>%s\n%s\n' % (header, seq_str))
+
+ if qual is not None:
+- qual_str = ' '.join(np.asarray(qual_scores, dtype=np.str))
++ qual_str = ' '.join(np.asarray(qual_scores, dtype=str))
+ if max_width is not None:
+ qual_str = qual_wrapper.fill(qual_str)
+ qual.write('>%s\n%s\n' % (header, qual_str))
+--- a/skbio/io/format/gff3.py
++++ b/skbio/io/format/gff3.py
+@@ -214,7 +214,7 @@
+ # ----------------------------------------------------------------------------
+
+ import re
+-from collections import Iterable
++from collections.abc import Iterable
+
+ from skbio.sequence import DNA, Sequence
+ from skbio.io import create_format, GFF3FormatError
+--- a/skbio/io/format/lsmat.py
++++ b/skbio/io/format/lsmat.py
+@@ -225,7 +225,7 @@
+ for id_, vals in zip(ids, obj.data):
+ fh.write("%s" % id_)
+ fh.write(delimiter)
+- fh.write(delimiter.join(np.asarray(vals, dtype=np.str)))
++ fh.write(delimiter.join(np.asarray(vals, dtype=str)))
+ fh.write('\n')
+
+
+--- a/skbio/io/format/ordination.py
++++ b/skbio/io/format/ordination.py
+@@ -411,7 +411,7 @@
+
+
+ def _format_vector(vector, id_=None):
+- formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
++ formatted_vector = '\t'.join(np.asarray(vector, dtype=str))
+
+ if id_ is None:
+ return "%s\n" % formatted_vector
+--- a/skbio/sequence/_grammared_sequence.py
++++ b/skbio/sequence/_grammared_sequence.py
+@@ -345,7 +345,7 @@
+ "Note: Use `lowercase` if your sequence contains lowercase "
+ "characters not in the sequence's alphabet."
+ % ('s' if len(bad) > 1 else '',
+- [str(b.tostring().decode("ascii")) for b in bad] if
++ [str(b.tobytes().decode("ascii")) for b in bad] if
+ len(bad) > 1 else bad[0],
+ list(self.alphabet)))
+
+--- a/skbio/sequence/_sequence.py
++++ b/skbio/sequence/_sequence.py
+@@ -25,7 +25,7 @@
+
+
+ class Sequence(MetadataMixin, PositionalMetadataMixin, IntervalMetadataMixin,
+- collections.Sequence, SkbioObject):
++ collections.abc.Sequence, SkbioObject):
+ """Store generic sequence data and optional associated metadata.
+
+ ``Sequence`` objects do not enforce an alphabet or grammar and are thus the
+@@ -424,7 +424,7 @@
+
+ @property
+ def _string(self):
+- return self._bytes.tostring()
++ return self._bytes.tobytes()
+
+ @classonlymethod
+ @experimental(as_of="0.4.1")
+@@ -1305,7 +1305,7 @@
+ index = self._munge_to_index_array(lowercase)
+ outbytes = self._bytes.copy()
+ outbytes[index] ^= self._ascii_invert_case_bit_offset
+- return str(outbytes.tostring().decode('ascii'))
++ return str(outbytes.tobytes().decode('ascii'))
+
+ @stable(as_of="0.4.0")
+ def count(self, subsequence, start=None, end=None):
+@@ -1812,7 +1812,7 @@
+ # Downcast from int64 to uint8 then convert to str. This is safe
+ # because we are guaranteed to have indices in the range 0 to 255
+ # inclusive.
+- chars = indices.astype(np.uint8).tostring().decode('ascii')
++ chars = indices.astype(np.uint8).tobytes().decode('ascii')
+
+ obs_counts = freqs[indices]
+ if relative:
+@@ -2102,7 +2102,7 @@
+ """
+ if isinstance(sliceable, str):
+ if sliceable in self.positional_metadata:
+- if self.positional_metadata[sliceable].dtype == np.bool:
++ if self.positional_metadata[sliceable].dtype == bool:
+ sliceable = self.positional_metadata[sliceable]
+ else:
+ raise TypeError("Column '%s' in positional metadata does "
+@@ -2121,7 +2121,7 @@
+ if isinstance(s, (bool, np.bool_)):
+ bool_mode = True
+ elif isinstance(s, (slice, int, np.signedinteger)) or (
+- hasattr(s, 'dtype') and s.dtype != np.bool):
++ hasattr(s, 'dtype') and s.dtype != bool):
+ int_mode = True
+ else:
+ raise TypeError("Invalid type in iterable: %s, must be one"
+@@ -2132,7 +2132,7 @@
+ " int.")
+ sliceable = np.r_[sliceable]
+
+- if sliceable.dtype == np.bool:
++ if sliceable.dtype == bool:
+ if sliceable.size != len(self):
+ raise ValueError("Boolean array (%d) does not match length of"
+ " sequence (%d)."
+--- a/skbio/sequence/tests/test_sequence.py
++++ b/skbio/sequence/tests/test_sequence.py
+@@ -11,7 +11,7 @@
+ import itertools
+ import re
+ from types import GeneratorType
+-from collections import Hashable
++from collections.abc import Hashable
+ from unittest import TestCase, main
+
+ import numpy as np
+--- a/skbio/stats/distance/_base.py
++++ b/skbio/stats/distance/_base.py
+@@ -547,8 +547,8 @@
+ subset = self._data[i_idx, j_indices]
+ values.append(subset)
+
+- i = pd.Series(i, name='i')
+- j = pd.Series(j, name='j')
++ i = pd.Series(i, name='i', dtype=str)
++ j = pd.Series(j, name='j', dtype=str)
+ values = pd.Series(np.hstack(values), name='value')
+
+ return pd.concat([i, j, values], axis=1)
+--- a/skbio/stats/distance/_mantel.py
++++ b/skbio/stats/distance/_mantel.py
+@@ -282,7 +282,7 @@
+ else:
+ perm_gen = (corr_func(x.permute(condensed=True), y_flat)[0]
+ for _ in range(permutations))
+- permuted_stats = np.fromiter(perm_gen, np.float, count=permutations)
++ permuted_stats = np.fromiter(perm_gen, float, count=permutations)
+
+ if alternative == 'two-sided':
+ count_better = (np.absolute(permuted_stats) >=
+--- a/skbio/stats/distance/_permdisp.py
++++ b/skbio/stats/distance/_permdisp.py
+@@ -139,8 +139,8 @@
+ test statistic name F-value
+ sample size 6
+ number of groups 2
+- test statistic 1.03296
+- p-value 0.35
++ test statistic ... 1.03...
++ p-value ...
+ number of permutations 99
+ Name: PERMDISP results, dtype: object
+
+@@ -155,7 +155,7 @@
+ test statistic name F-value
+ sample size 6
+ number of groups 2
+- test statistic 1.03296
++ test statistic ... 1.03...
+ p-value NaN
+ number of permutations 0
+ Name: PERMDISP results, dtype: object
+@@ -175,8 +175,8 @@
+ test statistic name F-value
+ sample size 6
+ number of groups 2
+- test statistic 3.67082
+- p-value 0.428571
++ test statistic ... 3.67...
++ p-value ... 0.42...
+ number of permutations 6
+ Name: PERMDISP results, dtype: object
+
+@@ -194,8 +194,8 @@
+ test statistic name F-value
+ sample size 6
+ number of groups 2
+- test statistic 3.67082
+- p-value 0.428571
++ test statistic ... 3.67...
++ p-value ... 0.42...
+ number of permutations 6
+ Name: PERMDISP results, dtype: object
+
+@@ -248,7 +248,8 @@
+ centroids = samples.groupby('grouping').apply(_config_med)
+
+ for label, df in samples.groupby('grouping'):
+- groups.append(cdist(df.values[:, :-1], [centroids.loc[label].values],
++ groups.append(cdist(df.values[:, :-1].astype('float64'),
++ [centroids.loc[label].values],
+ metric='euclidean'))
+
+ stat, _ = f_oneway(*groups)
+--- a/skbio/stats/distance/tests/test_base.py
++++ b/skbio/stats/distance/tests/test_base.py
+@@ -935,7 +935,7 @@
+ def test_to_series_1x1(self):
+ series = self.dm_1x1.to_series()
+
+- exp = pd.Series([], index=[])
++ exp = pd.Series([], index=[], dtype='float64')
+ assert_series_almost_equal(series, exp)
+
+ def test_to_series_2x2(self):
+--- a/skbio/stats/distance/tests/test_bioenv.py
++++ b/skbio/stats/distance/tests/test_bioenv.py
+@@ -146,7 +146,7 @@
+ # same distances yields *very* similar results. Thus, the discrepancy
+ # seems to stem from differences when computing ranks/ties.
+ obs = bioenv(self.dm_vegan, self.df_vegan)
+- assert_data_frame_almost_equal(obs, self.exp_results_vegan)
++ assert_data_frame_almost_equal(obs, self.exp_results_vegan, rtol=1e-3)
+
+ def test_bioenv_no_distance_matrix(self):
+ with self.assertRaises(TypeError):
+--- a/skbio/stats/distance/tests/test_permanova.py
++++ b/skbio/stats/distance/tests/test_permanova.py
+@@ -12,7 +12,7 @@
+
+ import numpy as np
+ import pandas as pd
+-from pandas.util.testing import assert_series_equal
++from pandas.testing import assert_series_equal
+
+ from skbio import DistanceMatrix
+ from skbio.stats.distance import permanova
+--- a/skbio/stats/distance/tests/test_permdisp.py
++++ b/skbio/stats/distance/tests/test_permdisp.py
+@@ -12,7 +12,7 @@
+ import numpy as np
+ import numpy.testing as npt
+ import pandas as pd
+-from pandas.util.testing import assert_series_equal
++from pandas.testing import assert_series_equal
+ from scipy.stats import f_oneway
+ import hdmedians as hd
+
+--- a/skbio/stats/gradient.py
++++ b/skbio/stats/gradient.py
+@@ -168,7 +168,7 @@
+ np.abs((w_vector[i] - w_vector[i-1]))
+ )
+
+- return trajectories
++ return trajectories.astype('float64')
+
+
+ def _ANOVA_trajectories(category, res_by_group):
+--- a/skbio/stats/ordination/_utils.py
++++ b/skbio/stats/ordination/_utils.py
+@@ -237,7 +237,7 @@
+ distance_matrix : 2D array_like
+ Distance matrix.
+ """
+- distance_matrix = distance_matrix.astype(np.float)
++ distance_matrix = distance_matrix.astype(float)
+
+ for i in np.arange(len(distance_matrix)):
+ distance_matrix[i] = (distance_matrix[i] * distance_matrix[i]) / -2
+@@ -259,7 +259,7 @@
+ e_matrix : 2D array_like
+ A matrix representing the "E matrix" as described above.
+ """
+- e_matrix = e_matrix.astype(np.float)
++ e_matrix = e_matrix.astype(float)
+
+ row_means = np.zeros(len(e_matrix), dtype=float)
+ col_means = np.zeros(len(e_matrix), dtype=float)
+--- a/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
++++ b/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
+@@ -15,7 +15,8 @@
+ from skbio import DistanceMatrix, OrdinationResults
+ from skbio.stats.distance import DissimilarityMatrixError
+ from skbio.stats.ordination import pcoa, pcoa_biplot
+-from skbio.util import get_data_path, assert_ordination_results_equal
++from skbio.util import (get_data_path, assert_ordination_results_equal,
++ assert_data_frame_almost_equal)
+
+
+ class TestPCoA(TestCase):
+@@ -262,7 +263,7 @@
+ full = pcoa_biplot(self.ordination, self.descriptors).features
+
+ # the biplot should be identical regardless of the number of axes used
+- pd.util.testing.assert_almost_equal(subset, full.iloc[:, :2])
++ assert_data_frame_almost_equal(subset, full.iloc[:, :2])
+
+ def test_mismatching_samples(self):
+ new_index = self.descriptors.index.tolist()
+--- a/skbio/stats/tests/test_composition.py
++++ b/skbio/stats/tests/test_composition.py
+@@ -10,7 +10,7 @@
+ from unittest import TestCase, main
+ import numpy as np
+ import numpy.testing as npt
+-import pandas.util.testing as pdt
++import pandas.testing as pdt
+ from numpy.random import normal
+ import pandas as pd
+ import scipy
+@@ -256,7 +256,8 @@
+
+ def test_clr_inv(self):
+ npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
+- npt.assert_allclose(clr(clr_inv(self.rdata1)), self.rdata1)
++ npt.assert_allclose(clr(clr_inv(self.rdata1)), self.rdata1,
++ rtol=1e-4, atol=1e-5)
+
+ # make sure that inplace modification is not occurring
+ clr_inv(self.rdata1)
+@@ -521,7 +522,7 @@
+ normal(10, 1, L),
+ normal(10, 1, L)))
+ self.table2 = np.absolute(self.table2)
+- self.table2 = pd.DataFrame(self.table2.astype(np.int).T)
++ self.table2 = pd.DataFrame(self.table2.astype(int).T)
+ self.cats2 = pd.Series([0]*D + [1]*D)
+
+ # Real valued data with 2 groupings and no significant difference
+@@ -554,7 +555,7 @@
+ normal(10, 1, L),
+ normal(10, 1, L)))
+ self.table4 = np.absolute(self.table4)
+- self.table4 = pd.DataFrame(self.table4.astype(np.int).T)
++ self.table4 = pd.DataFrame(self.table4.astype(int).T)
+ self.cats4 = pd.Series([0]*D + [1]*D + [2]*D)
+
+ # Noncontiguous case
+@@ -635,7 +636,7 @@
+ normal(10, 10, L),
+ normal(10, 10, L)))
+ self.table9 = np.absolute(self.table9)+1
+- self.table9 = pd.DataFrame(self.table9.astype(np.int).T)
++ self.table9 = pd.DataFrame(self.table9.astype(int).T)
+ self.cats9 = pd.Series([0]*D + [1]*D + [2]*D)
+
+ # Real valued data with 2 groupings
+@@ -669,7 +670,7 @@
+ normal(10, 10, L),
+ normal(10, 10, L)))
+ self.table10 = np.absolute(self.table10) + 1
+- self.table10 = pd.DataFrame(self.table10.astype(np.int).T)
++ self.table10 = pd.DataFrame(self.table10.astype(int).T)
+ self.cats10 = pd.Series([0]*D + [1]*D)
+
+ # zero count
+--- a/skbio/stats/tests/test_gradient.py
++++ b/skbio/stats/tests/test_gradient.py
+@@ -13,7 +13,7 @@
+ import numpy as np
+ import pandas as pd
+ import numpy.testing as npt
+-import pandas.util.testing as pdt
++import pandas.testing as pdt
+
+ from skbio.util import get_data_path, assert_data_frame_almost_equal
+ from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
+@@ -237,7 +237,7 @@
+ 's6': np.array([2.1795918367]),
+ 's7': np.array([17.8]),
+ 's8': np.array([20.3428571428])},
+- orient='index')
++ orient='index').astype(np.float64)
+ obs = _weight_by_vector(trajectory, w_vector)
+ assert_data_frame_almost_equal(obs.sort_index(), exp.sort_index())
+
+@@ -254,16 +254,16 @@
+ w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
+ ['s1', 's2', 's3', 's4',
+ 's5', 's6', 's7', 's8']).astype(np.float64)
+- exp = pd.DataFrame.from_dict({'s1': np.array([1.0]),
+- 's2': np.array([2.0]),
+- 's3': np.array([3.0]),
+- 's4': np.array([4.0]),
+- 's5': np.array([5.0]),
+- 's6': np.array([6.0]),
+- 's7': np.array([7.0]),
+- 's8': np.array([8.0])
++ exp = pd.DataFrame.from_dict({'s1': np.array([1]),
++ 's2': np.array([2]),
++ 's3': np.array([3]),
++ 's4': np.array([4]),
++ 's5': np.array([5]),
++ 's6': np.array([6]),
++ 's7': np.array([7]),
++ 's8': np.array([8])
+ },
+- orient='index')
++ orient='index').astype(np.float64)
+ obs = _weight_by_vector(trajectory, w_vector)
+ assert_data_frame_almost_equal(obs.sort_index(), exp.sort_index())
+
+@@ -276,11 +276,12 @@
+ trajectory.sort_values(by=0, inplace=True)
+ w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
+ ['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
+- exp = pd.DataFrame.from_dict({'s2': np.array([2.0]),
+- 's3': np.array([3.0]),
+- 's4': np.array([4.0]),
+- 's5': np.array([5.0]),
+- 's6': np.array([6.0])}, orient='index')
++ exp = pd.DataFrame.from_dict({'s2': np.array([2]),
++ 's3': np.array([3]),
++ 's4': np.array([4]),
++ 's5': np.array([5]),
++ 's6': np.array([6])},
++ orient='index').astype(np.float64)
+ obs = _weight_by_vector(trajectory, w_vector)
+ assert_data_frame_almost_equal(obs.sort_index(), exp.sort_index())
+
+--- a/skbio/tree/_tree.py
++++ b/skbio/tree/_tree.py
+@@ -2186,7 +2186,7 @@
+ (lo, hi, end) = (mids[0], mids[-1], len(result))
+ prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
+ (hi - lo - 1) + [PAD] * (end - hi)
+- mid = np.int(np.trunc((lo + hi) / 2))
++ mid = int(np.trunc((lo + hi) / 2))
+ prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
+ result = [p + l for (p, l) in zip(prefixes, result)]
+ if show_internal:
+--- a/skbio/tree/tests/test_nj.py
++++ b/skbio/tree/tests/test_nj.py
+@@ -80,8 +80,9 @@
+ self.expected1_str)
+ # what is the correct way to compare TreeNode objects for equality?
+ actual_TreeNode = nj(self.dm1)
++ # precision error on ARM: 1.6653345369377348e-16 != 0.0
+ self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
+- self.expected1_TreeNode), 0.0)
++ self.expected1_TreeNode), 0.0, places=10)
+
+ def test_nj_dm2(self):
+ actual_TreeNode = nj(self.dm2)
+--- a/skbio/util/_exception.py
++++ b/skbio/util/_exception.py
+@@ -9,7 +9,7 @@
+
+ class TestingUtilError(Exception):
+ """Raised when an exception is needed to test exception handling."""
+- pass
++ __test__ = False # prevent py-test from collecting it
+
+
+ class OverrideError(AssertionError):
+--- a/skbio/util/_testing.py
++++ b/skbio/util/_testing.py
+@@ -12,7 +12,7 @@
+
+ import numpy as np
+ import numpy.testing as npt
+-import pandas.util.testing as pdt
++import pandas.testing as pdt
+ from scipy.spatial.distance import pdist
+ from ._decorator import experimental
+
+@@ -265,7 +265,7 @@
+
+
+ @experimental(as_of="0.4.0")
+-def assert_data_frame_almost_equal(left, right):
++def assert_data_frame_almost_equal(left, right, rtol=1e-5):
+ """Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
+
+ Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
+@@ -306,10 +306,10 @@
+ check_index_type=True,
+ check_column_type=True,
+ check_frame_type=True,
+- check_less_precise=False,
+ check_names=True,
+ by_blocks=False,
+- check_exact=False)
++ check_exact=False,
++ rtol=rtol)
+ # this check ensures that empty DataFrames with different indices do not
+ # compare equal. exact=True specifies that the type of the indices must be
+ # exactly the same
+@@ -323,7 +323,6 @@
+ check_dtype=True,
+ check_index_type=True,
+ check_series_type=True,
+- check_less_precise=False,
+ check_names=True,
+ check_exact=False,
+ check_datetimelike_compat=False,
+--- a/skbio/util/tests/test_testing.py
++++ b/skbio/util/tests/test_testing.py
+@@ -228,7 +228,8 @@
+
+ def setUp(self):
+ self.series = [
+- pd.Series(),
++ pd.Series(dtype='float64'),
++ pd.Series(dtype=object),
+ pd.Series(dtype='int64'),
+ pd.Series([1, 2, 3]),
+ pd.Series([3, 2, 1]),
+--- a/skbio/workflow.py
++++ b/skbio/workflow.py
+@@ -204,7 +204,7 @@
+ from copy import deepcopy
+ from time import time
+ from functools import update_wrapper
+-from collections import Iterable
++from collections.abc import Iterable
+ from types import MethodType
+
+ from skbio.util._decorator import experimental
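Beyond the dtype aliases, the same patch tracks a handful of other upstream API moves: the abstract base classes are imported from collections.abc (the bare collections aliases are gone in Python 3.10), ndarray.tostring() is replaced by the identically-behaved tobytes(), empty pandas Series get an explicit dtype, and pandas.util.testing gives way to the public pandas.testing module. A minimal sketch of those replacements, using made-up data rather than skbio objects:

import collections.abc           # old code used the aliases in `collections`
import numpy as np
import pandas as pd
import pandas.testing as pdt     # old: import pandas.util.testing as pdt

# Hashable/Iterable/Sequence must come from collections.abc on Python >= 3.10.
assert isinstance("ACGT", collections.abc.Hashable)

# tostring() is a deprecated alias of tobytes(); the byte content is identical.
seq_bytes = np.frombuffer(b"ACGT", dtype=np.uint8)
assert seq_bytes.tobytes().decode("ascii") == "ACGT"

# An explicit dtype silences the pandas warning about the default dtype of an
# empty Series changing from float64 to object.
empty_seqs = pd.Series([], dtype=object)

# The public testing helpers take rtol/atol instead of check_less_precise.
pdt.assert_frame_equal(pd.DataFrame({"x": [1.0]}),
                       pd.DataFrame({"x": [1.0 + 1e-9]}),
                       check_exact=False, rtol=1e-5)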
=====================================
debian/patches/series
=====================================
@@ -7,4 +7,4 @@ pandas1.1-ordination-fix.patch
pandas1.1-valueerror.patch
local_inventory
sphinx_add_javascript.patch
-skip_tests_failung_due_to_scipy.patch
+fix-dtype-precision-warnings.patch
=====================================
debian/patches/skip_tests_failung_due_to_scipy.patch deleted
=====================================
@@ -1,69 +0,0 @@
-Author: Andreas Tille <tille at debian.org>
-Last-Update: Wed, 13 Oct 2021 08:44:29 +0200
-Bug-Debian: https://bugs.debian.org/992676
-Description: For the moment the 4 tests that are broken due to bug #992676 are excluded
-
---- a/skbio/stats/distance/tests/test_permdisp.py
-+++ b/skbio/stats/distance/tests/test_permdisp.py
-@@ -110,61 +110,6 @@ class testPERMDISP(TestCase):
- check_index_type=True,
- check_series_type=True)
-
-- def test_centroids_eq_groups(self):
-- exp = [[1.2886811963240687, 1.890538910062923, 1.490527658097728],
-- [2.17349240061718, 2.3192679626679946, 2.028338553903792]]
-- exp_stat, _ = f_oneway(*exp)
--
-- dm = pcoa(self.eq_mat)
-- dm = dm.samples
--
-- obs = _compute_groups(dm, 'centroid', self.grouping_eq)
-- self.assertAlmostEqual(obs, exp_stat, places=6)
--
-- obs_relab = _compute_groups(dm, 'centroid', self.grouping_eq_relab)
-- self.assertAlmostEqual(obs_relab, obs, places=6)
--
-- def test_centroids_uneq_groups(self):
-- """
-- the expected result here was calculated by hand
-- """
-- exp = [[2.5847022428144935, 2.285624595858895,
-- 1.7022431146340287],
-- [1.724817266046108, 1.724817266046108],
-- [2.4333280644972795, 2.389000390879655,
-- 2.8547180589306036, 3.218568759338847]]
-- exp_stat, _ = f_oneway(*exp)
--
-- dm = pcoa(self.uneq_mat)
-- dm = dm.samples
--
-- obs = _compute_groups(dm, 'centroid', self.grouping_uneq)
-- self.assertAlmostEqual(obs, exp_stat, places=6)
--
-- obs_relab = _compute_groups(dm, 'centroid', self.grouping_uneq_relab)
-- self.assertAlmostEqual(obs, obs_relab, places=6)
--
-- def test_centroids_mixedgroups(self):
-- exp = [[2.5847022428144935, 2.285624595858895,
-- 1.7022431146340287],
-- [1.724817266046108, 1.724817266046108],
-- [2.4333280644972795, 2.389000390879655,
-- 2.8547180589306036, 3.218568759338847]]
-- dm = pcoa(self.uneq_mat)
-- dm = dm.samples
--
-- exp_stat, _ = f_oneway(*exp)
--
-- obs_mixed = _compute_groups(dm, 'centroid', self.grouping_un_mixed)
-- self.assertAlmostEqual(exp_stat, obs_mixed, places=6)
--
-- def test_centroids_null(self):
-- dm = pcoa(self.null_mat)
-- dm = dm.samples
--
-- obs_null = _compute_groups(dm, 'centroid', self.grouping_eq)
-- np.isnan(obs_null)
--
- def test_centroid_normal(self):
- exp = pd.Series(index=self.exp_index,
- data=['PERMDISP', 'F-value', 9, 2, 0.244501519876,
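The deleted patch above worked around #992676 by skipping the four PERMDISP centroid tests; with the cherry-picked commit the skip is no longer needed, since the permdisp code now casts the cdist() input to float64 and several architecture-sensitive floating-point comparisons gain explicit tolerances rather than exact expected values. A minimal illustration of that tolerance pattern, with made-up numbers:

import numpy as np
import numpy.testing as npt

# Values that pass through an eigendecomposition can differ in the last few
# bits between BLAS builds and architectures, so the comparison carries an
# explicit relative/absolute tolerance rather than being skipped.
expected = np.array([0.57735027, 0.21132487, 0.21132487])
observed = expected + np.array([3e-6, -2e-6, 1e-6])   # simulated platform jitter

npt.assert_allclose(observed, expected, rtol=1e-4, atol=1e-5)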
View it on GitLab: https://salsa.debian.org/med-team/python-skbio/-/compare/2616e52a5a2d51b96c29e1bed0d524dea8adeee2...78913d16c1e591dd39f7bbf294861239d205a54c