[med-svn] [python-cobra] 04/06: Work around #846045 (in pytest-benchmark) to be able to run tests

Afif Elghraoui afif at moszumanska.debian.org
Tue Nov 29 11:47:39 UTC 2016


This is an automated email from the git hooks/post-receive script.

afif pushed a commit to branch master
in repository python-cobra.

commit 0a10ae201df1065780a90b4360ced2f279be276d
Author: Afif Elghraoui <afif at debian.org>
Date:   Tue Nov 29 03:10:22 2016 -0800

    Work around #846045 (in pytest-benchmark) to be able to run tests
    
    See the quilt patch header for more information.
---
 debian/control                                |   2 -
 debian/patches/exclude-pytest-benchmark.patch | 248 ++++++++++++++++++++++++++
 debian/patches/series                         |   1 +
 debian/rules                                  |   1 -
 debian/tests/control                          |   2 -
 5 files changed, 249 insertions(+), 5 deletions(-)
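
For context, the whole workaround hinges on the pytest invocation in
cobra/test/__init__.py (last hunk of the patch below). A minimal sketch of
the before/after behaviour; the pre-patch failure mode is the plugin
breakage described in #846045:

    import pytest

    # Pre-patch: pytest only accepts --benchmark-skip when the
    # pytest-benchmark plugin loads; per the patch header, #846045
    # makes that plugin unusable, so this route is closed off.
    pytest.main(['--pyargs', 'cobra', '--benchmark-skip', '-v', '-rs'])

    # Post-patch: the benchmark tests are removed outright, so neither
    # the plugin nor the flag is needed.
    pytest.main(['--pyargs', 'cobra', '-v', '-rs'])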

diff --git a/debian/control b/debian/control
index a27aef3..9d74589 100644
--- a/debian/control
+++ b/debian/control
@@ -21,9 +21,7 @@ Build-Depends:
 	cython3,
 # Test-Depends:
 	python-pytest,
-	python-pytest-benchmark,
 	python3-pytest,
-	python3-pytest-benchmark,
 	python-jsonschema (>> 2.5.0),
 	python-numpy,
 	python-scipy,
diff --git a/debian/patches/exclude-pytest-benchmark.patch b/debian/patches/exclude-pytest-benchmark.patch
new file mode 100644
index 0000000..da5c12e
--- /dev/null
+++ b/debian/patches/exclude-pytest-benchmark.patch
@@ -0,0 +1,248 @@
+Description: Remove benchmark functions from test suite
+ We would not run the benchmarks during the build or CI anyway.
+ However, we would still need python-pytest-benchmark just to tell
+ pytest to skip them. Because of #846045, that does not work, so I
+ resort to simply removing the code that would otherwise be skipped.
+Author: Afif Elghraoui <afif at debian.org>
+Forwarded: not-needed
+--- python-cobra.orig/cobra/test/test_model.py
++++ python-cobra/cobra/test/test_model.py
+@@ -61,22 +61,6 @@
+         fake_gene.name = "foo_gene"
+         assert reaction.gene_name_reaction_rule == fake_gene.name
+ 
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_add_metabolite_benchmark(self, model, benchmark, solver):
+-        reaction = model.reactions.get_by_id("PGI")
+-        many_metabolites = dict((m, 1) for m in model.metabolites[0:50])
+-
+-        def add_remove_metabolite():
+-            reaction.add_metabolites(many_metabolites)
+-            if not getattr(model, 'solver', None):
+-                solver_dict[solver].create_problem(model)
+-            for m, c in many_metabolites.items():
+-                try:
+-                    reaction.pop(m.id)
+-                except KeyError:
+-                    pass
+-        benchmark(add_remove_metabolite)
+-
+     def test_add_metabolite(self, model):
+         reaction = model.reactions.get_by_id("PGI")
+         reaction.add_metabolites({model.metabolites[0]: 1})
+@@ -103,10 +87,6 @@
+         assert len(reaction._metabolites) == 1
+ 
+     @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_subtract_metabolite_benchmark(self, model, benchmark, solver):
+-        benchmark(self.test_subtract_metabolite, model, solver)
+-
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+     def test_subtract_metabolite(self, model, solver):
+         reaction = model.reactions.get_by_id("PGI")
+         reaction.subtract_metabolites(reaction.metabolites)
+@@ -229,25 +209,6 @@
+ class TestCobraModel:
+     """test core cobra functions"""
+ 
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_add_remove_reaction_benchmark(self, model, benchmark, solver):
+-        metabolite_foo = Metabolite("test_foo")
+-        metabolite_bar = Metabolite("test_bar")
+-        metabolite_baz = Metabolite("test_baz")
+-        actual_metabolite = model.metabolites[0]
+-        dummy_reaction = Reaction("test_foo_reaction")
+-        dummy_reaction.add_metabolites({metabolite_foo: -1,
+-                                        metabolite_bar: 1,
+-                                        metabolite_baz: -2,
+-                                        actual_metabolite: 1})
+-
+-        def benchmark_add_reaction():
+-            model.add_reaction(dummy_reaction)
+-            if not getattr(model, 'solver', None):
+-                solver_dict[solver].create_problem(model)
+-            model.remove_reactions([dummy_reaction], delete=False)
+-        benchmark(benchmark_add_reaction)
+-
+     def test_add_reaction(self, model):
+         old_reaction_count = len(model.reactions)
+         old_metabolite_count = len(model.metabolites)
+@@ -397,22 +358,6 @@
+         for reaction in gene_reactions:
+             assert target_gene not in reaction.genes
+ 
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_copy_benchmark(self, model, solver, benchmark):
+-        def _():
+-            model.copy()
+-            if not getattr(model, 'solver', None):
+-                solver_dict[solver].create_problem(model)
+-        benchmark(_)
+-
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_copy_benchmark_large_model(self, large_model, solver, benchmark):
+-        def _():
+-            large_model.copy()
+-            if not getattr(large_model, 'solver', None):
+-                solver_dict[solver].create_problem(large_model)
+-        benchmark(_)
+-
+     def test_copy(self, model):
+         """modifying copy should not modify the original"""
+         # test that deleting reactions in the copy does not change the
+@@ -427,9 +372,6 @@
+         assert old_reaction_count == len(model.reactions)
+         assert len(model.reactions) != len(model_copy.reactions)
+ 
+-    def test_deepcopy_benchmark(self, model, benchmark):
+-        benchmark(deepcopy, model)
+-
+     def test_deepcopy(self, model):
+         """Reference structures are maintained when deepcopying"""
+         model_copy = deepcopy(model)
+@@ -465,16 +407,6 @@
+         # 'check not dangling metabolites when running Model.add_reactions
+         assert len(orphan_metabolites) == 0
+ 
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_change_objective_benchmark(self, model, benchmark, solver):
+-        atpm = model.reactions.get_by_id("ATPM")
+-
+-        def benchmark_change_objective():
+-            model.objective = atpm.id
+-            if not getattr(model, 'solver', None):
+-                solver_dict[solver].create_problem(model)
+-        benchmark(benchmark_change_objective)
+-
+     def test_change_objective(self, model):
+         biomass = model.reactions.get_by_id("Biomass_Ecoli_core")
+         atpm = model.reactions.get_by_id("ATPM")
+--- python-cobra.orig/cobra/test/test_flux_analysis.py
++++ python-cobra/cobra/test/test_flux_analysis.py
+@@ -44,10 +44,6 @@
+     """Test the simulation functions in cobra.flux_analysis"""
+ 
+     @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_pfba_benchmark(self, large_model, benchmark, solver):
+-        benchmark(optimize_minimal_flux, large_model, solver=solver)
+-
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+     def test_pfba(self, model, solver):
+         optimize_minimal_flux(model, solver=solver)
+         abs_x = [abs(i) for i in model.solution.x]
+@@ -96,13 +92,6 @@
+             optimize_minimal_flux(model, solver=solver)
+         model.reactions.ATPM.lower_bound = atpm
+ 
+-    def test_single_gene_deletion_fba_benchmark(self, large_model, benchmark):
+-        genes = ['b0511', 'b2521', 'b0651', 'b2502', 'b3132', 'b1486', 'b3384',
+-                 'b4321', 'b3428', 'b2789', 'b0052', 'b0115',
+-                 'b2167', 'b0759', 'b3389', 'b4031', 'b3916', 'b2374', 'b0677',
+-                 'b2202']
+-        benchmark(single_gene_deletion, large_model, gene_list=genes)
+-
+     def test_single_gene_deletion_fba(self, model):
+         # expected knockouts for textbook model
+         growth_dict = {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
+@@ -114,15 +103,6 @@
+             assert statuses[gene] == 'optimal'
+             assert abs(rates[gene] - expected_value) < 0.01
+ 
+-    def test_single_gene_deletion_moma_benchmark(self, large_model, benchmark):
+-        try:
+-            get_solver_name(qp=True)
+-        except SolverNotFound:
+-            pytest.skip("no qp support")
+-        genes = ['b1764', 'b0463', 'b1779', 'b0417']
+-        benchmark(single_gene_deletion, large_model, gene_list=genes,
+-                  method="moma")
+-
+     def test_single_gene_deletion_moma(self, model):
+         try:
+             get_solver_name(qp=True)
+@@ -140,11 +120,6 @@
+             assert statuses[gene] == 'optimal'
+             assert abs(rates[gene] - expected_value) < 0.01
+ 
+-    def test_single_gene_deletion_benchmark(self, large_model, benchmark):
+-        reactions = ['CDPMEK', 'PRATPP', 'HISTD', 'PPCDC']
+-        benchmark(single_reaction_deletion, large_model,
+-                  reaction_list=reactions)
+-
+     def test_single_reaction_deletion(self, model):
+         expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
+                             'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
+@@ -169,12 +144,6 @@
+                 assert abs(matrix1[i][j] - matrix2[i][j]) < 10 ** -places
+ 
+     @pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
+-    def test_double_gene_deletion_benchmark(self, large_model, benchmark):
+-        genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
+-                 "b1241"]
+-        benchmark(double_gene_deletion, large_model, gene_list1=genes)
+-
+-    @pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
+     def test_double_gene_deletion(self, model):
+         genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
+                  "b1241"]
+@@ -216,11 +185,6 @@
+         self.compare_matrices(growth_list, solution["data"])
+ 
+     @pytest.mark.parametrize("solver", list(solver_dict))
+-    def test_flux_variability_benchmark(self, large_model, benchmark, solver):
+-        benchmark(flux_variability_analysis, large_model, solver=solver,
+-                  reaction_list=large_model.reactions[1::3])
+-
+-    @pytest.mark.parametrize("solver", list(solver_dict))
+     def test_flux_variability(self, model, fva_results, solver):
+         if solver == "esolver":
+             pytest.skip("esolver too slow...")
+@@ -271,10 +235,6 @@
+         test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
+         return test_model
+ 
+-    def test_loopless_benchmark(self, benchmark):
+-        test_model = self.construct_ll_test_model()
+-        benchmark(lambda: construct_loopless_model(test_model).optimize())
+-
+     def test_loopless(self):
+         try:
+             get_solver_name(mip=True)
+@@ -330,12 +290,6 @@
+         assert {i[0].id for i in result} == {"SMILEY_EX_b", "SMILEY_EX_c"}
+ 
+     @pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
+-    def test_phenotype_phase_plane_benchmark(self, model, benchmark):
+-        benchmark(calculate_phenotype_phase_plane,
+-                  model, "EX_glc__D_e", "EX_o2_e",
+-                  reaction1_npoints=20, reaction2_npoints=20)
+-
+-    @pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
+     def test_phenotype_phase_plane(self, model):
+         data = calculate_phenotype_phase_plane(
+             model, "EX_glc__D_e", "EX_o2_e",
+--- python-cobra.orig/cobra/test/test_io.py
++++ python-cobra/cobra/test/test_io.py
+@@ -179,14 +179,6 @@
+         self.extra_comparisons(name, test_model, reread_model)
+ 
+ 
+-def test_benchmark_read(data_directory, benchmark):
+-    benchmark(io.sbml3.read_sbml_model, join(data_directory, 'mini_fbc2.xml'))
+-
+-
+-def test_benchmark_write(model, benchmark):
+-    benchmark(io.sbml3.write_sbml_model, model, join(gettempdir(), "-bench"))
+-
+-
+ @pytest.mark.parametrize("trial", trials)
+ def test_validate(trial, data_directory):
+     if trial.validation_function is None:
+--- python-cobra.orig/cobra/test/__init__.py
++++ python-cobra/cobra/test/__init__.py
+@@ -39,4 +39,4 @@
+     """ alias for running all unit-tests on installed cobra
+     """
+     return pytest.main(
+-        ['--pyargs', 'cobra', '--benchmark-skip', '-v', '-rs']) == 0
++        ['--pyargs', 'cobra', '-v', '-rs']) == 0
diff --git a/debian/patches/series b/debian/patches/series
index d86c913..0987c97 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1 +1,2 @@
 mathjax.patch
+exclude-pytest-benchmark.patch
diff --git a/debian/rules b/debian/rules
index ca00316..c7d3397 100755
--- a/debian/rules
+++ b/debian/rules
@@ -4,7 +4,6 @@
 #include /usr/share/dpkg/default.mk
 export LC_ALL=C.UTF-8
 export PYBUILD_NAME=cobra
-export PYBUILD_TEST_ARGS=--benchmark-skip
 
 
 %:
diff --git a/debian/tests/control b/debian/tests/control
index eb2cc83..b321b9d 100644
--- a/debian/tests/control
+++ b/debian/tests/control
@@ -5,7 +5,6 @@ Depends:
 	make,
 	python-cobra,
 	python-pytest,
-	python-pytest-benchmark,
 	python-jsonschema,
 # We could have avoided retyping these by adding the "needs-recommends" restriction, but doing that would also cause matplotlib to be pulled in.
 # The tests involving matplotlib fail on debci from trying to do graphical operations on a headless server.
@@ -22,7 +21,6 @@ Depends:
 	make,
 	python3-cobra,
 	python3-pytest,
-	python3-pytest-benchmark,
 	python3-jsonschema,
 # See the comment for the corresponding section in the python2 test above.
 	python3-scipy,

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-cobra.git


