[med-svn] [Git][med-team/python-cobra][master] 10 commits: python3-depinfo is available now

Andreas Tille gitlab at salsa.debian.org
Wed Oct 17 20:37:59 BST 2018


Andreas Tille pushed to branch master at Debian Med / python-cobra


Commits:
ef466393 by Andreas Tille at 2018-10-17T18:16:04Z
python3-depinfo is available now

- - - - -
13239c5c by Andreas Tille at 2018-10-17T18:18:22Z
New upstream version 0.13.4
- - - - -
c5f3728f by Andreas Tille at 2018-10-17T18:18:29Z
Update upstream source from tag 'upstream/0.13.4'

Update to upstream version '0.13.4'
with Debian dir 4489dd011abc622ebcec18d930e78fb34f86badc
- - - - -
7e3c90aa by Andreas Tille at 2018-10-17T18:18:29Z
New upstream version

- - - - -
3aad7e9a by Andreas Tille at 2018-10-17T18:22:19Z
cme fix dpkg-control

- - - - -
f18abfe1 by Andreas Tille at 2018-10-17T18:22:35Z
Adapt patches

- - - - -
2aa17ff4 by Andreas Tille at 2018-10-17T18:45:28Z
Silence lintian about wrong Python version of data file

- - - - -
0e8f2bd4 by Andreas Tille at 2018-10-17T18:45:59Z
Remove unneeded patch

- - - - -
24bf1109 by Andreas Tille at 2018-10-17T19:20:55Z
Drop autopkgtest for Python2 version

- - - - -
11d80fce by Andreas Tille at 2018-10-17T19:32:00Z
Upload to unstable

- - - - -


30 changed files:

- .travis.yml
- cobra/__init__.py
- cobra/io/mat.py
- cobra/io/yaml.py
- − cobra/test/test_flux_analysis.py
- + cobra/test/test_flux_analysis/conftest.py
- + cobra/test/test_flux_analysis/test_deletion.py
- + cobra/test/test_flux_analysis/test_gapfilling.py
- + cobra/test/test_flux_analysis/test_geometric.py
- + cobra/test/test_flux_analysis/test_loopless.py
- + cobra/test/test_flux_analysis/test_moma.py
- + cobra/test/test_flux_analysis/test_parsimonious.py
- + cobra/test/test_flux_analysis/test_phenotype_phase_plane.py
- + cobra/test/test_flux_analysis/test_reaction.py
- + cobra/test/test_flux_analysis/test_room.py
- + cobra/test/test_flux_analysis/test_sampling.py
- + cobra/test/test_flux_analysis/test_summary.py
- + cobra/test/test_flux_analysis/test_variability.py
- debian/changelog
- debian/control
- − debian/patches/drop_failing_test.patch
- debian/patches/series
- − debian/patches/testsuite-check-jsonschema.patch
- + debian/python3-cobra.lintian-overrides
- debian/tests/Makefile
- debian/tests/control.autodep8
- + release-notes/0.13.4.md
- setup.cfg
- setup.py
- tox.ini


Changes:

=====================================
.travis.yml
=====================================
@@ -34,6 +34,9 @@ matrix:
     - os: linux
       python: 3.6
       env: TOXENV=py36
+    - os: linux
+      python: 3.7-dev
+      env: TOXENV=py37
     - os: linux
       python: 3.5
       env: TOXENV=sbml


=====================================
cobra/__init__.py
=====================================
@@ -13,7 +13,7 @@ from cobra.core import (
     DictList, Gene, Metabolite, Model, Object, Reaction, Species)
 from cobra.util import show_versions
 
-__version__ = "0.13.3"
+__version__ = "0.13.4"
 
 # set the warning format to be prettier and fit on one line
 _cobra_path = _dirname(_abspath(__file__))


=====================================
cobra/io/mat.py
=====================================
@@ -24,7 +24,7 @@ except ImportError:
 
 
 # precompiled regular expressions
-_bracket_re = re.compile("r\[[a-z]\]$")
+_bracket_re = re.compile(r"\[[a-z]\]$")
 _underscore_re = re.compile(r"_[a-z]$")
 
 


=====================================
cobra/io/yaml.py
=====================================
@@ -5,11 +5,26 @@ from __future__ import absolute_import
 import io
 
 from six import string_types
-from ruamel import yaml
+from ruamel.yaml import YAML
+from ruamel.yaml.compat import StringIO
 
 from cobra.io.dict import model_to_dict, model_from_dict
 
-YAML_SPEC = "1"
+YAML_SPEC = "1.2"
+
+
+class MyYAML(YAML):
+    def dump(self, data, stream=None, **kwargs):
+        inefficient = False
+        if stream is None:
+            inefficient = True
+            stream = StringIO()
+        YAML.dump(self, data, stream, **kwargs)
+        if inefficient:
+            return stream.getvalue()
+
+
+yaml = MyYAML(typ="rt")
 
 
 def to_yaml(model, sort=False, **kwargs):
@@ -36,9 +51,10 @@ def to_yaml(model, sort=False, **kwargs):
     save_yaml_model : Write directly to a file.
     ruamel.yaml.dump : Base function.
     """
+
     obj = model_to_dict(model, sort=sort)
     obj["version"] = YAML_SPEC
-    return yaml.dump(obj, Dumper=yaml.RoundTripDumper, **kwargs)
+    return yaml.dump(obj, **kwargs)
 
 
 def from_yaml(document):
@@ -59,7 +75,8 @@ def from_yaml(document):
     --------
     load_yaml_model : Load directly from a file.
     """
-    return model_from_dict(yaml.load(document, yaml.RoundTripLoader))
+    content = StringIO(document)
+    return model_from_dict(yaml.load(content))
 
 
 def save_yaml_model(model, filename, sort=False, **kwargs):
@@ -88,9 +105,9 @@ def save_yaml_model(model, filename, sort=False, **kwargs):
     obj["version"] = YAML_SPEC
     if isinstance(filename, string_types):
         with io.open(filename, "w") as file_handle:
-            yaml.dump(obj, file_handle, Dumper=yaml.RoundTripDumper, **kwargs)
+            yaml.dump(obj, file_handle, **kwargs)
     else:
-        yaml.dump(obj, filename, Dumper=yaml.RoundTripDumper, **kwargs)
+        yaml.dump(obj, filename, **kwargs)
 
 
 def load_yaml_model(filename):
@@ -114,7 +131,6 @@ def load_yaml_model(filename):
     """
     if isinstance(filename, string_types):
         with io.open(filename, "r") as file_handle:
-            return model_from_dict(yaml.load(file_handle,
-                                             yaml.RoundTripLoader))
+            return model_from_dict(yaml.load(file_handle))
     else:
-        return model_from_dict(yaml.load(filename, yaml.RoundTripLoader))
+        return model_from_dict(yaml.load(filename))


=====================================
cobra/test/test_flux_analysis.py deleted
=====================================
@@ -1,1219 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
-import re
-import sys
-import warnings
-import math
-import pytest
-import numpy
-from pandas import Series
-from contextlib import contextmanager
-from optlang.interface import OPTIMAL, INFEASIBLE
-from six import StringIO, iteritems
-
-import cobra.util.solver as sutil
-from cobra.core import Metabolite, Model, Reaction, Solution
-from cobra.flux_analysis import *
-from cobra.flux_analysis.parsimonious import add_pfba
-from cobra.flux_analysis import geometric_fba
-from cobra.flux_analysis.sampling import ACHRSampler, OptGPSampler
-from cobra.flux_analysis.reaction import assess
-from cobra.exceptions import Infeasible
-from cobra.flux_analysis.moma import add_moma
-from cobra.flux_analysis.room import add_room
-
-# The scipy interface is currently unstable and may yield errors or infeasible
-# solutions.
-all_solvers = [s for s in ["glpk", "cplex", "gurobi"] if s in sutil.solvers]
-qp_solvers = [s for s in ["cplex", "gurobi"] if s in sutil.solvers]
-
-
-def construct_ll_test_model():
-    test_model = Model()
-    test_model.add_metabolites(Metabolite("A"))
-    test_model.add_metabolites(Metabolite("B"))
-    test_model.add_metabolites(Metabolite("C"))
-    EX_A = Reaction("EX_A")
-    EX_A.add_metabolites({test_model.metabolites.A: 1})
-    DM_C = Reaction("DM_C")
-    DM_C.add_metabolites({test_model.metabolites.C: -1})
-    v1 = Reaction("v1")
-    v1.add_metabolites({test_model.metabolites.A: -1,
-                        test_model.metabolites.B: 1})
-    v2 = Reaction("v2")
-    v2.add_metabolites({test_model.metabolites.B: -1,
-                        test_model.metabolites.C: 1})
-    v3 = Reaction("v3")
-    v3.add_metabolites({test_model.metabolites.C: -1,
-                        test_model.metabolites.A: 1})
-    test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
-    DM_C.objective_coefficient = 1
-    return test_model
-
-
-@pytest.fixture(scope="function", params=all_solvers)
-def ll_test_model(request):
-    test_model = construct_ll_test_model()
-    test_model.solver = request.param
-    return test_model
-
-
-def construct_room_model():
-    test_model = Model("papin_2003")
-    v1 = Reaction("v1")
-    v2 = Reaction("v2")
-    v3 = Reaction("v3")
-    v4 = Reaction("v4")
-    v5 = Reaction("v5")
-    v6 = Reaction("v6", upper_bound=0.0)
-    b1 = Reaction("b1", upper_bound=10.0, lower_bound=0.0)
-    b2 = Reaction("b2")
-    b3 = Reaction("b3")
-    test_model.add_reactions([v1, v2, v3, v4, v5, v6, b1, b2, b3])
-    v1.reaction = "A -> B"
-    v2.reaction = "2 B -> C + byp"
-    v3.reaction = "2 B + cof -> D"
-    v4.reaction = "D -> E + cof"
-    v5.reaction = "C + cof -> D"
-    v6.reaction = "C -> E"
-    b1.reaction = "-> A"
-    b2.reaction = "E ->"
-    b3.reaction = "byp ->"
-    test_model.objective = 'b2'
-    return test_model
-
-
-def construct_room_solution():
-    fluxes = Series({'b1': 10.0, 'b2': 5.0, 'b3': 5.0, 'v1': 10.0, 'v2': 5.0,
-                     'v3': 0.0, 'v4': 0.0, 'v5': 0.0, 'v6': 5.0})
-    reduced_costs = Series({'b1': 0.0, 'b2': 0.0, 'b3': 0.0, 'v1': 0.0,
-                            'v2': 0.0, 'v3': 0.0, 'v4': 0.0, 'v5': 0.0,
-                            'v6': 0.0})
-    shadow_prices = Series({'b1': 0.0, 'b2': 0.0, 'b3': 0.0, 'v1': 0.0,
-                            'v2': 0.0, 'v3': 0.0, 'v4': 0.0, 'v5': 0.0,
-                            'v6': 0.0})
-    sol = Solution(objective_value=5.000, status='optimal',
-                   fluxes=fluxes,
-                   reduced_costs=reduced_costs,
-                   shadow_prices=shadow_prices)
-    return sol
-
-
-def construct_geometric_fba_model():
-    test_model = Model('geometric_fba_paper_model')
-    test_model.add_metabolites(Metabolite('A'))
-    test_model.add_metabolites(Metabolite('B'))
-    v1 = Reaction('v1', upper_bound=1.0)
-    v1.add_metabolites({test_model.metabolites.A: 1.0})
-    v2 = Reaction('v2', lower_bound=-1000.0)
-    v2.add_metabolites({test_model.metabolites.A: -1.0,
-                        test_model.metabolites.B: 1.0})
-    v3 = Reaction('v3', lower_bound=-1000.0)
-    v3.add_metabolites({test_model.metabolites.A: -1.0,
-                        test_model.metabolites.B: 1.0})
-    v4 = Reaction('v4', lower_bound=-1000.0)
-    v4.add_metabolites({test_model.metabolites.A: -1.0,
-                        test_model.metabolites.B: 1.0})
-    v5 = Reaction('v5')
-    v5.add_metabolites({test_model.metabolites.A: 0.0,
-                        test_model.metabolites.B: -1.0})
-    test_model.add_reactions([v1, v2, v3, v4, v5])
-    test_model.objective = 'v5'
-    return test_model
-
-
- at contextmanager
-def captured_output():
-    """A context manager to test the IO summary methods."""
-    new_out, new_err = StringIO(), StringIO()
-    old_out, old_err = sys.stdout, sys.stderr
-    try:
-        sys.stdout, sys.stderr = new_out, new_err
-        yield sys.stdout, sys.stderr
-    finally:
-        sys.stdout, sys.stderr = old_out, old_err
-
-
-class TestCobraFluxAnalysis:
-    """Test the simulation functions in cobra.flux_analysis."""
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_pfba_benchmark(self, large_model, benchmark, solver):
-        large_model.solver = solver
-        benchmark(pfba, large_model)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_pfba(self, model, solver):
-        model.solver = solver
-        with model:
-            add_pfba(model)
-            with pytest.raises(ValueError):
-                add_pfba(model)
-
-        expression = model.objective.expression
-        n_constraints = len(model.constraints)
-        solution = pfba(model)
-        assert solution.status == "optimal"
-        assert numpy.isclose(solution.x_dict["Biomass_Ecoli_core"],
-                             0.8739, atol=1e-4, rtol=0.0)
-        abs_x = [abs(i) for i in solution.x]
-        assert numpy.isclose(sum(abs_x), 518.4221, atol=1e-4, rtol=0.0)
-        # test changes to model reverted
-        assert expression == model.objective.expression
-        assert len(model.constraints) == n_constraints
-
-        # needed?
-        # Test desired_objective_value
-        # desired_objective = 0.8
-        # pfba(model, solver=solver,
-        #                       desired_objective_value=desired_objective)
-        # abs_x = [abs(i) for i in model.solution.x]
-        # assert model.solution.status == "optimal"
-        # assert abs(model.solution.f - desired_objective) < 0.001
-        # assert abs(sum(abs_x) - 476.1594) < 0.001
-
-        # TODO: parametrize fraction (DRY it up)
-        # Test fraction_of_optimum
-        solution = pfba(model, fraction_of_optimum=0.95)
-        assert solution.status == "optimal"
-        assert numpy.isclose(solution.x_dict["Biomass_Ecoli_core"],
-                             0.95 * 0.8739, atol=1e-4, rtol=0.0)
-        abs_x = [abs(i) for i in solution.x]
-        assert numpy.isclose(sum(abs_x), 493.4400, atol=1e-4, rtol=0.0)
-
-        # Infeasible solution
-        model.reactions.ATPM.lower_bound = 500
-        with warnings.catch_warnings():
-            warnings.simplefilter("error", UserWarning)
-            with pytest.raises((UserWarning, Infeasible, ValueError)):
-                pfba(model)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_geometric_fba_benchmark(self, model, benchmark, solver):
-        model.solver = solver
-        benchmark(geometric_fba, model)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_geometric_fba(self, solver):
-        model = construct_geometric_fba_model()
-        model.solver = solver
-        geometric_fba_sol = geometric_fba(model)
-        expected = Series({'v1': 1.0, 'v2': 0.33, 'v3': 0.33, 'v4': 0.33,
-                           'v5': 1.0}, index=['v1', 'v2', 'v3', 'v4', 'v5'])
-        assert numpy.allclose(geometric_fba_sol.fluxes, expected, atol=1E-02)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_fba_benchmark(self, model, benchmark,
-                                                solver):
-        model.solver = solver
-        benchmark(single_gene_deletion, model)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_fba(self, model, solver):
-        # expected knockouts for textbook model
-        model.solver = solver
-        growth_dict = {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
-                       "b2276": 0.21, "b1779": 0.00}
-        result = single_gene_deletion(
-            model=model,
-            gene_list=list(growth_dict),
-            method="fba",
-            processes=1
-        )["growth"]
-        for gene, value in iteritems(growth_dict):
-            assert numpy.isclose(result[frozenset([gene])], value,
-                                 atol=1E-02)
-
-    @pytest.mark.parametrize("solver", qp_solvers)
-    def test_single_gene_deletion_moma_benchmark(self, model, benchmark,
-                                                 solver):
-        model.solver = solver
-        genes = ['b0008', 'b0114', 'b2276', 'b1779']
-        benchmark(single_gene_deletion, model=model, gene_list=genes,
-                  method="moma", processes=1)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_linear_moma_benchmark(
-            self, model, benchmark, solver):
-        model.solver = solver
-        genes = ['b0008', 'b0114', 'b2276', 'b1779']
-        benchmark(single_gene_deletion, model=model, gene_list=genes,
-                  method="linear moma", processes=1)
-
-    @pytest.mark.parametrize("solver", qp_solvers)
-    def test_moma_sanity(self, model, solver):
-        """Test optimization criterion and optimality."""
-        model.solver = solver
-        sol = model.optimize()
-
-        with model:
-            model.reactions.PFK.knock_out()
-            knock_sol = model.optimize()
-            ssq = (knock_sol.fluxes - sol.fluxes).pow(2).sum()
-
-        with model:
-            add_moma(model, linear=False)
-            model.reactions.PFK.knock_out()
-            moma_sol = model.optimize()
-            moma_ssq = (moma_sol.fluxes - sol.fluxes).pow(2).sum()
-
-        # Use normal FBA as reference solution.
-        with model:
-            add_moma(model, solution=sol, linear=False)
-            model.reactions.PFK.knock_out()
-            moma_ref_sol = model.optimize()
-            moma_ref_ssq = (moma_ref_sol.fluxes - sol.fluxes).pow(2).sum()
-
-        assert numpy.isclose(moma_sol.objective_value, moma_ssq)
-        assert moma_ssq < ssq
-        assert numpy.isclose(moma_sol.objective_value,
-                             moma_ref_sol.objective_value)
-        assert numpy.isclose(moma_ssq, moma_ref_ssq)
-
-    @pytest.mark.parametrize("solver", qp_solvers)
-    def test_single_gene_deletion_moma(self, model, solver):
-        model.solver = solver
-
-        # expected knockouts for textbook model
-        growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
-                       "b2276": 0.11, "b1779": 0.00}
-
-        result = single_gene_deletion(
-            model=model,
-            gene_list=list(growth_dict),
-            method="moma",
-            processes=1
-        )["growth"]
-        for gene, value in iteritems(growth_dict):
-            assert numpy.isclose(result[frozenset([gene])], value,
-                                 atol=1E-02)
-
-    @pytest.mark.parametrize("solver", qp_solvers)
-    def test_single_gene_deletion_moma_reference(self, model, solver):
-        model.solver = solver
-
-        # expected knockouts for textbook model
-        growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
-                       "b2276": 0.11, "b1779": 0.00}
-
-        sol = model.optimize()
-        result = single_gene_deletion(
-            model=model,
-            gene_list=list(growth_dict),
-            method="moma",
-            solution=sol,
-            processes=1
-        )["growth"]
-        for gene, value in iteritems(growth_dict):
-            assert numpy.isclose(result[frozenset([gene])], value,
-                                 atol=1E-02)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_linear_moma_sanity(self, model, solver):
-        """Test optimization criterion and optimality."""
-        model.solver = solver
-        sol = model.optimize()
-
-        with model:
-            model.reactions.PFK.knock_out()
-            knock_sol = model.optimize()
-            sabs = (knock_sol.fluxes - sol.fluxes).abs().sum()
-
-        with model:
-            add_moma(model, linear=True)
-            model.reactions.PFK.knock_out()
-            moma_sol = model.optimize()
-            moma_sabs = (moma_sol.fluxes - sol.fluxes).abs().sum()
-
-        # Use normal FBA as reference solution.
-        with model:
-            add_moma(model, solution=sol, linear=True)
-            model.reactions.PFK.knock_out()
-            moma_ref_sol = model.optimize()
-            moma_ref_sabs = (moma_ref_sol.fluxes - sol.fluxes).abs().sum()
-
-        assert numpy.allclose(moma_sol.objective_value, moma_sabs)
-        assert moma_sabs < sabs
-        assert numpy.isclose(moma_sol.objective_value,
-                             moma_ref_sol.objective_value)
-        assert numpy.isclose(moma_sabs, moma_ref_sabs)
-
-        with model:
-            add_moma(model, linear=True)
-            with pytest.raises(ValueError):
-                add_moma(model)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_linear_moma(self, model, solver):
-        # expected knockouts for textbook model
-        growth_dict = {"b0008": 0.87, "b0114": 0.76, "b0116": 0.65,
-                       "b2276": 0.08, "b1779": 0.00}
-
-        model.solver = solver
-        sol = model.optimize()
-        result = single_gene_deletion(
-            model=model,
-            gene_list=list(growth_dict),
-            method="linear moma",
-            solution=sol,
-            processes=1
-        )["growth"]
-        for gene, value in iteritems(growth_dict):
-            assert numpy.isclose(result[frozenset([gene])], value,
-                                 atol=1E-02)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_benchmark(self, model, benchmark,
-                                            solver):
-        model.solver = solver
-        benchmark(single_reaction_deletion, model=model, processes=1)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_room_benchmark(self, model, benchmark,
-                                                 solver):
-        if solver == "glpk":
-            pytest.skip("GLPK is too slow to run ROOM.")
-        model.solver = solver
-        genes = ['b0008', 'b0114', 'b2276', 'b1779']
-        benchmark(single_gene_deletion, model=model, gene_list=genes,
-                  method="room", processes=1)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_gene_deletion_linear_room_benchmark(
-            self, model, benchmark, solver):
-        model.solver = solver
-        genes = ['b0008', 'b0114', 'b2276', 'b1779']
-        benchmark(single_gene_deletion, model=model, gene_list=genes,
-                  method="linear room", processes=1)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_room_sanity(self, model, solver):
-        model.solver = solver
-        sol = model.optimize()
-        with model:
-            model.reactions.PYK.knock_out()
-            knock_sol = model.optimize()
-
-        with model:
-            # Internally uses pFBA as reference solution.
-            add_room(model)
-            model.reactions.PYK.knock_out()
-            room_sol = model.optimize()
-
-        with model:
-            # Use FBA as reference solution.
-            add_room(model, solution=sol)
-            model.reactions.PYK.knock_out()
-            room_sol_ref = model.optimize()
-
-        flux_change = (sol.fluxes - knock_sol.fluxes).abs().sum()
-        flux_change_room = (sol.fluxes - room_sol.fluxes).abs().sum()
-        flux_change_room_ref = (sol.fluxes - room_sol_ref.fluxes).abs().sum()
-        # Expect the ROOM solution to have smaller flux changes in
-        # reactions compared to a normal FBA.
-        assert flux_change_room < flux_change or \
-            numpy.isclose(flux_change_room, flux_change, atol=1E-06)
-        # Expect the FBA-based reference to have less change in
-        # flux distribution.
-        assert flux_change_room_ref > flux_change_room or \
-            numpy.isclose(flux_change_room_ref, flux_change_room, atol=1E-06)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_linear_room_sanity(self, model, solver):
-        model.solver = solver
-        sol = model.optimize()
-        with model:
-            model.reactions.PYK.knock_out()
-            knock_sol = model.optimize()
-
-        with model:
-            # Internally uses pFBA as reference solution.
-            add_room(model, linear=True)
-            model.reactions.PYK.knock_out()
-            room_sol = model.optimize()
-
-        with model:
-            # Use FBA as reference solution.
-            add_room(model, solution=sol, linear=True)
-            model.reactions.PYK.knock_out()
-            room_sol_ref = model.optimize()
-
-        flux_change = (sol.fluxes - knock_sol.fluxes).abs().sum()
-        flux_change_room = (sol.fluxes - room_sol.fluxes).abs().sum()
-        flux_change_room_ref = (sol.fluxes - room_sol_ref.fluxes).abs().sum()
-        # Expect the ROOM solution to have smaller flux changes in
-        # reactions compared to a normal FBA.
-        assert flux_change_room < flux_change or \
-            numpy.isclose(flux_change_room, flux_change, atol=1E-06)
-        # Expect the FBA-based reference to have less change in
-        # flux distribution.
-        assert flux_change_room_ref > flux_change_room or \
-            numpy.isclose(flux_change_room_ref, flux_change_room, atol=1E-06)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_reaction_deletion_room(self, solver):
-        model = construct_room_model()
-        model.solver = solver
-        sol = construct_room_solution()
-        expected = Series({'v1': 10.0, 'v2': 5.0, 'v3': 0.0, 'v4': 5.0,
-                           'v5': 5.0, 'v6': 0.0, 'b1': 10.0, 'b2': 5.0,
-                           'b3': 5.0}, index=['v1', 'v2', 'v3', 'v4',
-                                              'v5', 'v6', 'b1', 'b2',
-                                              'b3'])
-        with model:
-            model.reactions.v6.knock_out()
-            add_room(model, solution=sol, delta=0.0, epsilon=0.0)
-            room_sol = model.optimize()
-
-        assert numpy.allclose(room_sol.fluxes, expected)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_reaction_deletion_room_linear(self, solver):
-        model = construct_room_model()
-        model.solver = solver
-        sol = construct_room_solution()
-        expected = Series({'v1': 10.0, 'v2': 5.0, 'v3': 0.0, 'v4': 5.0,
-                           'v5': 5.0, 'v6': 0.0, 'b1': 10.0, 'b2': 5.0,
-                           'b3': 5.0}, index=['v1', 'v2', 'v3', 'v4',
-                                              'v5', 'v6', 'b1', 'b2',
-                                              'b3'])
-        with model:
-            model.reactions.v6.knock_out()
-            add_room(model, solution=sol, delta=0.0, epsilon=0.0,
-                     linear=True)
-            linear_room_sol = model.optimize()
-
-        assert numpy.allclose(linear_room_sol.fluxes, expected)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_single_reaction_deletion(self, model, solver):
-        expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
-                            'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
-
-        model.solver = solver
-        result = single_reaction_deletion(
-            model=model,
-            reaction_list=list(expected_results),
-            processes=1
-        )['growth']
-        for reaction, value in iteritems(expected_results):
-            assert numpy.isclose(result[frozenset([reaction])], value,
-                                 atol=1E-05)
-
-    @classmethod
-    def compare_matrices(cls, matrix1, matrix2, places=3):
-        nrows = len(matrix1)
-        ncols = len(matrix1[0])
-        assert nrows == len(matrix2)
-        assert ncols == len(matrix2[0])
-        for i in range(nrows):
-            for j in range(ncols):
-                assert abs(matrix1[i][j] - matrix2[i][j]) < 10 ** -places
-
-    def test_double_gene_deletion_benchmark(self, large_model, benchmark):
-        genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935",
-                 "b1276",
-                 "b1241"]
-        benchmark(double_gene_deletion, large_model, gene_list1=genes,
-                  processes=1)
-
-    def test_double_gene_deletion(self, model):
-        genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935",
-                 "b1276",
-                 "b1241"]
-        growth_dict = {'b0720': {'b0720': 0.0,
-                                 'b0724': 0.0,
-                                 'b0726': 0.0,
-                                 'b1241': 0.0,
-                                 'b1276': 0.0,
-                                 'b2935': 0.0,
-                                 'b4025': 0.0},
-                       'b0724': {'b0720': 0.0,
-                                 'b0724': 0.814,
-                                 'b0726': 0.814,
-                                 'b1241': 0.814,
-                                 'b1276': 0.814,
-                                 'b2935': 0.814,
-                                 'b4025': 0.739},
-                       'b0726': {'b0720': 0.0,
-                                 'b0724': 0.814,
-                                 'b0726': 0.858,
-                                 'b1241': 0.858,
-                                 'b1276': 0.858,
-                                 'b2935': 0.858,
-                                 'b4025': 0.857},
-                       'b1241': {'b0720': 0.0,
-                                 'b0724': 0.814,
-                                 'b0726': 0.858,
-                                 'b1241': 0.874,
-                                 'b1276': 0.874,
-                                 'b2935': 0.874,
-                                 'b4025': 0.863},
-                       'b1276': {'b0720': 0.0,
-                                 'b0724': 0.814,
-                                 'b0726': 0.858,
-                                 'b1241': 0.874,
-                                 'b1276': 0.874,
-                                 'b2935': 0.874,
-                                 'b4025': 0.863},
-                       'b2935': {'b0720': 0.0,
-                                 'b0724': 0.814,
-                                 'b0726': 0.858,
-                                 'b1241': 0.874,
-                                 'b1276': 0.874,
-                                 'b2935': 0.874,
-                                 'b4025': 0.863},
-                       'b4025': {'b0720': 0.0,
-                                 'b0724': 0.739,
-                                 'b0726': 0.857,
-                                 'b1241': 0.863,
-                                 'b1276': 0.863,
-                                 'b2935': 0.863,
-                                 'b4025': 0.863}}
-        solution = double_gene_deletion(
-            model, gene_list1=genes, processes=3)['growth']
-        solution_one_process = double_gene_deletion(
-            model, gene_list1=genes, processes=1)['growth']
-        for (rxn_a, sub) in iteritems(growth_dict):
-            for rxn_b, growth in iteritems(sub):
-                sol = solution[frozenset([rxn_a, rxn_b])]
-                sol_one = solution_one_process[frozenset([rxn_a, rxn_b])]
-                assert round(sol, 3) == growth
-                assert round(sol_one, 3) == growth
-
-    def test_double_reaction_deletion(self, model):
-        reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
-        growth_dict = {
-            "FBA": {
-                "ATPS4r": 0.135,
-                "ENO": float('nan'),
-                "FRUpts2": 0.704
-            },
-            "ATPS4r": {
-                "ENO": float('nan'),
-                "FRUpts2": 0.374
-            },
-            "ENO": {
-                "FRUpts2": 0.0
-            },
-        }
-
-        solution = double_reaction_deletion(
-            model, reaction_list1=reactions, processes=3)['growth']
-        solution_one_process = double_reaction_deletion(
-            model, reaction_list1=reactions, processes=1)['growth']
-        for (rxn_a, sub) in iteritems(growth_dict):
-            for rxn_b, growth in iteritems(sub):
-                sol = solution[frozenset([rxn_a, rxn_b])]
-                sol_one = solution_one_process[frozenset([rxn_a, rxn_b])]
-                if math.isnan(growth):
-                    assert math.isnan(sol)
-                    assert math.isnan(sol_one)
-                else:
-                    assert round(sol, 3) == growth
-                    assert round(sol_one, 3) == growth
-
-    def test_double_reaction_deletion_benchmark(self, large_model,
-                                                benchmark):
-        reactions = large_model.reactions[1::100]
-        benchmark(double_reaction_deletion, large_model,
-                  reaction_list1=reactions)
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_flux_variability_benchmark(self, large_model, benchmark,
-                                        solver):
-        large_model.solver = solver
-        benchmark(flux_variability_analysis, large_model,
-                  reaction_list=large_model.reactions[1::3])
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_flux_variability_loopless_benchmark(self, model, benchmark,
-                                                 solver):
-        model.solver = solver
-        benchmark(flux_variability_analysis, model, loopless=True,
-                  reaction_list=model.reactions[1::3])
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_pfba_flux_variability(self, model, pfba_fva_results,
-                                   fva_results, solver):
-        model.solver = solver
-        with pytest.warns(UserWarning):
-            flux_variability_analysis(
-                model, pfba_factor=0.1, reaction_list=model.reactions[1::3])
-        fva_out = flux_variability_analysis(
-            model, pfba_factor=1.1, reaction_list=model.reactions)
-        for name, result in iteritems(fva_out.T):
-            for k, v in iteritems(result):
-                assert abs(pfba_fva_results[k][name] - v) < 0.00001
-                assert abs(pfba_fva_results[k][name]) <= abs(
-                    fva_results[k][name])
-        loop_reactions = [model.reactions.get_by_id(rid)
-                          for rid in ("FRD7", "SUCDi")]
-        fva_loopless = flux_variability_analysis(
-            model, pfba_factor=1.1, reaction_list=loop_reactions,
-            loopless=True)
-        assert numpy.allclose(fva_loopless["maximum"],
-                              fva_loopless["minimum"])
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_flux_variability(self, model, fva_results, solver):
-        model.solver = solver
-        fva_out = flux_variability_analysis(
-            model, reaction_list=model.reactions)
-        for name, result in iteritems(fva_out.T):
-            for k, v in iteritems(result):
-                assert abs(fva_results[k][name] - v) < 0.00001
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_flux_variability_loopless(self, model, solver):
-        model.solver = solver
-        loop_reactions = [model.reactions.get_by_id(rid)
-                          for rid in ("FRD7", "SUCDi")]
-        fva_normal = flux_variability_analysis(
-            model, reaction_list=loop_reactions)
-        fva_loopless = flux_variability_analysis(
-            model, reaction_list=loop_reactions, loopless=True)
-
-        assert not numpy.allclose(fva_normal["maximum"],
-                                  fva_normal["minimum"])
-        assert numpy.allclose(fva_loopless["maximum"],
-                              fva_loopless["minimum"])
-
-    def test_fva_data_frame(self, model):
-        df = flux_variability_analysis(model)
-        assert numpy.all([df.columns.values == ['minimum', 'maximum']])
-
-    def test_fva_infeasible(self, model):
-        infeasible_model = model.copy()
-        infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
-        # ensure that an infeasible model does not run FVA
-        with pytest.raises(Infeasible):
-            flux_variability_analysis(infeasible_model)
-
-    def test_fva_minimization(self, model):
-        model.objective = model.reactions.EX_glc__D_e
-        model.objective_direction = 'min'
-        solution = flux_variability_analysis(model, fraction_of_optimum=.95)
-        assert solution.at['EX_glc__D_e', 'minimum'] == -10.0
-        assert solution.at['EX_glc__D_e', 'maximum'] == -9.5
-
-    def test_find_blocked_reactions_solver_none(self, model):
-        result = find_blocked_reactions(model, model.reactions[40:46])
-        assert result == ['FRUpts2']
-
-    def test_essential_genes(self, model):
-        essential_genes = {'b2779', 'b1779', 'b0720', 'b2416',
-                           'b2926', 'b1136', 'b2415'}
-        observed_essential_genes = {g.id for g in
-                                    find_essential_genes(model)}
-        assert observed_essential_genes == essential_genes
-
-    def test_essential_reactions(self, model):
-        essential_reactions = {'GLNS', 'Biomass_Ecoli_core', 'PIt2r',
-                               'GAPD',
-                               'ACONTb', 'EX_nh4_e', 'ENO', 'EX_h_e',
-                               'EX_glc__D_e', 'ICDHyr', 'CS', 'NH4t',
-                               'GLCpts',
-                               'PGM', 'EX_pi_e', 'PGK', 'RPI', 'ACONTa'}
-        observed_essential_reactions = {r.id for r in
-                                        find_essential_reactions(model)}
-        assert observed_essential_reactions == essential_reactions
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_find_blocked_reactions(self, model, solver):
-        model.solver = solver
-        result = find_blocked_reactions(model, model.reactions[40:46])
-        assert result == ['FRUpts2']
-
-        result = find_blocked_reactions(model, model.reactions[42:48])
-        assert set(result) == {'FUMt2_2', 'FRUpts2'}
-
-        result = find_blocked_reactions(model, model.reactions[30:50],
-                                        open_exchanges=True)
-        assert result == []
-
-    def test_loopless_benchmark_before(self, benchmark):
-        test_model = construct_ll_test_model()
-
-        def _():
-            with test_model:
-                add_loopless(test_model)
-                test_model.optimize()
-
-        benchmark(_)
-
-    def test_loopless_benchmark_after(self, benchmark):
-        test_model = construct_ll_test_model()
-        benchmark(loopless_solution, test_model)
-
-    def test_loopless_solution(self, ll_test_model):
-        solution_feasible = loopless_solution(ll_test_model)
-        ll_test_model.reactions.v3.lower_bound = 1
-        ll_test_model.optimize()
-        solution_infeasible = loopless_solution(ll_test_model)
-        assert solution_feasible.fluxes["v3"] == 0.0
-        assert solution_infeasible.fluxes["v3"] == 1.0
-
-    def test_loopless_solution_fluxes(self, model):
-        fluxes = model.optimize().fluxes
-        ll_solution = loopless_solution(model, fluxes=fluxes)
-        assert len(ll_solution.fluxes) == len(model.reactions)
-
-    def test_add_loopless(self, ll_test_model):
-        add_loopless(ll_test_model)
-        feasible_status = ll_test_model.optimize().status
-        ll_test_model.reactions.v3.lower_bound = 1
-        ll_test_model.slim_optimize()
-        infeasible_status = ll_test_model.solver.status
-        assert feasible_status == OPTIMAL
-        assert infeasible_status == INFEASIBLE
-
-    def test_gapfilling(self, salmonella):
-        m = Model()
-        m.add_metabolites([Metabolite(m_id) for m_id in ["a", "b", "c"]])
-        exa = Reaction("EX_a")
-        exa.add_metabolites({m.metabolites.a: 1})
-        b2c = Reaction("b2c")
-        b2c.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
-        dmc = Reaction("DM_c")
-        dmc.add_metabolites({m.metabolites.c: -1})
-        m.add_reactions([exa, b2c, dmc])
-        m.objective = 'DM_c'
-
-        universal = Model()
-        a2b = Reaction("a2b")
-        a2d = Reaction("a2d")
-        universal.add_reactions([a2b, a2d])
-        a2b.build_reaction_from_string("a --> b", verbose=False)
-        a2d.build_reaction_from_string("a --> d", verbose=False)
-
-        # # GrowMatch
-        # result = gapfilling.growMatch(m, universal)[0]
-        result = gapfilling.gapfill(m, universal)[0]
-        assert len(result) == 1
-        assert result[0].id == "a2b"
-
-        # # SMILEY
-        # result = gapfilling.SMILEY(m, "b", universal)[0]
-        with m:
-            m.objective = m.add_boundary(m.metabolites.b, type='demand')
-            result = gapfilling.gapfill(m, universal)[0]
-            assert len(result) == 1
-            assert result[0].id == "a2b"
-
-        # # 2 rounds of GrowMatch with exchange reactions
-        # result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
-        result = gapfilling.gapfill(m, None, exchange_reactions=True,
-                                    iterations=2)
-        assert len(result) == 2
-        assert len(result[0]) == 1
-        assert len(result[1]) == 1
-        assert {i[0].id for i in result} == {"EX_b", "EX_c"}
-
-        # somewhat bigger model
-        universal = Model("universal_reactions")
-        with salmonella as model:
-            for i in [i.id for i in model.metabolites.f6p_c.reactions]:
-                reaction = model.reactions.get_by_id(i)
-                universal.add_reactions([reaction.copy()])
-                model.remove_reactions([reaction])
-            gf = gapfilling.GapFiller(model, universal,
-                                      penalties={'TKT2': 1e3},
-                                      demand_reactions=False)
-            solution = gf.fill()
-            assert 'TKT2' not in {r.id for r in solution[0]}
-            assert gf.validate(solution[0])
-
-    def check_line(self, output, expected_entries,
-                   pattern=re.compile(r"\s")):
-        """Ensure each expected entry is in the output."""
-        output_set = set(
-            pattern.sub("", line) for line in output.splitlines())
-        for elem in expected_entries:
-            assert pattern.sub("", elem) in output_set
-
-    def check_in_line(self, output, expected_entries,
-                      pattern=re.compile(r"\s")):
-        """Ensure each expected entry is contained in the output."""
-        output_strip = [pattern.sub("", line) for line in
-                        output.splitlines()]
-        for elem in expected_entries:
-            assert any(
-                pattern.sub("", elem) in line for line in output_strip), \
-                "Not found: {} in:\n{}".format(pattern.sub("", elem),
-                                               "\n".join(output_strip))
-
-    @pytest.mark.parametrize("names", [False, True])
-    def test_model_summary_previous_solution(self, model, opt_solver, names):
-        model.solver = opt_solver
-        solution = model.optimize()
-        rxn_test = model.exchanges[0]
-        if names:
-            met_test = list(rxn_test.metabolites.keys())[0].name
-        else:
-            met_test = list(rxn_test.metabolites.keys())[0].id
-
-        solution.fluxes[rxn_test.id] = 321
-
-        with captured_output() as (out, err):
-            model.summary(solution, names=names)
-        self.check_in_line(out.getvalue(), [met_test + '321'])
-
-    @pytest.mark.parametrize("names", [False, True])
-    def test_model_summary(self, model, opt_solver, names):
-        model.solver = opt_solver
-        # test non-fva version (these should be fixed for textbook model
-        if names:
-            expected_entries = [
-                'O2      21.8',
-                'D-Glucose  10',
-                'Ammonium      4.77',
-                'Phosphate       3.21',
-                'H2O  29.2',
-                'CO2  22.8',
-                'H+    17.5',
-                'Biomass_Ecol...  0.874',
-            ]
-        else:
-            expected_entries = [
-                'o2_e      21.8',
-                'glc__D_e  10',
-                'nh4_e      4.77',
-                'pi_e       3.21',
-                'h2o_e  29.2',
-                'co2_e  22.8',
-                'h_e    17.5',
-                'Biomass_Ecol...  0.874',
-            ]
-        # Need to use a different method here because
-        # there are multiple entries per line.
-        model.optimize()
-        with captured_output() as (out, err):
-            model.summary(names=names)
-        self.check_in_line(out.getvalue(), expected_entries)
-
-        # with model:
-        #     model.objective = model.exchanges[0]
-        #     model.summary()
-
-    @pytest.mark.parametrize("fraction", [0.95])
-    def test_model_summary_with_fva(self, model, opt_solver, fraction):
-        if opt_solver == "optlang-gurobi":
-            pytest.xfail("FVA currently buggy")
-        # test non-fva version (these should be fixed for textbook model
-        expected_entries = [
-            'idFluxRangeidFluxRangeBiomass_Ecol...0.874',
-            'o2_e       21.8   [19.9, 23.7]'
-            'h2o_e       29.2  [25, 30.7]',
-            'glc__D_e   10     [9.52, 10]'
-            'co2_e       22.8  [18.9, 24.7]',
-            'nh4_e       4.77  [4.53, 5.16]'
-            'h_e         17.5  [16.7, 22.4]',
-            'pi_e        3.21  [3.05, 3.21]'
-            'for_e        0    [0, 5.72]',
-            'ac_e         0    [0, 1.91]',
-            'pyr_e        0    [0, 1.27]',
-            'lac__D_e     0    [0, 1.07]',
-            'succ_e       0    [0, 0.837]',
-            'glu__L_e     0    [0, 0.636]',
-            'akg_e        0    [0, 0.715]',
-            'etoh_e       0    [0, 1.11]',
-            'acald_e      0    [0, 1.27]',
-        ]
-        # Need to use a different method here because
-        # there are multiple entries per line.
-        model.solver = opt_solver
-        solution = model.optimize()
-        with captured_output() as (out, err):
-            model.summary(solution, fva=fraction)
-        self.check_in_line(out.getvalue(), expected_entries)
-
-    @pytest.mark.parametrize("met", ["q8_c"])
-    def test_metabolite_summary_previous_solution(
-            self, model, opt_solver, met):
-        model.solver = opt_solver
-        solution = pfba(model)
-        model.metabolites.get_by_id(met).summary(solution)
-
-    @pytest.mark.parametrize("met, names", [
-        ("q8_c", False),
-        ("q8_c", True)
-    ])
-    def test_metabolite_summary(self, model, opt_solver, met, names):
-        model.solver = opt_solver
-        model.optimize()
-        with captured_output() as (out, err):
-            model.metabolites.get_by_id(met).summary(names=names)
-
-        if names:
-            expected_entries = [
-                'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
-                '%       FLUX  RXN ID      REACTION',
-                '100%   43.6   cytochr...  2.0 H+ + 0.5 O2 + Ubiquinol-8 --> '
-                'H2O + 2.0 H+ ...',
-                'CONSUMING REACTIONS -- Ubiquinone-8 (q8_c)',
-                '%       FLUX  RXN ID      REACTION',
-                '88%    38.5   NADH de...  4.0 H+ + Nicotinamide adenine '
-                'dinucleotide - re...',
-                '12%     5.06  succina...  Ubiquinone-8 + Succinate --> '
-                'Fumarate + Ubiquin...'
-            ]
-        else:
-            expected_entries = [
-                'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
-                '%       FLUX  RXN ID    REACTION',
-                '100%   43.6   CYTBD     '
-                '2.0 h_c + 0.5 o2_c + q8h2_c --> h2o_c + 2.0 h_e...',
-                'CONSUMING REACTIONS -- Ubiquinone-8 (q8_c)',
-                '%       FLUX  RXN ID    REACTION',
-                '88%    38.5   NADH16    '
-                '4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q...',
-                '12%     5.06  SUCDi     q8_c + succ_c --> fum_c + q8h2_c',
-            ]
-
-        self.check_in_line(out.getvalue(), expected_entries)
-
-    @pytest.mark.parametrize("fraction, met", [(0.99, "fdp_c")])
-    def test_metabolite_summary_with_fva(self, model, opt_solver, fraction,
-                                         met):
-        #     pytest.xfail("FVA currently buggy")
-
-        model.solver = opt_solver
-        model.optimize()
-        with captured_output() as (out, err):
-            model.metabolites.get_by_id(met).summary(fva=fraction)
-
-        expected_entries = [
-            'PRODUCING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
-            '%       FLUX  RANGE         RXN ID    REACTION',
-            '100%    7.48  [6.17, 9.26]  PFK       '
-            'atp_c + f6p_c --> adp_c + fdp_c + h_c',
-            'CONSUMING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
-            '%       FLUX  RANGE         RXN ID    REACTION',
-            '100%    7.48  [6.17, 8.92]  FBA       fdp_c <=> dhap_c + g3p_c',
-            '0%      0     [0, 1.72]     FBP       '
-            'fdp_c + h2o_c --> f6p_c + pi_c',
-        ]
-
-        self.check_line(out.getvalue(), expected_entries)
-
-
-class TestCobraFluxSampling:
-    """Tests and benchmark flux sampling."""
-
-    def test_single_achr(self, model):
-        s = sample(model, 10, method="achr")
-        assert s.shape == (10, len(model.reactions))
-
-    def test_single_optgp(self, model):
-        s = sample(model, 10, processes=1)
-        assert s.shape == (10, len(model.reactions))
-
-    def test_multi_optgp(self, model):
-        s = sample(model, 10, processes=2)
-        assert s.shape == (10, len(model.reactions))
-
-    def test_wrong_method(self, model):
-        with pytest.raises(ValueError):
-            sample(model, 1, method="schwupdiwupp")
-
-    def test_validate_wrong_sample(self, model):
-        s = self.achr.sample(10)
-        s["hello"] = 1
-        with pytest.raises(ValueError):
-            self.achr.validate(s)
-
-    def test_fixed_seed(self, model):
-        s = sample(model, 1, seed=42)
-        assert numpy.allclose(s.TPI[0], 9.12037487)
-
-    def test_equality_constraint(self, model):
-        model.reactions.ACALD.bounds = (-1.5, -1.5)
-        s = sample(model, 10)
-        assert numpy.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
-        s = sample(model, 10, method="achr")
-        assert numpy.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
-
-    def test_inequality_constraint(self, model):
-        co = model.problem.Constraint(
-            model.reactions.ACALD.flux_expression, lb=-0.5)
-        model.add_cons_vars(co)
-        s = sample(model, 10)
-        assert all(s.ACALD > -0.5 - 1e-6)
-        s = sample(model, 10, method="achr")
-        assert all(s.ACALD > -0.5 - 1e-6)
-
-    def setup_class(self):
-        from . import create_test_model
-        model = create_test_model("textbook")
-        achr = ACHRSampler(model, thinning=1)
-        assert ((achr.n_warmup > 0) and
-                (achr.n_warmup <= 2 * len(model.variables)))
-        assert all(achr.validate(achr.warmup) == "v")
-        self.achr = achr
-
-        optgp = OptGPSampler(model, processes=1, thinning=1)
-        assert ((optgp.n_warmup > 0) and
-                (optgp.n_warmup <= 2 * len(model.variables)))
-        assert all(optgp.validate(optgp.warmup) == "v")
-        self.optgp = optgp
-
-    def test_achr_init_benchmark(self, model, benchmark):
-        benchmark(lambda: ACHRSampler(model))
-
-    def test_optgp_init_benchmark(self, model, benchmark):
-        benchmark(lambda: OptGPSampler(model, processes=2))
-
-    def test_sampling(self):
-        s = self.achr.sample(10)
-        assert all(self.achr.validate(s) == "v")
-
-        s = self.optgp.sample(10)
-        assert all(self.optgp.validate(s) == "v")
-
-    def test_achr_sample_benchmark(self, benchmark):
-        benchmark(self.achr.sample, 1)
-
-    def test_optgp_sample_benchmark(self, benchmark):
-        benchmark(self.optgp.sample, 1)
-
-    def test_batch_sampling(self):
-        for b in self.achr.batch(5, 4):
-            assert all(self.achr.validate(b) == "v")
-
-        for b in self.optgp.batch(5, 4):
-            assert all(self.optgp.validate(b) == "v")
-
-    def test_variables_samples(self):
-        vnames = numpy.array([v.name for v in self.achr.model.variables])
-        s = self.achr.sample(10, fluxes=False)
-        assert s.shape == (10, self.achr.warmup.shape[1])
-        assert (s.columns == vnames).all()
-        assert (self.achr.validate(s) == "v").all()
-        s = self.optgp.sample(10, fluxes=False)
-        assert s.shape == (10, self.optgp.warmup.shape[1])
-        assert (s.columns == vnames).all()
-        assert (self.optgp.validate(s) == "v").all()
-
-    def test_inhomogeneous_sanity(self, model):
-        """Test whether inhomogeneous sampling gives approximately the same
-           standard deviation as a homogeneous version."""
-        model.reactions.ACALD.bounds = (-1.5, -1.5)
-        s_inhom = sample(model, 64)
-        model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
-        s_hom = sample(model, 64)
-        relative_diff = (s_inhom.std() + 1e-12) / (s_hom.std() + 1e-12)
-        assert 0.5 < relative_diff.abs().mean() < 2
-
-        model.reactions.ACALD.bounds = (-1.5, -1.5)
-        s_inhom = sample(model, 64, method="achr")
-        model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
-        s_hom = sample(model, 64, method="achr")
-        relative_diff = (s_inhom.std() + 1e-12) / (s_hom.std() + 1e-12)
-        assert 0.5 < relative_diff.abs().mean() < 2
-
-    def test_reproject(self):
-        s = self.optgp.sample(10, fluxes=False).values
-        proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
-        assert all(self.optgp.validate(proj) == "v")
-        s = numpy.random.rand(10, self.optgp.warmup.shape[1])
-        proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
-        assert all(self.optgp.validate(proj) == "v")
-
-    def test_complicated_model(self):
-        """Difficult model since the online mean calculation is numerically
-        unstable so many samples weakly violate the equality constraints."""
-        model = Model('flux_split')
-        reaction1 = Reaction('V1')
-        reaction2 = Reaction('V2')
-        reaction3 = Reaction('V3')
-        reaction1.lower_bound = 0
-        reaction2.lower_bound = 0
-        reaction3.lower_bound = 0
-        reaction1.upper_bound = 6
-        reaction2.upper_bound = 8
-        reaction3.upper_bound = 10
-        A = Metabolite('A')
-        reaction1.add_metabolites({A: -1})
-        reaction2.add_metabolites({A: -1})
-        reaction3.add_metabolites({A: 1})
-        model.add_reactions([reaction1])
-        model.add_reactions([reaction2])
-        model.add_reactions([reaction3])
-
-        optgp = OptGPSampler(model, 1, seed=42)
-        achr = ACHRSampler(model, seed=42)
-        optgp_samples = optgp.sample(100)
-        achr_samples = achr.sample(100)
-        assert any(optgp_samples.corr().abs() < 1.0)
-        assert any(achr_samples.corr().abs() < 1.0)
-        # > 95% are valid
-        assert(sum(optgp.validate(optgp_samples) == "v") > 95)
-        assert(sum(achr.validate(achr_samples) == "v") > 95)
-
-    def test_single_point_space(self, model):
-        """Model where constraints reduce the sampling space to one point."""
-        pfba_sol = pfba(model)
-        pfba_const = model.problem.Constraint(
-            sum(model.variables), ub=pfba_sol.objective_value)
-        model.add_cons_vars(pfba_const)
-        model.reactions.Biomass_Ecoli_core.lower_bound = \
-            pfba_sol.fluxes.Biomass_Ecoli_core
-        with pytest.raises(ValueError):
-            s = sample(model, 1)
-
-
-class TestProductionEnvelope:
-    """Test the production envelope."""
-
-    def test_envelope_one(self, model):
-        df = production_envelope(model, ["EX_o2_e"])
-        assert numpy.isclose(df["flux_maximum"].sum(), 9.342, atol=1e-3)
-
-    def test_envelope_multi_reaction_objective(self, model):
-        obj = {model.reactions.EX_ac_e: 1,
-               model.reactions.EX_co2_e: 1}
-        with pytest.raises(ValueError):
-            production_envelope(model, "EX_o2_e", obj)
-
-    @pytest.mark.parametrize("variables, num", [
-        (["EX_glc__D_e"], 30),
-        (["EX_glc__D_e", "EX_o2_e"], 20),
-        (["EX_glc__D_e", "EX_o2_e", "EX_ac_e"], 10)
-    ])
-    def test_multi_variable_envelope(self, model, variables, num):
-        df = production_envelope(model, variables, points=num)
-        assert len(df) == num ** len(variables)
-
-    def test_envelope_two(self, model):
-        df = production_envelope(model, ["EX_glc__D_e", "EX_o2_e"],
-                                 objective="EX_ac_e")
-        assert numpy.isclose(df["flux_maximum"].sum(), 1737.466, atol=1e-3)
-        assert numpy.isclose(df["carbon_yield_maximum"].sum(), 83.579,
-                             atol=1e-3)
-        assert numpy.isclose(df["mass_yield_maximum"].sum(), 82.176,
-                             atol=1e-3)
-
-
-class TestReactionUtils:
-    """Test the assess_ functions in reactions.py."""
-
-    @pytest.mark.parametrize("solver", all_solvers)
-    def test_assess(self, model, solver):
-        with model:
-            assert assess(model, model.reactions.GLCpts,
-                          solver=solver) is True
-            pyr = model.metabolites.pyr_c
-            a = Metabolite('a')
-            b = Metabolite('b')
-            model.add_metabolites([a, b])
-            pyr_a2b = Reaction('pyr_a2b')
-            pyr_a2b.add_metabolites({pyr: -1, a: -1, b: 1})
-            model.add_reactions([pyr_a2b])
-            res = assess(model, pyr_a2b, 0.01, solver=solver)
-            expected = {
-                'precursors': {a: {'required': 0.01, 'produced': 0.0}},
-                'products': {b: {'required': 0.01, 'capacity': 0.0}}}
-            assert res == expected


=====================================
cobra/test/test_flux_analysis/conftest.py
=====================================
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+"""Define module level fixtures."""
+
+
+from __future__ import absolute_import
+
+import pytest
+from pandas import Series
+
+import cobra.util.solver as sutil
+from cobra.core import Metabolite, Model, Reaction, Solution
+
+# The scipy interface is currently unstable and may yield errors or infeasible
+# solutions.
+
+
+@pytest.fixture(scope="session",
+                params=[s for s in ["glpk", "cplex", "gurobi"]
+                        if s in sutil.solvers])
+def all_solvers(request):
+    """Return the available solvers."""
+    return request.param
+
+
+@pytest.fixture(scope="session",
+                params=[s for s in ["cplex", "gurobi"]
+                        if s in sutil.solvers])
+def qp_solvers(request):
+    """Return the available QP solvers."""
+    return request.param
+
+
+@pytest.fixture(scope="module")
+def room_model():
+    """
+    Generate ROOM model as described in [1]_
+
+    References
+    ----------
+    .. [1] Tomer Shlomi, Omer Berkman and Eytan Ruppin, "Regulatory on/off
+     minimization of metabolic flux changes after genetic perturbations",
+     PNAS 2005 102 (21) 7695-7700; doi:10.1073/pnas.0406346102
+
+    """
+    test_model = Model("papin_2003")
+
+    v_1 = Reaction("v1")
+    v_2 = Reaction("v2")
+    v_3 = Reaction("v3")
+    v_4 = Reaction("v4")
+    v_5 = Reaction("v5")
+    v_6 = Reaction("v6", upper_bound=0.0)
+    b_1 = Reaction("b1", upper_bound=10.0, lower_bound=0.0)
+    b_2 = Reaction("b2")
+    b_3 = Reaction("b3")
+
+    test_model.add_reactions([v_1, v_2, v_3, v_4, v_5, v_6, b_1, b_2, b_3])
+
+    v_1.reaction = "A -> B"
+    v_2.reaction = "2 B -> C + byp"
+    v_3.reaction = "2 B + cof -> D"
+    v_4.reaction = "D -> E + cof"
+    v_5.reaction = "C + cof -> D"
+    v_6.reaction = "C -> E"
+    b_1.reaction = "-> A"
+    b_2.reaction = "E ->"
+    b_3.reaction = "byp ->"
+
+    test_model.objective = 'b2'
+
+    return test_model
+
+
+@pytest.fixture(scope="module")
+def room_solution():
+    """
+    Generate ROOM solution as described in [1]_
+
+    References
+    ----------
+    .. [1] Tomer Shlomi, Omer Berkman and Eytan Ruppin, "Regulatory on/off
+     minimization of metabolic flux changes after genetic perturbations",
+     PNAS 2005 102 (21) 7695-7700; doi:10.1073/pnas.0406346102
+
+    """
+    fluxes = Series({'b1': 10.0, 'b2': 5.0, 'b3': 5.0, 'v1': 10.0, 'v2': 5.0,
+                     'v3': 0.0, 'v4': 0.0, 'v5': 0.0, 'v6': 5.0})
+    reduced_costs = Series({'b1': 0.0, 'b2': 0.0, 'b3': 0.0, 'v1': 0.0,
+                            'v2': 0.0, 'v3': 0.0, 'v4': 0.0, 'v5': 0.0,
+                            'v6': 0.0})
+    shadow_prices = Series({'b1': 0.0, 'b2': 0.0, 'b3': 0.0, 'v1': 0.0,
+                            'v2': 0.0, 'v3': 0.0, 'v4': 0.0, 'v5': 0.0,
+                            'v6': 0.0})
+    sol = Solution(objective_value=5.000, status='optimal',
+                   fluxes=fluxes,
+                   reduced_costs=reduced_costs,
+                   shadow_prices=shadow_prices)
+    return sol


=====================================
cobra/test/test_flux_analysis/test_deletion.py
=====================================
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of reaction and gene deletions."""
+
+from __future__ import absolute_import
+
+import math
+
+import numpy as np
+from pandas import Series
+from six import iteritems
+
+import pytest
+from cobra.flux_analysis.deletion import (
+    double_gene_deletion, double_reaction_deletion, single_gene_deletion,
+    single_reaction_deletion)
+from cobra.flux_analysis.room import add_room
+
+
+# Single gene deletion FBA
+def test_single_gene_deletion_fba_benchmark(model, benchmark,
+                                            all_solvers):
+    """Benchmark single gene deletion using FBA."""
+    model.solver = all_solvers
+    benchmark(single_gene_deletion, model)
+
+
+def test_single_gene_deletion_fba(model, all_solvers):
+    """Test single gene deletion using FBA."""
+    # expected knockouts for textbook model
+    model.solver = all_solvers
+    growth_dict = {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
+                   "b2276": 0.21, "b1779": 0.00}
+    result = single_gene_deletion(
+        model=model,
+        gene_list=list(growth_dict),
+        method="fba",
+        processes=1
+    )["growth"]
+    for gene, value in iteritems(growth_dict):
+        assert np.isclose(result[frozenset([gene])], value,
+                          atol=1E-02)
+
+
+# Single gene deletion MOMA
+def test_single_gene_deletion_moma_benchmark(model, benchmark,
+                                             qp_solvers):
+    """Benchmark single gene deletion using MOMA."""
+    model.solver = qp_solvers
+    genes = ['b0008', 'b0114', 'b2276', 'b1779']
+    benchmark(single_gene_deletion, model=model, gene_list=genes,
+              method="moma", processes=1)
+
+
+def test_single_gene_deletion_moma(model, qp_solvers):
+    """Test single gene deletion using MOMA."""
+    model.solver = qp_solvers
+    # expected knockouts for textbook model
+    growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
+                   "b2276": 0.11, "b1779": 0.00}
+
+    result = single_gene_deletion(
+        model=model,
+        gene_list=list(growth_dict),
+        method="moma",
+        processes=1
+    )["growth"]
+    for gene, value in iteritems(growth_dict):
+        assert np.isclose(result[frozenset([gene])], value,
+                          atol=1E-02)
+
+
+def test_single_gene_deletion_moma_reference(model, qp_solvers):
+    """Test single gene deletion using MOMA (reference solution)."""
+    model.solver = qp_solvers
+    # expected knockouts for textbook model
+    growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
+                   "b2276": 0.11, "b1779": 0.00}
+
+    sol = model.optimize()
+    result = single_gene_deletion(
+        model=model,
+        gene_list=list(growth_dict),
+        method="moma",
+        solution=sol,
+        processes=1
+    )["growth"]
+    for gene, value in iteritems(growth_dict):
+        assert np.isclose(result[frozenset([gene])], value,
+                          atol=1E-02)
+
+
+# Single gene deletion linear MOMA
+def test_single_gene_deletion_linear_moma_benchmark(
+        model, benchmark, all_solvers):
+    """Benchmark single gene deletion using linear MOMA."""
+    model.solver = all_solvers
+    genes = ['b0008', 'b0114', 'b2276', 'b1779']
+    benchmark(single_gene_deletion, model=model, gene_list=genes,
+              method="linear moma", processes=1)
+
+
+def test_single_gene_deletion_linear_moma(model, all_solvers):
+    """Test single gene deletion using linear MOMA (reference solution)."""
+    model.solver = all_solvers
+    # expected knockouts for textbook model
+    growth_dict = {"b0008": 0.87, "b0114": 0.76, "b0116": 0.65,
+                   "b2276": 0.08, "b1779": 0.00}
+
+    sol = model.optimize()
+    result = single_gene_deletion(
+        model=model,
+        gene_list=list(growth_dict),
+        method="linear moma",
+        solution=sol,
+        processes=1
+    )["growth"]
+    for gene, value in iteritems(growth_dict):
+        assert np.isclose(result[frozenset([gene])], value,
+                          atol=1E-02)
+
+
+# Single gene deletion ROOM
+def test_single_gene_deletion_room_benchmark(model, benchmark,
+                                             all_solvers):
+    """Benchmark single gene deletion using ROOM."""
+    if all_solvers == "glpk":
+        pytest.skip("GLPK is too slow to run ROOM.")
+    model.solver = all_solvers
+    genes = ['b0008', 'b0114', 'b2276', 'b1779']
+    benchmark(single_gene_deletion, model=model, gene_list=genes,
+              method="room", processes=1)
+
+
+# Single gene deletion linear ROOM
+def test_single_gene_deletion_linear_room_benchmark(
+        model, benchmark, all_solvers):
+    """Benchmark single gene deletion using linear ROOM."""
+    model.solver = all_solvers
+    genes = ['b0008', 'b0114', 'b2276', 'b1779']
+    benchmark(single_gene_deletion, model=model, gene_list=genes,
+              method="linear room", processes=1)
+
+
+# Single reaction deletion
+def test_single_reaction_deletion_benchmark(model, benchmark,
+                                            all_solvers):
+    """Benchmark single reaction deletion."""
+    model.solver = all_solvers
+    benchmark(single_reaction_deletion, model=model, processes=1)
+
+
+def test_single_reaction_deletion(model, all_solvers):
+    """Test single reaction deletion."""
+    model.solver = all_solvers
+    expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
+                        'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
+    result = single_reaction_deletion(
+        model=model,
+        reaction_list=list(expected_results),
+        processes=1
+    )['growth']
+    for reaction, value in iteritems(expected_results):
+        assert np.isclose(result[frozenset([reaction])], value,
+                          atol=1E-05)
+
+
+# Single reaction deletion ROOM
+def test_single_reaction_deletion_room(room_model, room_solution,
+                                       all_solvers):
+    """Test single reaction deletion using ROOM."""
+    room_model.solver = all_solvers
+    expected = Series({'v1': 10.0, 'v2': 5.0, 'v3': 0.0, 'v4': 5.0,
+                       'v5': 5.0, 'v6': 0.0, 'b1': 10.0, 'b2': 5.0,
+                       'b3': 5.0}, index=['v1', 'v2', 'v3', 'v4',
+                                          'v5', 'v6', 'b1', 'b2',
+                                          'b3'])
+    with room_model:
+        room_model.reactions.v6.knock_out()
+        add_room(room_model, solution=room_solution, delta=0.0, epsilon=0.0)
+        room_sol = room_model.optimize()
+
+    assert np.allclose(room_sol.fluxes, expected)
+
+
+# Single reaction deletion linear ROOM
+def test_single_reaction_deletion_linear_room(room_model, room_solution,
+                                              all_solvers):
+    """Test single reaction deletion using linear ROOM."""
+    room_model.solver = all_solvers
+    expected = Series({'v1': 10.0, 'v2': 5.0, 'v3': 0.0, 'v4': 5.0,
+                       'v5': 5.0, 'v6': 0.0, 'b1': 10.0, 'b2': 5.0,
+                       'b3': 5.0}, index=['v1', 'v2', 'v3', 'v4',
+                                          'v5', 'v6', 'b1', 'b2',
+                                          'b3'])
+    with room_model:
+        room_model.reactions.v6.knock_out()
+        add_room(room_model, solution=room_solution, delta=0.0, epsilon=0.0,
+                 linear=True)
+        linear_room_sol = room_model.optimize()
+
+    assert np.allclose(linear_room_sol.fluxes, expected)
+
+
+# Double gene deletion
+def test_double_gene_deletion_benchmark(large_model, benchmark):
+    """Benchmark double gene deletion."""
+    genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935",
+             "b1276",
+             "b1241"]
+    benchmark(double_gene_deletion, large_model, gene_list1=genes,
+              processes=1)
+
+
+def test_double_gene_deletion(model):
+    """Test double gene deletion."""
+    genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935",
+             "b1276",
+             "b1241"]
+    growth_dict = {'b0720': {'b0720': 0.0,
+                             'b0724': 0.0,
+                             'b0726': 0.0,
+                             'b1241': 0.0,
+                             'b1276': 0.0,
+                             'b2935': 0.0,
+                             'b4025': 0.0},
+                   'b0724': {'b0720': 0.0,
+                             'b0724': 0.814,
+                             'b0726': 0.814,
+                             'b1241': 0.814,
+                             'b1276': 0.814,
+                             'b2935': 0.814,
+                             'b4025': 0.739},
+                   'b0726': {'b0720': 0.0,
+                             'b0724': 0.814,
+                             'b0726': 0.858,
+                             'b1241': 0.858,
+                             'b1276': 0.858,
+                             'b2935': 0.858,
+                             'b4025': 0.857},
+                   'b1241': {'b0720': 0.0,
+                             'b0724': 0.814,
+                             'b0726': 0.858,
+                             'b1241': 0.874,
+                             'b1276': 0.874,
+                             'b2935': 0.874,
+                             'b4025': 0.863},
+                   'b1276': {'b0720': 0.0,
+                             'b0724': 0.814,
+                             'b0726': 0.858,
+                             'b1241': 0.874,
+                             'b1276': 0.874,
+                             'b2935': 0.874,
+                             'b4025': 0.863},
+                   'b2935': {'b0720': 0.0,
+                             'b0724': 0.814,
+                             'b0726': 0.858,
+                             'b1241': 0.874,
+                             'b1276': 0.874,
+                             'b2935': 0.874,
+                             'b4025': 0.863},
+                   'b4025': {'b0720': 0.0,
+                             'b0724': 0.739,
+                             'b0726': 0.857,
+                             'b1241': 0.863,
+                             'b1276': 0.863,
+                             'b2935': 0.863,
+                             'b4025': 0.863}}
+    solution = double_gene_deletion(
+        model, gene_list1=genes, processes=3)['growth']
+    solution_one_process = double_gene_deletion(
+        model, gene_list1=genes, processes=1)['growth']
+    for (rxn_a, sub) in iteritems(growth_dict):
+        for rxn_b, growth in iteritems(sub):
+            sol = solution[frozenset([rxn_a, rxn_b])]
+            sol_one = solution_one_process[frozenset([rxn_a, rxn_b])]
+            assert round(sol, 3) == growth
+            assert round(sol_one, 3) == growth
+
+
+# Double reaction deletion
+def test_double_reaction_deletion_benchmark(large_model, benchmark):
+    """Benchmark double reaction deletion."""
+    reactions = large_model.reactions[1::100]
+    benchmark(double_reaction_deletion, large_model,
+              reaction_list1=reactions)
+
+
+def test_double_reaction_deletion(model):
+    """Test double reaction deletion."""
+    reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
+    growth_dict = {
+        "FBA": {
+            "ATPS4r": 0.135,
+            "ENO": float('nan'),
+            "FRUpts2": 0.704
+        },
+        "ATPS4r": {
+            "ENO": float('nan'),
+            "FRUpts2": 0.374
+        },
+        "ENO": {
+            "FRUpts2": 0.0
+        },
+    }
+
+    solution = double_reaction_deletion(
+        model, reaction_list1=reactions, processes=3)['growth']
+    solution_one_process = double_reaction_deletion(
+        model, reaction_list1=reactions, processes=1)['growth']
+    for (rxn_a, sub) in iteritems(growth_dict):
+        for rxn_b, growth in iteritems(sub):
+            sol = solution[frozenset([rxn_a, rxn_b])]
+            sol_one = solution_one_process[frozenset([rxn_a, rxn_b])]
+            if math.isnan(growth):
+                assert math.isnan(sol)
+                assert math.isnan(sol_one)
+            else:
+                assert round(sol, 3) == growth
+                assert round(sol_one, 3) == growth


=====================================
cobra/test/test_flux_analysis/test_gapfilling.py
=====================================
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of gapfilling."""
+
+from __future__ import absolute_import
+
+from cobra.core import Metabolite, Model, Reaction
+from cobra.flux_analysis.gapfilling import GapFiller, gapfill
+
+
+def test_gapfilling(salmonella):
+    """Test Gapfilling."""
+    m = Model()
+    m.add_metabolites([Metabolite(m_id) for m_id in ["a", "b", "c"]])
+    exa = Reaction("EX_a")
+    exa.add_metabolites({m.metabolites.a: 1})
+    b2c = Reaction("b2c")
+    b2c.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
+    dmc = Reaction("DM_c")
+    dmc.add_metabolites({m.metabolites.c: -1})
+    m.add_reactions([exa, b2c, dmc])
+    m.objective = 'DM_c'
+
+    universal = Model()
+    a2b = Reaction("a2b")
+    a2d = Reaction("a2d")
+    universal.add_reactions([a2b, a2d])
+    a2b.build_reaction_from_string("a --> b", verbose=False)
+    a2d.build_reaction_from_string("a --> d", verbose=False)
+
+    # # GrowMatch
+    # result = gapfilling.growMatch(m, universal)[0]
+    result = gapfill(m, universal)[0]
+    assert len(result) == 1
+    assert result[0].id == "a2b"
+
+    # # SMILEY
+    # result = gapfilling.SMILEY(m, "b", universal)[0]
+    with m:
+        m.objective = m.add_boundary(m.metabolites.b, type='demand')
+        result = gapfill(m, universal)[0]
+        assert len(result) == 1
+        assert result[0].id == "a2b"
+
+    # # 2 rounds of GrowMatch with exchange reactions
+    # result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
+    result = gapfill(m, None, exchange_reactions=True,
+                     iterations=2)
+    assert len(result) == 2
+    assert len(result[0]) == 1
+    assert len(result[1]) == 1
+    assert {i[0].id for i in result} == {"EX_b", "EX_c"}
+
+    # somewhat bigger model
+    universal = Model("universal_reactions")
+    with salmonella as model:
+        for i in [i.id for i in model.metabolites.f6p_c.reactions]:
+            reaction = model.reactions.get_by_id(i)
+            universal.add_reactions([reaction.copy()])
+            model.remove_reactions([reaction])
+        gf = GapFiller(model, universal,
+                       penalties={'TKT2': 1e3},
+                       demand_reactions=False)
+        solution = gf.fill()
+        assert 'TKT2' not in {r.id for r in solution[0]}
+        assert gf.validate(solution[0])


=====================================
cobra/test/test_flux_analysis/test_geometric.py
=====================================
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of Geometric FBA."""
+
+from __future__ import absolute_import
+
+import numpy as np
+from pandas import Series
+
+import pytest
+from cobra.core import Metabolite, Model, Reaction, Solution
+from cobra.flux_analysis import geometric_fba
+
+
+ at pytest.fixture(scope="module")
+def geometric_fba_model():
+    """
+    Generate geometric FBA model as described in [1]_
+
+    References
+    ----------
+    .. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
+           Flux balance analysis: A geometric perspective.
+           Journal of theoretical biology.258. 311-5.
+           10.1016/j.jtbi.2009.01.027.
+
+    """
+    test_model = Model('geometric_fba_paper_model')
+
+    test_model.add_metabolites(Metabolite('A'))
+    test_model.add_metabolites(Metabolite('B'))
+
+    v_1 = Reaction('v1', upper_bound=1.0)
+    v_1.add_metabolites({test_model.metabolites.A: 1.0})
+
+    v_2 = Reaction('v2', lower_bound=-1000.0)
+    v_2.add_metabolites({test_model.metabolites.A: -1.0,
+                         test_model.metabolites.B: 1.0})
+
+    v_3 = Reaction('v3', lower_bound=-1000.0)
+    v_3.add_metabolites({test_model.metabolites.A: -1.0,
+                         test_model.metabolites.B: 1.0})
+
+    v_4 = Reaction('v4', lower_bound=-1000.0)
+    v_4.add_metabolites({test_model.metabolites.A: -1.0,
+                         test_model.metabolites.B: 1.0})
+
+    v_5 = Reaction('v5')
+    v_5.add_metabolites({test_model.metabolites.A: 0.0,
+                         test_model.metabolites.B: -1.0})
+
+    test_model.add_reactions([v_1, v_2, v_3, v_4, v_5])
+
+    test_model.objective = 'v5'
+
+    return test_model
+
+
+def test_geometric_fba_benchmark(model, benchmark, all_solvers):
+    """Benchmark geometric_fba."""
+    model.solver = all_solvers
+    benchmark(geometric_fba, model)
+
+
+def test_geometric_fba(geometric_fba_model, all_solvers):
+    """Test geometric_fba."""
+    geometric_fba_model.solver = all_solvers
+    geometric_fba_sol = geometric_fba(geometric_fba_model)
+    expected = Series({'v1': 1.0, 'v2': 0.33, 'v3': 0.33, 'v4': 0.33,
+                       'v5': 1.0}, index=['v1', 'v2', 'v3', 'v4', 'v5'])
+    assert np.allclose(geometric_fba_sol.fluxes, expected, atol=1E-02)


=====================================
cobra/test/test_flux_analysis/test_loopless.py
=====================================
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of loopless.py"""
+
+from __future__ import absolute_import
+
+from optlang.interface import INFEASIBLE, OPTIMAL
+
+import cobra.util.solver as sutil
+import pytest
+from cobra.core import Metabolite, Model, Reaction
+from cobra.flux_analysis.loopless import add_loopless, loopless_solution
+
+
+def construct_ll_test_model():
+    """Construct test model."""
+    test_model = Model()
+    test_model.add_metabolites(Metabolite("A"))
+    test_model.add_metabolites(Metabolite("B"))
+    test_model.add_metabolites(Metabolite("C"))
+    EX_A = Reaction("EX_A")
+    EX_A.add_metabolites({test_model.metabolites.A: 1})
+    DM_C = Reaction("DM_C")
+    DM_C.add_metabolites({test_model.metabolites.C: -1})
+    v1 = Reaction("v1")
+    v1.add_metabolites({test_model.metabolites.A: -1,
+                        test_model.metabolites.B: 1})
+    v2 = Reaction("v2")
+    v2.add_metabolites({test_model.metabolites.B: -1,
+                        test_model.metabolites.C: 1})
+    v3 = Reaction("v3")
+    v3.add_metabolites({test_model.metabolites.C: -1,
+                        test_model.metabolites.A: 1})
+    test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
+    DM_C.objective_coefficient = 1
+    return test_model
+
+
+ at pytest.fixture(scope="function", params=[s for s in ["glpk", "cplex",
+                                                      "gurobi"]
+                                          if s in sutil.solvers])
+def ll_test_model(request):
+    """Return test model set with different solvers."""
+    test_model = construct_ll_test_model()
+    test_model.solver = request.param
+    return test_model
+
+
+def test_loopless_benchmark_before(benchmark):
+    """Benchmark initial condition."""
+    test_model = construct_ll_test_model()
+
+    def _():
+        with test_model:
+            add_loopless(test_model)
+            test_model.optimize()
+
+    benchmark(_)
+
+
+def test_loopless_benchmark_after(benchmark):
+    """Benchmark final condition."""
+    test_model = construct_ll_test_model()
+    benchmark(loopless_solution, test_model)
+
+
+def test_loopless_solution(ll_test_model):
+    """Test loopless_solution()."""
+    solution_feasible = loopless_solution(ll_test_model)
+    ll_test_model.reactions.v3.lower_bound = 1
+    ll_test_model.optimize()
+    solution_infeasible = loopless_solution(ll_test_model)
+    assert solution_feasible.fluxes["v3"] == 0.0
+    assert solution_infeasible.fluxes["v3"] == 1.0
+
+
+def test_loopless_solution_fluxes(model):
+    """Test fluxes of loopless_solution()"""
+    fluxes = model.optimize().fluxes
+    ll_solution = loopless_solution(model, fluxes=fluxes)
+    assert len(ll_solution.fluxes) == len(model.reactions)
+
+
+def test_add_loopless(ll_test_model):
+    """Test add_loopless()."""
+    add_loopless(ll_test_model)
+    feasible_status = ll_test_model.optimize().status
+    ll_test_model.reactions.v3.lower_bound = 1
+    ll_test_model.slim_optimize()
+    infeasible_status = ll_test_model.solver.status
+    assert feasible_status == OPTIMAL
+    assert infeasible_status == INFEASIBLE


=====================================
cobra/test/test_flux_analysis/test_moma.py
=====================================
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of MOMA."""
+
+from __future__ import absolute_import
+
+import numpy as np
+
+import pytest
+from cobra.flux_analysis.moma import add_moma
+
+
+def test_moma_sanity(model, qp_solvers):
+    """Test optimization criterion and optimality for MOMA."""
+    model.solver = qp_solvers
+    sol = model.optimize()
+
+    with model:
+        model.reactions.PFK.knock_out()
+        knock_sol = model.optimize()
+        ssq = (knock_sol.fluxes - sol.fluxes).pow(2).sum()
+
+    with model:
+        add_moma(model, linear=False)
+        model.reactions.PFK.knock_out()
+        moma_sol = model.optimize()
+        moma_ssq = (moma_sol.fluxes - sol.fluxes).pow(2).sum()
+
+    # Use normal FBA as reference solution.
+    with model:
+        add_moma(model, solution=sol, linear=False)
+        model.reactions.PFK.knock_out()
+        moma_ref_sol = model.optimize()
+        moma_ref_ssq = (moma_ref_sol.fluxes - sol.fluxes).pow(2).sum()
+
+    assert np.isclose(moma_sol.objective_value, moma_ssq)
+    assert moma_ssq < ssq
+    assert np.isclose(moma_sol.objective_value,
+                      moma_ref_sol.objective_value)
+    assert np.isclose(moma_ssq, moma_ref_ssq)
+
+
+def test_linear_moma_sanity(model, all_solvers):
+    """Test optimization criterion and optimality for linear MOMA."""
+    model.solver = all_solvers
+    sol = model.optimize()
+
+    with model:
+        model.reactions.PFK.knock_out()
+        knock_sol = model.optimize()
+        sabs = (knock_sol.fluxes - sol.fluxes).abs().sum()
+
+    with model:
+        add_moma(model, linear=True)
+        model.reactions.PFK.knock_out()
+        moma_sol = model.optimize()
+        moma_sabs = (moma_sol.fluxes - sol.fluxes).abs().sum()
+
+    # Use normal FBA as reference solution.
+    with model:
+        add_moma(model, solution=sol, linear=True)
+        model.reactions.PFK.knock_out()
+        moma_ref_sol = model.optimize()
+        moma_ref_sabs = (moma_ref_sol.fluxes - sol.fluxes).abs().sum()
+
+    assert np.allclose(moma_sol.objective_value, moma_sabs)
+    assert moma_sabs < sabs
+    assert np.isclose(moma_sol.objective_value,
+                      moma_ref_sol.objective_value)
+    assert np.isclose(moma_sabs, moma_ref_sabs)
+
+    with model:
+        add_moma(model, linear=True)
+        with pytest.raises(ValueError):
+            add_moma(model)


=====================================
cobra/test/test_flux_analysis/test_parsimonious.py
=====================================
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of pFBA."""
+
+import warnings
+
+import numpy as np
+
+import pytest
+from cobra.exceptions import Infeasible
+from cobra.flux_analysis.parsimonious import add_pfba, pfba
+
+
+def test_pfba_benchmark(large_model, benchmark, all_solvers):
+    """Benchmark pFBA functionality."""
+    large_model.solver = all_solvers
+    benchmark(pfba, large_model)
+
+
+def test_pfba(model, all_solvers):
+    """Test pFBA functionality."""
+    model.solver = all_solvers
+    with model:
+        add_pfba(model)
+        with pytest.raises(ValueError):
+            add_pfba(model)
+
+    expression = model.objective.expression
+    n_constraints = len(model.constraints)
+    solution = pfba(model)
+    assert solution.status == "optimal"
+    assert np.isclose(solution.x_dict["Biomass_Ecoli_core"],
+                      0.8739, atol=1e-4, rtol=0.0)
+    abs_x = [abs(i) for i in solution.x]
+    assert np.isclose(sum(abs_x), 518.4221, atol=1e-4, rtol=0.0)
+    # test changes to model reverted
+    assert expression == model.objective.expression
+    assert len(model.constraints) == n_constraints
+
+    # needed?
+    # Test desired_objective_value
+    # desired_objective = 0.8
+    # pfba(model, solver=solver,
+    #                       desired_objective_value=desired_objective)
+    # abs_x = [abs(i) for i in model.solution.x]
+    # assert model.solution.status == "optimal"
+    # assert abs(model.solution.f - desired_objective) < 0.001
+    # assert abs(sum(abs_x) - 476.1594) < 0.001
+
+    # TODO: parametrize fraction (DRY it up)
+    # Test fraction_of_optimum
+    solution = pfba(model, fraction_of_optimum=0.95)
+    assert solution.status == "optimal"
+    assert np.isclose(solution.x_dict["Biomass_Ecoli_core"],
+                      0.95 * 0.8739, atol=1e-4, rtol=0.0)
+    abs_x = [abs(i) for i in solution.x]
+    assert np.isclose(sum(abs_x), 493.4400, atol=1e-4, rtol=0.0)
+
+    # Infeasible solution
+    model.reactions.ATPM.lower_bound = 500
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        with pytest.raises((UserWarning, Infeasible, ValueError)):
+            pfba(model)


=====================================
cobra/test/test_flux_analysis/test_phenotype_phase_plane.py
=====================================
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of Phenotype Phase Plane Analysis."""
+
+from __future__ import absolute_import
+
+import numpy as np
+
+import pytest
+from cobra.flux_analysis.phenotype_phase_plane import production_envelope
+
+
+def test_envelope_one(model):
+    """Test flux of production envelope."""
+    df = production_envelope(model, ["EX_o2_e"])
+    assert np.isclose(df["flux_maximum"].sum(), 9.342, atol=1e-3)
+
+
+def test_envelope_multi_reaction_objective(model):
+    """Test production of multiple objectives."""
+    obj = {model.reactions.EX_ac_e: 1,
+           model.reactions.EX_co2_e: 1}
+    with pytest.raises(ValueError):
+        production_envelope(model, "EX_o2_e", obj)
+
+
+ at pytest.mark.parametrize("variables, num", [
+    (["EX_glc__D_e"], 30),
+    (["EX_glc__D_e", "EX_o2_e"], 20),
+    (["EX_glc__D_e", "EX_o2_e", "EX_ac_e"], 10)
+])
+def test_multi_variable_envelope(model, variables, num):
+    """Test production of envelope (multiple variable)."""
+    df = production_envelope(model, variables, points=num)
+    assert len(df) == num ** len(variables)
+
+
+def test_envelope_two(model):
+    """Test production of envelope."""
+    df = production_envelope(model, ["EX_glc__D_e", "EX_o2_e"],
+                             objective="EX_ac_e")
+    assert np.isclose(df["flux_maximum"].sum(), 1737.466, atol=1e-3)
+    assert np.isclose(df["carbon_yield_maximum"].sum(), 83.579,
+                      atol=1e-3)
+    assert np.isclose(df["mass_yield_maximum"].sum(), 82.176,
+                      atol=1e-3)


=====================================
cobra/test/test_flux_analysis/test_reaction.py
=====================================
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+"""Test _assess functions in reaction.py"""
+
+from __future__ import absolute_import
+
+from cobra.core import Metabolite, Reaction
+from cobra.flux_analysis.reaction import assess
+
+
+def test_assess(model, all_solvers):
+    """Test assess functions."""
+    with model:
+        assert assess(model, model.reactions.GLCpts,
+                      solver=all_solvers) is True
+        pyr = model.metabolites.pyr_c
+        a = Metabolite('a')
+        b = Metabolite('b')
+        model.add_metabolites([a, b])
+        pyr_a2b = Reaction('pyr_a2b')
+        pyr_a2b.add_metabolites({pyr: -1, a: -1, b: 1})
+        model.add_reactions([pyr_a2b])
+        res = assess(model, pyr_a2b, 0.01, solver=all_solvers)
+        expected = {
+            'precursors': {a: {'required': 0.01, 'produced': 0.0}},
+            'products': {b: {'required': 0.01, 'capacity': 0.0}}}
+        assert res == expected


=====================================
cobra/test/test_flux_analysis/test_room.py
=====================================
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of ROOM."""
+
+from __future__ import absolute_import
+
+import numpy as np
+
+from cobra.flux_analysis.room import add_room
+
+
+def test_room_sanity(model, all_solvers):
+    """Test optimization criterion and optimality for ROOM."""
+    model.solver = all_solvers
+    sol = model.optimize()
+    with model:
+        model.reactions.PYK.knock_out()
+        knock_sol = model.optimize()
+
+    with model:
+        # Internally uses pFBA as reference solution.
+        add_room(model)
+        model.reactions.PYK.knock_out()
+        room_sol = model.optimize()
+
+    with model:
+        # Use FBA as reference solution.
+        add_room(model, solution=sol)
+        model.reactions.PYK.knock_out()
+        room_sol_ref = model.optimize()
+
+    flux_change = (sol.fluxes - knock_sol.fluxes).abs().sum()
+    flux_change_room = (sol.fluxes - room_sol.fluxes).abs().sum()
+    flux_change_room_ref = (sol.fluxes - room_sol_ref.fluxes).abs().sum()
+    # Expect the ROOM solution to have smaller flux changes in
+    # reactions compared to a normal FBA.
+    assert flux_change_room < flux_change or \
+        np.isclose(flux_change_room, flux_change, atol=1E-06)
+    # Expect the FBA-based reference to have less change in
+    # flux distribution.
+    assert flux_change_room_ref > flux_change_room or \
+        np.isclose(flux_change_room_ref, flux_change_room, atol=1E-06)
+
+
+def test_linear_room_sanity(model, all_solvers):
+    """Test optimization criterion and optimality for linear ROOM."""
+    model.solver = all_solvers
+    sol = model.optimize()
+    with model:
+        model.reactions.PYK.knock_out()
+        knock_sol = model.optimize()
+
+    with model:
+        # Internally uses pFBA as reference solution.
+        add_room(model, linear=True)
+        model.reactions.PYK.knock_out()
+        room_sol = model.optimize()
+
+    with model:
+        # Use FBA as reference solution.
+        add_room(model, solution=sol, linear=True)
+        model.reactions.PYK.knock_out()
+        room_sol_ref = model.optimize()
+
+    flux_change = (sol.fluxes - knock_sol.fluxes).abs().sum()
+    flux_change_room = (sol.fluxes - room_sol.fluxes).abs().sum()
+    flux_change_room_ref = (sol.fluxes - room_sol_ref.fluxes).abs().sum()
+    # Expect the ROOM solution to have smaller flux changes in
+    # reactions compared to a normal FBA.
+    assert flux_change_room < flux_change or \
+        np.isclose(flux_change_room, flux_change, atol=1E-06)
+    # Expect the FBA-based reference to have less change in
+    # flux distribution.
+    assert flux_change_room_ref > flux_change_room or \
+        np.isclose(flux_change_room_ref, flux_change_room, atol=1E-06)


=====================================
cobra/test/test_flux_analysis/test_sampling.py
=====================================
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of flux sampling methods."""
+
+from __future__ import absolute_import
+
+import numpy as np
+
+import pytest
+from cobra.core import Metabolite, Model, Reaction
+from cobra.flux_analysis.parsimonious import pfba
+from cobra.flux_analysis.sampling import ACHRSampler, OptGPSampler, sample
+
+
+@pytest.fixture(scope="function")
+def achr(model):
+    """Return ACHRSampler instance for tests."""
+    sampler = ACHRSampler(model, thinning=1)
+    assert ((sampler.n_warmup > 0) and
+            (sampler.n_warmup <= 2 * len(model.variables)))
+    assert all(sampler.validate(sampler.warmup) == "v")
+    return sampler
+
+
+@pytest.fixture(scope="function")
+def optgp(model):
+    """Return OptGPSampler instance for tests."""
+    sampler = OptGPSampler(model, processes=1, thinning=1)
+    assert ((sampler.n_warmup > 0) and
+            (sampler.n_warmup <= 2 * len(model.variables)))
+    assert all(sampler.validate(sampler.warmup) == "v")
+    return sampler
+
+
+# Benchmarks
+def test_achr_init_benchmark(model, benchmark):
+    """Benchmark initial ACHR sampling."""
+    benchmark(lambda: ACHRSampler(model))
+
+
+def test_optgp_init_benchmark(model, benchmark):
+    """Benchmark initial OptGP sampling."""
+    benchmark(lambda: OptGPSampler(model, processes=2))
+
+
+def test_achr_sample_benchmark(achr, benchmark):
+    """Benchmark ACHR sampling."""
+    benchmark(achr.sample, 1)
+
+
+def test_optgp_sample_benchmark(optgp, benchmark):
+    """Benchmark OptGP sampling."""
+    benchmark(optgp.sample, 1)
+
+
+# Tests
+def test_single_achr(model):
+    """Test ACHR sampling (one sample)."""
+    s = sample(model, 10, method="achr")
+    assert s.shape == (10, len(model.reactions))
+
+
+def test_single_optgp(model):
+    """Test OptGP sampling (one sample)."""
+    s = sample(model, 10, processes=1)
+    assert s.shape == (10, len(model.reactions))
+
+
+def test_multi_optgp(model):
+    """Test OptGP sampling (multi sample)."""
+    s = sample(model, 10, processes=2)
+    assert s.shape == (10, len(model.reactions))
+
+
+def test_wrong_method(model):
+    """Test method intake sanity."""
+    with pytest.raises(ValueError):
+        sample(model, 1, method="schwupdiwupp")
+
+
+def test_validate_wrong_sample(achr, model):
+    """Test sample correctness."""
+    s = achr.sample(10)
+    s["hello"] = 1
+    with pytest.raises(ValueError):
+        achr.validate(s)
+
+
+def test_fixed_seed(model):
+    """Test result of fixed seed for sampling."""
+    s = sample(model, 1, seed=42)
+    assert np.allclose(s.TPI[0], 9.12037487)
+
+
+def test_equality_constraint(model):
+    """Test equality constraint."""
+    model.reactions.ACALD.bounds = (-1.5, -1.5)
+    s = sample(model, 10)
+    assert np.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
+    s = sample(model, 10, method="achr")
+    assert np.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
+
+
+def test_inequality_constraint(model):
+    """Test inequality constraint."""
+    co = model.problem.Constraint(
+        model.reactions.ACALD.flux_expression, lb=-0.5)
+    model.add_cons_vars(co)
+    s = sample(model, 10)
+    assert all(s.ACALD > -0.5 - 1e-6)
+    s = sample(model, 10, method="achr")
+    assert all(s.ACALD > -0.5 - 1e-6)
+
+
+def test_sampling(achr, optgp):
+    """Test sampling."""
+    s = achr.sample(10)
+    assert all(achr.validate(s) == "v")
+
+    s = optgp.sample(10)
+    assert all(optgp.validate(s) == "v")
+
+
+def test_batch_sampling(achr, optgp):
+    """Test batch sampling."""
+    for b in achr.batch(5, 4):
+        assert all(achr.validate(b) == "v")
+
+    for b in optgp.batch(5, 4):
+        assert all(optgp.validate(b) == "v")
+
+
+def test_variables_samples(achr, optgp):
+    """Test variable samples."""
+    vnames = np.array([v.name for v in achr.model.variables])
+    s = achr.sample(10, fluxes=False)
+    assert s.shape == (10, achr.warmup.shape[1])
+    assert (s.columns == vnames).all()
+    assert (achr.validate(s) == "v").all()
+
+    s = optgp.sample(10, fluxes=False)
+    assert s.shape == (10, optgp.warmup.shape[1])
+    assert (s.columns == vnames).all()
+    assert (optgp.validate(s) == "v").all()
+
+
+def test_inhomogeneous_sanity(model):
+    """Test whether inhomogeneous sampling gives approximately the same
+    standard deviation as a homogeneous version."""
+    model.reactions.ACALD.bounds = (-1.5, -1.5)
+    s_inhom = sample(model, 64)
+
+    model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
+    s_hom = sample(model, 64)
+
+    relative_diff = (s_inhom.std() + 1e-12) / (s_hom.std() + 1e-12)
+    assert 0.5 < relative_diff.abs().mean() < 2
+
+    model.reactions.ACALD.bounds = (-1.5, -1.5)
+    s_inhom = sample(model, 64, method="achr")
+
+    model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
+    s_hom = sample(model, 64, method="achr")
+
+    relative_diff = (s_inhom.std() + 1e-12) / (s_hom.std() + 1e-12)
+    assert 0.5 < relative_diff.abs().mean() < 2
+
+
+def test_reproject(optgp):
+    """Test reprojection of sampling."""
+    s = optgp.sample(10, fluxes=False).values
+    proj = np.apply_along_axis(optgp._reproject, 1, s)
+    assert all(optgp.validate(proj) == "v")
+
+    s = np.random.rand(10, optgp.warmup.shape[1])
+    proj = np.apply_along_axis(optgp._reproject, 1, s)
+    assert all(optgp.validate(proj) == "v")
+
+
+def test_complicated_model():
+    """Test a complicated model.
+
+    Difficult model since the online mean calculation is numerically
+    unstable so many samples weakly violate the equality constraints.
+
+    """
+    model = Model('flux_split')
+
+    reaction1 = Reaction('V1')
+    reaction2 = Reaction('V2')
+    reaction3 = Reaction('V3')
+    reaction1.bounds = (0, 6)
+    reaction2.bounds = (0, 8)
+    reaction3.bounds = (0, 10)
+
+    A = Metabolite('A')
+
+    reaction1.add_metabolites({A: -1})
+    reaction2.add_metabolites({A: -1})
+    reaction3.add_metabolites({A: 1})
+
+    model.add_reactions([reaction1, reaction2, reaction3])
+
+    optgp = OptGPSampler(model, 1, seed=42)
+    achr = ACHRSampler(model, seed=42)
+
+    optgp_samples = optgp.sample(100)
+    achr_samples = achr.sample(100)
+
+    assert any(optgp_samples.corr().abs() < 1.0)
+    assert any(achr_samples.corr().abs() < 1.0)
+    # > 95% are valid
+    assert sum(optgp.validate(optgp_samples) == "v") > 95
+    assert sum(achr.validate(achr_samples) == "v") > 95
+
+
+def test_single_point_space(model):
+    """Test the reduction of the sampling space to one point."""
+    pfba_sol = pfba(model)
+    pfba_const = model.problem.Constraint(
+        sum(model.variables), ub=pfba_sol.objective_value)
+    model.add_cons_vars(pfba_const)
+    model.reactions.Biomass_Ecoli_core.lower_bound = \
+        pfba_sol.fluxes.Biomass_Ecoli_core
+    with pytest.raises(ValueError):
+        s = sample(model, 1)


=====================================
cobra/test/test_flux_analysis/test_summary.py
=====================================
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of summary methods."""
+
+from __future__ import absolute_import
+
+import re
+import sys
+from contextlib import contextmanager
+
+from six import StringIO
+
+import pytest
+from cobra.flux_analysis.parsimonious import pfba
+
+
+@contextmanager
+def captured_output():
+    """A context manager to test the IO summary methods."""
+    new_out, new_err = StringIO(), StringIO()
+    old_out, old_err = sys.stdout, sys.stderr
+    try:
+        sys.stdout, sys.stderr = new_out, new_err
+        yield sys.stdout, sys.stderr
+
+    finally:
+        sys.stdout, sys.stderr = old_out, old_err
+
+
+def check_line(output, expected_entries,
+               pattern=re.compile(r"\s")):
+    """Ensure each expected entry is in the output."""
+    output_set = set(
+        pattern.sub("", line) for line in output.splitlines())
+    for elem in expected_entries:
+        assert pattern.sub("", elem) in output_set
+
+
+def check_in_line(output, expected_entries,
+                  pattern=re.compile(r"\s")):
+    """Ensure each expected entry is contained in the output."""
+    output_strip = [pattern.sub("", line) for line in
+                    output.splitlines()]
+    for elem in expected_entries:
+        assert any(
+            pattern.sub("", elem) in line for line in output_strip), \
+            "Not found: {} in:\n{}".format(pattern.sub("", elem),
+                                           "\n".join(output_strip))
+
+
+@pytest.mark.parametrize("names", [False, True])
+def test_model_summary_previous_solution(model, opt_solver, names):
+    """Test summary of previous solution."""
+    model.solver = opt_solver
+    solution = model.optimize()
+    rxn_test = model.exchanges[0]
+    if names:
+        met_test = list(rxn_test.metabolites.keys())[0].name
+    else:
+        met_test = list(rxn_test.metabolites.keys())[0].id
+
+    solution.fluxes[rxn_test.id] = 321
+
+    with captured_output() as (out, _):
+        model.summary(solution, names=names)
+    check_in_line(out.getvalue(), [met_test + '321'])
+
+
+@pytest.mark.parametrize("names", [False, True])
+def test_model_summary(model, opt_solver, names):
+    """Test model summary."""
+    model.solver = opt_solver
+    # test non-fva version (these should be fixed for textbook model)
+    if names:
+        expected_entries = [
+            'O2      21.8',
+            'D-Glucose  10',
+            'Ammonium      4.77',
+            'Phosphate       3.21',
+            'H2O  29.2',
+            'CO2  22.8',
+            'H+    17.5',
+            'Biomass_Ecol...  0.874',
+        ]
+    else:
+        expected_entries = [
+            'o2_e      21.8',
+            'glc__D_e  10',
+            'nh4_e      4.77',
+            'pi_e       3.21',
+            'h2o_e  29.2',
+            'co2_e  22.8',
+            'h_e    17.5',
+            'Biomass_Ecol...  0.874',
+        ]
+    # Need to use a different method here because
+    # there are multiple entries per line.
+    model.optimize()
+    with captured_output() as (out, _):
+        model.summary(names=names)
+    check_in_line(out.getvalue(), expected_entries)
+
+    # with model:
+    #     model.objective = model.exchanges[0]
+    #     model.summary()
+
+
+@pytest.mark.parametrize("fraction", [0.95])
+def test_model_summary_with_fva(model, opt_solver, fraction):
+    """Test model summary (using FVA)."""
+    if opt_solver == "optlang-gurobi":
+        pytest.xfail("FVA currently buggy")
+    # test non-fva version (these should be fixed for textbook model
+    expected_entries = [
+        'idFluxRangeidFluxRangeBiomass_Ecol...0.874',
+        'o2_e       21.8   [19.9, 23.7]'
+        'h2o_e       29.2  [25, 30.7]',
+        'glc__D_e   10     [9.52, 10]'
+        'co2_e       22.8  [18.9, 24.7]',
+        'nh4_e       4.77  [4.53, 5.16]'
+        'h_e         17.5  [16.7, 22.4]',
+        'pi_e        3.21  [3.05, 3.21]'
+        'for_e        0    [0, 5.72]',
+        'ac_e         0    [0, 1.91]',
+        'pyr_e        0    [0, 1.27]',
+        'lac__D_e     0    [0, 1.07]',
+        'succ_e       0    [0, 0.837]',
+        'glu__L_e     0    [0, 0.636]',
+        'akg_e        0    [0, 0.715]',
+        'etoh_e       0    [0, 1.11]',
+        'acald_e      0    [0, 1.27]',
+    ]
+    # Need to use a different method here because
+    # there are multiple entries per line.
+    model.solver = opt_solver
+    solution = model.optimize()
+    with captured_output() as (out, _):
+        model.summary(solution, fva=fraction)
+    check_in_line(out.getvalue(), expected_entries)
+
+
+@pytest.mark.parametrize("met", ["q8_c"])
+def test_metabolite_summary_previous_solution(
+        model, opt_solver, met):
+    """Test metabolite summary of previous solution."""
+    model.solver = opt_solver
+    solution = pfba(model)
+    model.metabolites.get_by_id(met).summary(solution)
+
+
+@pytest.mark.parametrize("met, names", [
+    ("q8_c", False),
+    ("q8_c", True)
+])
+def test_metabolite_summary(model, opt_solver, met, names):
+    """Test metabolite summary."""
+    model.solver = opt_solver
+    model.optimize()
+    with captured_output() as (out, _):
+        model.metabolites.get_by_id(met).summary(names=names)
+
+    if names:
+        expected_entries = [
+            'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
+            '%       FLUX  RXN ID      REACTION',
+            '100%   43.6   cytochr...  2.0 H+ + 0.5 O2 + Ubiquinol-8 --> '
+            'H2O + 2.0 H+ ...',
+            'CONSUMING REACTIONS -- Ubiquinone-8 (q8_c)',
+            '%       FLUX  RXN ID      REACTION',
+            '88%    38.5   NADH de...  4.0 H+ + Nicotinamide adenine '
+            'dinucleotide - re...',
+            '12%     5.06  succina...  Ubiquinone-8 + Succinate --> '
+            'Fumarate + Ubiquin...'
+        ]
+    else:
+        expected_entries = [
+            'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
+            '%       FLUX  RXN ID    REACTION',
+            '100%   43.6   CYTBD     '
+            '2.0 h_c + 0.5 o2_c + q8h2_c --> h2o_c + 2.0 h_e...',
+            'CONSUMING REACTIONS -- Ubiquinone-8 (q8_c)',
+            '%       FLUX  RXN ID    REACTION',
+            '88%    38.5   NADH16    '
+            '4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q...',
+            '12%     5.06  SUCDi     q8_c + succ_c --> fum_c + q8h2_c',
+        ]
+
+    check_in_line(out.getvalue(), expected_entries)
+
+
+@pytest.mark.parametrize("fraction, met", [(0.99, "fdp_c")])
+def test_metabolite_summary_with_fva(model, opt_solver, fraction,
+                                     met):
+    """Test metabolite summary (using FVA)."""
+    #     pytest.xfail("FVA currently buggy")
+
+    model.solver = opt_solver
+    model.optimize()
+    with captured_output() as (out, _):
+        model.metabolites.get_by_id(met).summary(fva=fraction)
+
+    expected_entries = [
+        'PRODUCING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
+        '%       FLUX  RANGE         RXN ID    REACTION',
+        '100%    7.48  [6.17, 9.26]  PFK       '
+        'atp_c + f6p_c --> adp_c + fdp_c + h_c',
+        'CONSUMING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
+        '%       FLUX  RANGE         RXN ID    REACTION',
+        '100%    7.48  [6.17, 8.92]  FBA       fdp_c <=> dhap_c + g3p_c',
+        '0%      0     [0, 1.72]     FBP       '
+        'fdp_c + h2o_c --> f6p_c + pi_c',
+    ]
+
+    check_line(out.getvalue(), expected_entries)


=====================================
cobra/test/test_flux_analysis/test_variability.py
=====================================
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+"""Test functionalities of Flux Variability Analysis."""
+
+from __future__ import absolute_import
+
+import numpy as np
+from six import iteritems
+
+import pytest
+from cobra.exceptions import Infeasible
+from cobra.flux_analysis.variability import (
+    find_blocked_reactions, find_essential_genes, find_essential_reactions,
+    flux_variability_analysis)
+
+
+# FVA
+def test_flux_variability_benchmark(large_model, benchmark,
+                                    all_solvers):
+    """Benchmark FVA."""
+    large_model.solver = all_solvers
+    benchmark(flux_variability_analysis, large_model,
+              reaction_list=large_model.reactions[1::3])
+
+
+def test_pfba_flux_variability(model, pfba_fva_results,
+                               fva_results, all_solvers):
+    """Test FVA using pFBA."""
+    model.solver = all_solvers
+    with pytest.warns(UserWarning):
+        flux_variability_analysis(
+            model, pfba_factor=0.1, reaction_list=model.reactions[1::3])
+    fva_out = flux_variability_analysis(
+        model, pfba_factor=1.1, reaction_list=model.reactions)
+    for name, result in iteritems(fva_out.T):
+        for k, v in iteritems(result):
+            assert abs(pfba_fva_results[k][name] - v) < 0.00001
+            assert abs(pfba_fva_results[k][name]) <= abs(
+                fva_results[k][name])
+    loop_reactions = [model.reactions.get_by_id(rid)
+                      for rid in ("FRD7", "SUCDi")]
+    fva_loopless = flux_variability_analysis(
+        model, pfba_factor=1.1, reaction_list=loop_reactions,
+        loopless=True)
+    assert np.allclose(fva_loopless["maximum"],
+                       fva_loopless["minimum"])
+
+
+def test_flux_variability(model, fva_results, all_solvers):
+    """Test FVA."""
+    model.solver = all_solvers
+    fva_out = flux_variability_analysis(
+        model, reaction_list=model.reactions)
+    for name, result in iteritems(fva_out.T):
+        for k, v in iteritems(result):
+            assert abs(fva_results[k][name] - v) < 0.00001
+
+
+# Loopless FVA
+def test_flux_variability_loopless_benchmark(model, benchmark,
+                                             all_solvers):
+    """Benchmark loopless FVA."""
+    model.solver = all_solvers
+    benchmark(flux_variability_analysis, model, loopless=True,
+              reaction_list=model.reactions[1::3])
+
+
+def test_flux_variability_loopless(model, all_solvers):
+    """Test loopless FVA."""
+    model.solver = all_solvers
+    loop_reactions = [model.reactions.get_by_id(rid)
+                      for rid in ("FRD7", "SUCDi")]
+    fva_normal = flux_variability_analysis(
+        model, reaction_list=loop_reactions)
+    fva_loopless = flux_variability_analysis(
+        model, reaction_list=loop_reactions, loopless=True)
+
+    assert not np.allclose(fva_normal["maximum"],
+                           fva_normal["minimum"])
+    assert np.allclose(fva_loopless["maximum"],
+                       fva_loopless["minimum"])
+
+
+# Internals (essentiality, blocked reactions)
+def test_fva_data_frame(model):
+    """Test DataFrame obtained from FVA."""
+    df = flux_variability_analysis(model)
+    assert np.all([df.columns.values == ['minimum', 'maximum']])
+
+
+def test_fva_infeasible(model):
+    """Test FVA infeasibility."""
+    infeasible_model = model.copy()
+    infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
+    # ensure that an infeasible model does not run FVA
+    with pytest.raises(Infeasible):
+        flux_variability_analysis(infeasible_model)
+
+
+def test_fva_minimization(model):
+    """Test minimization using FVA."""
+    model.objective = model.reactions.EX_glc__D_e
+    model.objective_direction = 'min'
+    solution = flux_variability_analysis(model, fraction_of_optimum=.95)
+    assert solution.at['EX_glc__D_e', 'minimum'] == -10.0
+    assert solution.at['EX_glc__D_e', 'maximum'] == -9.5
+
+
+def test_find_blocked_reactions_solver_none(model):
+    """Test find_blocked_reactions() [no specific solver]."""
+    result = find_blocked_reactions(model, model.reactions[40:46])
+    assert result == ['FRUpts2']
+
+
+def test_essential_genes(model):
+    """Test find_essential_genes()."""
+    essential_genes = {'b2779', 'b1779', 'b0720', 'b2416',
+                       'b2926', 'b1136', 'b2415'}
+    observed_essential_genes = {g.id for g in
+                                find_essential_genes(model)}
+    assert observed_essential_genes == essential_genes
+
+
+def test_essential_reactions(model):
+    """Test find_blocked_reactions()."""
+    essential_reactions = {'GLNS', 'Biomass_Ecoli_core', 'PIt2r',
+                           'GAPD',
+                           'ACONTb', 'EX_nh4_e', 'ENO', 'EX_h_e',
+                           'EX_glc__D_e', 'ICDHyr', 'CS', 'NH4t',
+                           'GLCpts',
+                           'PGM', 'EX_pi_e', 'PGK', 'RPI', 'ACONTa'}
+    observed_essential_reactions = {r.id for r in
+                                    find_essential_reactions(model)}
+    assert observed_essential_reactions == essential_reactions
+
+
+def test_find_blocked_reactions(model, all_solvers):
+    """Test find_blocked_reactions()."""
+    model.solver = all_solvers
+    result = find_blocked_reactions(model, model.reactions[40:46])
+    assert result == ['FRUpts2']
+
+    result = find_blocked_reactions(model, model.reactions[42:48])
+    assert set(result) == {'FUMt2_2', 'FRUpts2'}
+
+    result = find_blocked_reactions(model, model.reactions[30:50],
+                                    open_exchanges=True)
+    assert result == []


=====================================
debian/changelog
=====================================
@@ -1,9 +1,10 @@
-python-cobra (0.13.3-1) UNRELEASED; urgency=medium
+python-cobra (0.13.4-1) unstable; urgency=medium
 
   * Team upload.
   
   [ Afif Elghraoui ]
   * New upstream version
+    Closes: #904709
   * Update dependencies
   * Refresh patch
   * Drop obsolete patch
@@ -12,17 +13,19 @@ python-cobra (0.13.3-1) UNRELEASED; urgency=medium
   * Standards-Version: 4.2.1
   * Point Vcs-fields to Salsa
   * debhelper 11
+  * cme fix dpkg-control
   * Build-Depends: python*-requests, python*-pip
   * Drop failing test TestCobraFluxSampling.test_fixed_seed
   * Rename debian/tests/control to control.autodep8
   * Testsuite: autopkgtest-pkg-python
   * Drop Python2 package since it has no reverse dependencies
+    (as well as autopkgtest of Python2 version)
   * Build-Depends: python3-depinfo, python3-pipdeptree
   * Drop failing test: test_show_versions (needs further investigation
     with upstream)
-  TODO: Wait until ftpmaster accepted python3-depinfo
+  * Silence lintian about wrong Python version of data file
 
- -- Andreas Tille <tille at debian.org>  Sat, 28 Apr 2018 22:36:04 +0200
+ -- Andreas Tille <tille at debian.org>  Wed, 17 Oct 2018 21:31:48 +0200
 
 python-cobra (0.5.9-1) unstable; urgency=medium
 


=====================================
debian/control
=====================================
@@ -4,29 +4,28 @@ Uploaders: Afif Elghraoui <afif at debian.org>
 Section: python
 Testsuite: autopkgtest-pkg-python
 Priority: optional
-Build-Depends:
-	debhelper (>= 11~),
-	dh-python,
-	libglpk-dev,
-	python3-all,
-	python3-all-dev,
-	python3-setuptools,
-	python3-depinfo,
-	python3-future,
-	python3-numpy (>= 1.6),
-	python3-optlang,,
-	python3-ruamel.yaml,
-	python3-pandas (>= 0.17.0),
-	python3-pip,
-	python3-pipdeptree,
-	python3-requests,
-	python3-swiglpk,
-	python3-tabulate,
-	cython3,
+Build-Depends: debhelper (>= 11~),
+               dh-python,
+               libglpk-dev,
+               python3-all,
+               python3-all-dev,
+               python3-setuptools,
+               python3-depinfo,
+               python3-future,
+               python3-numpy,
+               python3-optlang,
+               python3-ruamel.yaml,
+               python3-pandas (>= 0.17.0),
+               python3-pip,
+               python3-pipdeptree,
+               python3-requests,
+               python3-swiglpk,
+               python3-tabulate,
+               cython3,
 # Test-Depends:
-	python3-pytest,
-	python3-pytest-benchmark,
-	python3-jsonschema (>> 2.5.0)
+               python3-pytest,
+               python3-pytest-benchmark,
+               python3-jsonschema (>> 2.5.0)
 Standards-Version: 4.2.1
 Vcs-Browser: https://salsa.debian.org/med-team/python-cobra
 Vcs-Git: https://salsa.debian.org/med-team/python-cobra.git
@@ -34,14 +33,12 @@ Homepage: http://opencobra.github.io/cobrapy/
 
 Package: python3-cobra
 Architecture: any
-Depends:
-	${shlibs:Depends},
-	${misc:Depends},
-	${python3:Depends},
-	python-cobra-data (= ${source:Version}),
-Suggests:
-	python3-matplotlib,
-	qsopt-ex,
+Depends: ${shlibs:Depends},
+         ${misc:Depends},
+         ${python3:Depends},
+         python-cobra-data (= ${source:Version})
+Suggests: python3-matplotlib,
+          qsopt-ex
 Description: constraint-based modeling of biological networks with Python 3
  COnstraint-Based Reconstruction and Analysis (COBRA) methods are widely
  used for genome-scale modeling of metabolic networks in both prokaryotes


=====================================
debian/patches/drop_failing_test.patch deleted
=====================================
@@ -1,19 +0,0 @@
-Description: Drop failing test TestCobraFluxSampling.test_fixed_seed
-Author: Andreas Tille <tille at debian.org>
-Last-Update: Sat, 28 Apr 2018 22:36:04 +0200
-
---- a/cobra/test/test_flux_analysis.py
-+++ b/cobra/test/test_flux_analysis.py
-@@ -1025,9 +1025,9 @@ class TestCobraFluxSampling:
-         with pytest.raises(ValueError):
-             self.achr.validate(s)
- 
--    def test_fixed_seed(self, model):
--        s = sample(model, 1, seed=42)
--        assert numpy.allclose(s.TPI[0], 9.12037487)
-+#    def test_fixed_seed(self, model):
-+#        s = sample(model, 1, seed=42)
-+#        assert numpy.allclose(s.TPI[0], 9.12037487)
- 
-     def test_equality_constraint(self, model):
-         model.reactions.ACALD.bounds = (-1.5, -1.5)


=====================================
debian/patches/series
=====================================
@@ -1,3 +1,2 @@
 mathjax.patch
-drop_failing_test.patch
 drop_failing_test2.patch


=====================================
debian/patches/testsuite-check-jsonschema.patch deleted
=====================================
@@ -1,29 +0,0 @@
-Description: Disable jsonschema requirement for test-suite
- The python{3}-jsonschema packages are not yet available in Debian unstable
- for the version required by this package. The test suite is not appropriately
- configured to skip the tests that require this package. The patches here
- fix that.
-Author: Afif Elghraoui <afif at ghraoui.name>
-Forwarded: no
-Last-Update: 2015-08-10
---- python-cobra.orig/cobra/test/io_tests.py
-+++ python-cobra/cobra/test/io_tests.py
-@@ -164,7 +164,7 @@
-         # MAT does not store gene names
-         None
- 
--
-+ at skipIf(not jsonschema, "jsonschema required")
- class TestCobraIOjson(TestCase, TestCobraIO):
-     def setUp(self):
-         self.test_model = mini_model
---- python-cobra.orig/setup.py
-+++ python-cobra/setup.py
-@@ -164,7 +164,6 @@
-     packages=find_packages(exclude=['cobra.oven', 'cobra.oven*']),
-     setup_requires=[],
-     install_requires=["six"],
--    tests_require=["jsonschema > 2.5"],
-     extras_require=extras,
-     ext_modules=ext_modules,
- 


=====================================
debian/python3-cobra.lintian-overrides
=====================================
@@ -0,0 +1,2 @@
+# The data file is for both Python versions
+python3-cobra: python-package-depends-on-package-from-other-python-variant Depends: python-cobra-data


=====================================
debian/tests/Makefile
=====================================
@@ -1,9 +1,8 @@
 
 .ONESHELL:
 
-all: py2-test py3-test
+all: py3-test
 
-py2-test: SHELL=python2
 py3-test: SHELL=python3
 
 %-test:


=====================================
debian/tests/control.autodep8
=====================================
@@ -1,19 +1,3 @@
-Test-Command:
-	export MAKEFILES="$PWD/debian/tests/Makefile";
-	cd $ADTTMP && make py2-test
-Depends:
-	make,
-	python-cobra,
-	python-pytest,
-	python-jsonschema,
-# We could have avoided retyping these by adding the "needs-recommends" restriction, but doing that would also cause matplotlib to be pulled in.
-# The tests involving matplotlib fail on debci from trying to do graphical operations on a headless server.
-	python-sbml,
-	python-scipy,
-	python-numpy,
-	python-pandas,
-Restrictions: allow-stderr
-
 Test-Command:
 	export MAKEFILES="$PWD/debian/tests/Makefile";
 	cd $ADTTMP && make py3-test
@@ -26,4 +10,6 @@ Depends:
 	python3-scipy,
 	python3-numpy,
 	python3-pandas,
+	# The Python2 test also contained python-sbml.  Once python3-sbml exists, it should probably be added here:
+	# python3-sbml
 Restrictions: allow-stderr


=====================================
release-notes/0.13.4.md
=====================================
@@ -0,0 +1,7 @@
+# Release notes for cobrapy 0.13.4
+
+## Fixes
+
+* Internal re-organization of the test suite.
+* Upgrade the `ruamel.yaml` version making it compatible with Python 3.7.
+* Fix a bug with a regular expression in the Matlab interface.


=====================================
setup.cfg
=====================================
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.13.3
+current_version = 0.13.4
 commit = True
 tag = True
 parse = (?P<major>\d+)


=====================================
setup.py
=====================================
@@ -37,14 +37,14 @@ except IOError:
 if __name__ == "__main__":
     setup(
         name="cobra",
-        version="0.13.3",
+        version="0.13.4",
         packages=find_packages(),
         setup_requires=setup_requirements,
         install_requires=[
             "six",
             "future",
             "swiglpk",
-            "ruamel.yaml<0.15",
+            "ruamel.yaml>=0.15",
             "numpy>=1.13",
             "pandas>=0.17.0",
             "optlang>=1.4.2",
@@ -88,6 +88,7 @@ if __name__ == "__main__":
             'Programming Language :: Python :: 3.4',
             'Programming Language :: Python :: 3.5',
             'Programming Language :: Python :: 3.6',
+            'Programming Language :: Python :: 3.7',
             'Programming Language :: Python :: Implementation :: CPython',
             'Topic :: Scientific/Engineering',
             'Topic :: Scientific/Engineering :: Bio-Informatics'


=====================================
tox.ini
=====================================
@@ -1,5 +1,5 @@
 [tox]
-envlist = pep8, py27, py34, py35, py36, sbml, array
+envlist = pep8, py27, py34, py35, py36, py37, sbml, array
 
 [testenv]
 passenv =



View it on GitLab: https://salsa.debian.org/med-team/python-cobra/compare/fff89040cdb873308a398a97bc8a778b455ced12...11d80fce5d66886597adffefca19de87bde01fd2

-- 
View it on GitLab: https://salsa.debian.org/med-team/python-cobra/compare/fff89040cdb873308a398a97bc8a778b455ced12...11d80fce5d66886597adffefca19de87bde01fd2
You're receiving this email because of your account on salsa.debian.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/debian-med-commit/attachments/20181017/974003d7/attachment-0001.html>


More information about the debian-med-commit mailing list