[med-svn] [Git][med-team/python-cobra][upstream] New upstream version 0.11.3

Andreas Tille gitlab at salsa.debian.org
Sat Apr 28 22:14:46 BST 2018


Andreas Tille pushed to branch upstream at Debian Med / python-cobra


Commits:
e3e44933 by Andreas Tille at 2018-04-28T22:35:51+02:00
New upstream version 0.11.3
- - - - -


16 changed files:

- cobra/__init__.py
- cobra/core/reaction.py
- cobra/flux_analysis/parsimonious.py
- cobra/flux_analysis/sampling.py
- cobra/flux_analysis/variability.py
- cobra/test/test_flux_analysis.py
- cobra/test/test_solver_model.py
- cobra/util/solver.py
- documentation_builder/constraints_objectives.ipynb
- documentation_builder/sampling.ipynb
- + release-notes/0.11.1.md
- + release-notes/0.11.2.md
- + release-notes/0.11.3.md
- release-notes/next-release.md
- setup.cfg
- setup.py


Changes:

=====================================
cobra/__init__.py
=====================================
--- a/cobra/__init__.py
+++ b/cobra/__init__.py
@@ -13,7 +13,7 @@ from cobra.core import (
     DictList, Gene, Metabolite, Model, Object, Reaction, Species)
 from cobra.util.version_info import show_versions
 
-__version__ = "0.11.0"
+__version__ = "0.11.3"
 
 # set the warning format to be prettier and fit on one line
 _cobra_path = _dirname(_abspath(__file__))


=====================================
cobra/core/reaction.py
=====================================
--- a/cobra/core/reaction.py
+++ b/cobra/core/reaction.py
@@ -420,7 +420,7 @@ class Reaction(Object):
             if g not in self._genes:  # if an old gene is not a new gene
                 try:
                     g._reaction.remove(self)
-                except:
+                except KeyError:
                     warn("could not remove old gene %s from reaction %s" %
                          (g.id, self.id))
 
@@ -618,6 +618,7 @@ class Reaction(Object):
     __radd__ = __add__
 
     def __iadd__(self, other):
+
         self.add_metabolites(other._metabolites, combine=True)
         gpr1 = self.gene_reaction_rule.strip()
         gpr2 = other.gene_reaction_rule.strip()
@@ -643,11 +644,26 @@ class Reaction(Object):
     def __imul__(self, coefficient):
         """Scale coefficients in a reaction by a given value
 
-        E.g. A -> B becomes 2A -> 2B
+        E.g. A -> B becomes 2A -> 2B.
+
+        If coefficient is less than zero, the reaction is reversed and the
+        bounds are swapped.
         """
         self._metabolites = {
             met: value * coefficient
             for met, value in iteritems(self._metabolites)}
+
+        if coefficient < 0:
+            self.bounds = (-self.upper_bound, -self.lower_bound)
+
+        if self._model:
+            self._model._populate_solver([self])
+
+        context = get_context(self)
+        if context:
+            context(partial(self._model._populate_solver, [self]))
+            context(partial(self.__imul__, 1./coefficient))
+
         return self
 
     def __mul__(self, coefficient):
@@ -757,37 +773,25 @@ class Reaction(Object):
                 # reaction
                 metabolite._reaction.add(self)
 
-        for metabolite, the_coefficient in list(self._metabolites.items()):
-            if the_coefficient == 0:
-                # make the metabolite aware that it no longer participates
-                # in this reaction
-                metabolite._reaction.remove(self)
-                self._metabolites.pop(metabolite)
-
         # from cameo ...
         model = self.model
         if model is not None:
             model.add_metabolites(new_metabolites)
 
-            for metabolite, coefficient in metabolites_to_add.items():
-
-                if isinstance(metabolite,
-                              str):  # support metabolites added as strings.
-                    metabolite = model.metabolites.get_by_id(metabolite)
-                if combine:
-                    try:
-                        old_coefficient = old_coefficients[metabolite]
-                    except KeyError:
-                        pass
-                    else:
-                        coefficient = coefficient + old_coefficient
-
+            for metabolite, coefficient in self._metabolites.items():
                 model.constraints[
                     metabolite.id].set_linear_coefficients(
                     {self.forward_variable: coefficient,
                      self.reverse_variable: -coefficient
                      })
 
+        for metabolite, the_coefficient in list(self._metabolites.items()):
+            if the_coefficient == 0:
+                # make the metabolite aware that it no longer participates
+                # in this reaction
+                metabolite._reaction.remove(self)
+                self._metabolites.pop(metabolite)
+
         context = get_context(self)
         if context and reversibly:
             if combine:
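
For context, a minimal sketch of how the reworked in-place multiplication is meant to be used; it mirrors the new test case in cobra/test/test_solver_model.py (the textbook model and the printed values are taken from there, and performing the operation inside a model context reverts it on exit):

    import cobra.test

    model = cobra.test.create_test_model("textbook")
    with model:
        # A negative coefficient reverses the reaction and swaps the bounds.
        model.reactions.EX_glc__D_e *= -2
        print(model.reactions.EX_glc__D_e.reaction)  # ' <=> 2.0 glc__D_e'
        print(model.reactions.EX_glc__D_e.bounds)    # (-1000.0, 10.0)
    # Leaving the context undoes the scaling again.
    print(model.reactions.EX_glc__D_e.reaction)      # 'glc__D_e <=> '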


=====================================
cobra/flux_analysis/parsimonious.py
=====================================
--- a/cobra/flux_analysis/parsimonious.py
+++ b/cobra/flux_analysis/parsimonious.py
@@ -94,7 +94,7 @@ def add_pfba(model, objective=None, fraction_of_optimum=1.0):
     if objective is not None:
         model.objective = objective
     if model.solver.objective.name == '_pfba_objective':
-        raise ValueError('model already has pfba objective')
+        raise ValueError('The model already has a pFBA objective.')
     sutil.fix_objective_as_constraint(model, fraction=fraction_of_optimum)
     reaction_variables = ((rxn.forward_variable, rxn.reverse_variable)
                           for rxn in model.reactions)


=====================================
cobra/flux_analysis/sampling.py
=====================================
--- a/cobra/flux_analysis/sampling.py
+++ b/cobra/flux_analysis/sampling.py
@@ -24,18 +24,14 @@ from cobra.util import (create_stoichiometric_matrix, constraint_matrices,
 LOGGER = getLogger(__name__)
 """The logger for the package."""
 
-bounds_tol = np.finfo(np.float32).eps
+bounds_tol = 1e-6
 """The tolerance used for checking bounds feasibility."""
 
-feasibility_tol = bounds_tol
+feasibility_tol = 1e-6
 """The tolerance used for checking equalities feasibility."""
 
-nproj = 1000000
-"""Reproject the solution into the feasibility space every nproj iterations."""
-
-nproj_center = 10000
-"""Reproject the center into the nullspace every nproj_center iterations.
-   Only used for inhomogeneous problems."""
+max_tries = 100
+"""Maximum number of retries for sampling."""
 
 Problem = namedtuple("Problem",
                      ["equalities", "b", "inequalities", "bounds",
@@ -106,7 +102,7 @@ def shared_np_array(shape, data=None, integer=False):
 
 
 # Has to be declared outside of class to be used for multiprocessing :(
-def _step(sampler, x, delta, fraction=None):
+def _step(sampler, x, delta, fraction=None, tries=0):
     """Sample a new feasible point from the point `x` in direction `delta`."""
     prob = sampler.problem
     valid = ((np.abs(delta) > feasibility_tol) &
@@ -116,14 +112,19 @@ def _step(sampler, x, delta, fraction=None):
     valphas = (valphas / delta[valid]).flatten()
     if prob.bounds.shape[0] > 0:
         # permissible alphas for staying in constraint bounds
+        ineqs = prob.inequalities.dot(delta)
+        valid = np.abs(ineqs) > feasibility_tol
         balphas = ((1.0 - bounds_tol) * prob.bounds -
-                   prob.inequalities.dot(x))
-        balphas = (balphas / prob.inequalities.dot(delta)).flatten()
+                   prob.inequalities.dot(x))[:, valid]
+        balphas = (balphas / ineqs[valid]).flatten()
         # combined alphas
         alphas = np.hstack([valphas, balphas])
     else:
         alphas = valphas
-    alpha_range = (alphas[alphas > 0.0].min(), alphas[alphas <= 0.0].max())
+    pos_alphas = alphas[alphas > 0.0]
+    neg_alphas = alphas[alphas <= 0.0]
+    alpha_range = np.array([neg_alphas.max() if len(neg_alphas) > 0 else 0,
+                            pos_alphas.min() if len(pos_alphas) > 0 else 0])
 
     if fraction:
         alpha = alpha_range[0] + fraction * (alpha_range[1] - alpha_range[0])
@@ -133,12 +134,21 @@ def _step(sampler, x, delta, fraction=None):
 
     # Numerical instabilities may cause bounds invalidation
     # reset sampler and sample from one of the original warmup directions
-    # if that occurs
-    if np.any(sampler._bounds_dist(p) < -bounds_tol):
+    # if that occurs. Also reset if we got stuck.
+    if (np.any(sampler._bounds_dist(p) < -bounds_tol) or
+            np.abs(np.abs(alpha_range).max() * delta).max() < bounds_tol):
+        if tries > max_tries:
+            raise RuntimeError("Can not escape sampling region, model seems"
+                               " numerically unstable :( Reporting the "
+                               "model to "
+                               "https://github.com/opencobra/cobrapy/issues "
+                               "will help us to fix this :)")
         LOGGER.info("found bounds infeasibility in sample, "
                     "resetting to center")
         newdir = sampler.warmup[np.random.randint(sampler.n_warmup)]
-        return _step(sampler, sampler.center, newdir - sampler.center)
+        sampler.retries += 1
+        return _step(sampler, sampler.center, newdir - sampler.center, None,
+                     tries + 1)
     return p
 
 
@@ -152,6 +162,13 @@ class HRSampler(object):
     thinning : int
         The thinning factor of the generated sampling chain. A thinning of 10
         means samples are returned every 10 steps.
+    nproj : int > 0, optional
+        How often to reproject the sampling point into the feasibility space.
+        Avoids numerical issues at the cost of lower sampling speed. If you
+        observe many equality constraint violations with `sampler.validate`
+        you should lower this number.
+    seed : int > 0, optional
+        The random number seed that should be used.
 
     Attributes
     ----------
@@ -162,6 +179,9 @@ class HRSampler(object):
     n_samples : int
         The total number of samples that have been generated by this
         sampler instance.
+    retries : int
+        The overall number of sampling retries the sampler has observed.
+        Larger values indicate numerical instabilities.
     problem : collections.namedtuple
         A python object whose attributes define the entire sampling problem in
         matrix form. See docstring of `Problem`.
@@ -169,6 +189,8 @@ class HRSampler(object):
         A matrix of with as many columns as reactions in the model and more
         than 3 rows containing a warmup sample in each row. None if no warmup
         points have been generated yet.
+    nproj : int
+        How often to reproject the sampling point into the feasibility space.
     seed : positive integer, optional
         Sets the random number seed. Initialized to the current time stamp if
         None.
@@ -180,7 +202,7 @@ class HRSampler(object):
         the respective reverse variable.
     """
 
-    def __init__(self, model, thinning, seed=None):
+    def __init__(self, model, thinning, nproj=None, seed=None):
         """Initialize a new sampler object."""
         # This currently has to be done to reset the solver basis which is
         # required to get deterministic warmup point generation
@@ -189,7 +211,12 @@ class HRSampler(object):
             raise TypeError("sampling does not work with integer problems :(")
         self.model = model.copy()
         self.thinning = thinning
+        if nproj is None:
+            self.nproj = int(min(len(self.model.variables)**3, 1e6))
+        else:
+            self.nproj = nproj
         self.n_samples = 0
+        self.retries = 0
         self.problem = self.__build_problem()
         # Set up a map from reaction -> forward/reverse variable
         var_idx = {v: idx for idx, v in enumerate(model.variables)}
@@ -252,32 +279,54 @@ class HRSampler(object):
         if necessary).
         """
         self.n_warmup = 0
-        idx = np.hstack([self.fwd_idx, self.rev_idx])
-        self.warmup = np.zeros((len(idx), len(self.model.variables)))
+        reactions = self.model.reactions
+        self.warmup = np.zeros((2 * len(reactions), len(self.model.variables)))
         self.model.objective = Zero
-        self.model.objective.direction = "max"
-        variables = self.model.variables
-        for i in idx:
-            # Omit fixed reactions
-            if self.problem.variable_fixed[i]:
-                LOGGER.info("skipping fixed variable %s" %
-                            variables[i].name)
-                continue
-            self.model.objective.set_linear_coefficients({variables[i]: 1})
-            self.model.slim_optimize()
-            if not self.model.solver.status == OPTIMAL:
-                LOGGER.info("can not maximize variable %s, skipping it" %
-                            variables[i].name)
-                continue
-            primals = self.model.solver.primal_values
-            sol = [primals[v.name] for v in self.model.variables]
-            self.warmup[self.n_warmup, ] = sol
+        for sense in ("min", "max"):
+            self.model.objective_direction = sense
+            for i, r in enumerate(reactions):
+                variables = (self.model.variables[self.fwd_idx[i]],
+                             self.model.variables[self.rev_idx[i]])
+                # Omit fixed reactions if they are non-homogeneous
+                if r.upper_bound - r.lower_bound < bounds_tol:
+                    LOGGER.info("skipping fixed reaction %s" % r.id)
+                    continue
+                self.model.objective.set_linear_coefficients(
+                    {variables[0]: 1, variables[1]: -1})
+                self.model.slim_optimize()
+                if not self.model.solver.status == OPTIMAL:
+                    LOGGER.info("can not maximize reaction %s, skipping it" %
+                                r.id)
+                    continue
+                primals = self.model.solver.primal_values
+                sol = [primals[v.name] for v in self.model.variables]
+                self.warmup[self.n_warmup, ] = sol
+                self.n_warmup += 1
+                # Reset objective
+                self.model.objective.set_linear_coefficients(
+                    {variables[0]: 0, variables[1]: 0})
+        # Shrink to measure
+        self.warmup = self.warmup[0:self.n_warmup, :]
+        # Remove redundant search directions
+        keep = np.logical_not(self._is_redundant(self.warmup))
+        self.warmup = self.warmup[keep, :]
+        self.n_warmup = self.warmup.shape[0]
+
+        # Catch some special cases
+        if len(self.warmup.shape) == 1 or self.warmup.shape[0] == 1:
+            raise ValueError("Your flux cone consists only of a single point!")
+        elif self.n_warmup == 2:
+            if not self.problem.homogeneous:
+                raise ValueError("Can not sample from an inhomogeneous problem"
+                                 " with only 2 search directions :(")
+            LOGGER.info("All search directions on a line, adding another one.")
+            newdir = self.warmup.T.dot([0.25, 0.25])
+            self.warmup = np.vstack([self.warmup, newdir])
             self.n_warmup += 1
-            # revert objective
-            self.model.objective.set_linear_coefficients({variables[i]: 0})
+
         # Shrink warmup points to measure
-        self.warmup = shared_np_array((self.n_warmup, len(variables)),
-                                      self.warmup[0:self.n_warmup, ])
+        self.warmup = shared_np_array(
+            (self.n_warmup, len(self.model.variables)), self.warmup)
 
     def _reproject(self, p):
         """Reproject a point into the feasibility region.
@@ -307,14 +356,28 @@ class HRSampler(object):
             new = nulls.dot(nulls.T.dot(p))
         # Projections may violate bounds
         # set to random point in space in that case
-        if any(self._bounds_dist(new) < -bounds_tol):
+        if any(new != p):
             LOGGER.info("reprojection failed in sample"
                         " %d, using random point in space" % self.n_samples)
-            idx = np.random.randint(self.n_warmup,
-                                    size=min(2, int(np.sqrt(self.n_warmup))))
-            new = self.warmup[idx, :].mean(axis=0)
+            new = self._random_point()
         return new
 
+    def _random_point(self):
+        """Find an approximately random point in the flux cone."""
+        idx = np.random.randint(self.n_warmup,
+                                size=min(2, np.ceil(np.sqrt(self.n_warmup))))
+        return self.warmup[idx, :].mean(axis=0)
+
+    def _is_redundant(self, matrix, cutoff=1.0 - feasibility_tol):
+        """Identify redundant rows in a matrix that can be removed."""
+        # Avoid zero variances
+        extra_col = matrix[:, 0] + 1
+        # Avoid zero rows being correlated with constant rows
+        extra_col[matrix.sum(axis=1) == 0] = 2
+        corr = np.corrcoef(np.c_[matrix, extra_col])
+        corr = np.tril(corr, -1)
+        return (np.abs(corr) > cutoff).any(axis=1)
+
     def _bounds_dist(self, p):
         """Get the lower and upper bound distances. Negative is bad."""
         prob = self.problem
@@ -445,7 +508,12 @@ class ACHRSampler(HRSampler):
     thinning : int, optional
         The thinning factor of the generated sampling chain. A thinning of 10
         means samples are returned every 10 steps.
-    seed : positive integer, optional
+    nproj : int > 0, optional
+        How often to reproject the sampling point into the feasibility space.
+        Avoids numerical issues at the cost of lower sampling speed. If you
+        observe many equality constraint violations with `sampler.validate`
+        you should lower this number.
+    seed : int > 0, optional
         Sets the random number seed. Initialized to the current time stamp if
         None.
 
@@ -465,9 +533,14 @@ class ACHRSampler(HRSampler):
         A matrix of with as many columns as reactions in the model and more
         than 3 rows containing a warmup sample in each row. None if no warmup
         points have been generated yet.
+    retries : int
+        The overall number of sampling retries the sampler has observed.
+        Larger values indicate numerical instabilities.
     seed : positive integer, optional
         Sets the random number seed. Initialized to the current time stamp if
         None.
+    nproj : int
+        How often to reproject the sampling point into the feasibility space.
     fwd_idx : np.array
         Has one entry for each reaction in the model containing the index of
         the respective forward variable.
@@ -503,9 +576,10 @@ class ACHRSampler(HRSampler):
     .. [2] https://github.com/opencobra/cobratoolbox
     """
 
-    def __init__(self, model, thinning=100, seed=None):
+    def __init__(self, model, thinning=100, nproj=None, seed=None):
         """Initialize a new ACHRSampler."""
-        super(ACHRSampler, self).__init__(model, thinning, seed=seed)
+        super(ACHRSampler, self).__init__(model, thinning, nproj=nproj,
+                                          seed=seed)
         self.generate_fva_warmup()
         self.prev = self.center = self.warmup.mean(axis=0)
         np.random.seed(self._seed)
@@ -516,10 +590,11 @@ class ACHRSampler(HRSampler):
         delta = self.warmup[pi, ] - self.center
         self.prev = _step(self, self.prev, delta)
         if self.problem.homogeneous and (self.n_samples *
-                                         self.thinning % nproj == 0):
+                                         self.thinning % self.nproj == 0):
             self.prev = self._reproject(self.prev)
-        self.center = (self.n_samples * self.center + self.prev) / (
-                       self.n_samples + 1)
+            self.center = self._reproject(self.center)
+        self.center = ((self.n_samples * self.center) / (self.n_samples + 1) +
+                       self.prev / (self.n_samples + 1))
         self.n_samples += 1
 
     def sample(self, n, fluxes=True):
@@ -584,15 +659,17 @@ def _sample_chain(args):
         delta = sampler.warmup[pi, ] - center
 
         prev = _step(sampler, prev, delta)
-        if sampler.problem.homogeneous and (n_samples *
-                                            sampler.thinning % nproj == 0):
+        if sampler.problem.homogeneous and (
+                n_samples * sampler.thinning % sampler.nproj == 0):
             prev = sampler._reproject(prev)
+            center = sampler._reproject(center)
         if i % sampler.thinning == 0:
             samples[i//sampler.thinning - 1, ] = prev
-            center = (n_samples * center + prev) / (n_samples + 1)
-            n_samples += 1
+        center = ((n_samples * center) / (n_samples + 1) +
+                  prev / (n_samples + 1))
+        n_samples += 1
 
-    return samples
+    return (sampler.retries, samples)
 
 
 class OptGPSampler(HRSampler):
@@ -610,7 +687,12 @@ class OptGPSampler(HRSampler):
     thinning : int, optional
         The thinning factor of the generated sampling chain. A thinning of 10
         means samples are returned every 10 steps.
-    seed : positive integer, optional
+    nproj : int > 0, optional
+        How often to reproject the sampling point into the feasibility space.
+        Avoids numerical issues at the cost of lower sampling speed. If you
+        observe many equality constraint violations with `sampler.validate`
+        you should lower this number.
+    seed : int > 0, optional
         Sets the random number seed. Initialized to the current time stamp if
         None.
 
@@ -630,9 +712,14 @@ class OptGPSampler(HRSampler):
         A matrix of with as many columns as reactions in the model and more
         than 3 rows containing a warmup sample in each row. None if no warmup
         points have been generated yet.
+    retries : int
+        The overall number of sampling retries the sampler has observed.
+        Larger values indicate numerical instabilities.
     seed : positive integer, optional
         Sets the random number seed. Initialized to the current time stamp if
         None.
+    nproj : int
+        How often to reproject the sampling point into the feasibility space.
     fwd_idx : np.array
         Has one entry for each reaction in the model containing the index of
         the respective forward variable.
@@ -672,7 +759,8 @@ class OptGPSampler(HRSampler):
        https://doi.org/10.1371/journal.pone.0086587
     """
 
-    def __init__(self, model, processes, thinning=100, seed=None):
+    def __init__(self, model, processes, thinning=100, nproj=None,
+                 seed=None):
         """Initialize a new OptGPSampler."""
         super(OptGPSampler, self).__init__(model, thinning, seed=seed)
         self.generate_fva_warmup()
@@ -725,18 +813,19 @@ class OptGPSampler(HRSampler):
             # No with statement or starmap here since Python 2.x
             # does not support it :(
             mp = Pool(self.processes, initializer=mp_init, initargs=(self,))
-            chains = mp.map(_sample_chain, args, chunksize=1)
+            results = mp.map(_sample_chain, args, chunksize=1)
             mp.close()
             mp.join()
-            chains = np.vstack(chains)
+            chains = np.vstack([r[1] for r in results])
+            self.retries += sum(r[0] for r in results)
         else:
             mp_init(self)
-            chains = _sample_chain((n, 0))
+            results = _sample_chain((n, 0))
+            chains = results[1]
 
         # Update the global center
         self.center = (self.n_samples * self.center +
-                       n * np.atleast_2d(chains).mean(axis=0)) / (
-                       self.n_samples + n)
+                       np.atleast_2d(chains).sum(0)) / (self.n_samples + n)
         self.n_samples += n
 
         if fluxes:
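
A brief usage sketch for the new `nproj` and `retries` additions, assuming the bundled textbook test model; the concrete parameter values here are only illustrative:

    import cobra.test
    from cobra.flux_analysis.sampling import ACHRSampler

    model = cobra.test.create_test_model("textbook")
    # Smaller nproj values reproject more often; lower nproj if validate()
    # reports many equality constraint violations in the returned samples.
    achr = ACHRSampler(model, thinning=100, nproj=10000, seed=42)
    samples = achr.sample(100)
    print(achr.retries)                        # > 0 hints at numerical trouble
    print(sum(achr.validate(samples) == "v"))  # number of fully valid samples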


=====================================
cobra/flux_analysis/variability.py
=====================================
--- a/cobra/flux_analysis/variability.py
+++ b/cobra/flux_analysis/variability.py
@@ -2,13 +2,11 @@
 
 from __future__ import absolute_import
 
-import math
 from warnings import warn
-from itertools import chain
 
-import pandas
+from numpy import zeros
 from optlang.symbolics import Zero
-from six import iteritems
+from pandas import DataFrame
 
 from cobra.flux_analysis.loopless import loopless_fva_iter
 from cobra.flux_analysis.parsimonious import add_pfba
@@ -20,38 +18,38 @@ from cobra.util import solver as sutil
 
 def flux_variability_analysis(model, reaction_list=None, loopless=False,
                               fraction_of_optimum=1.0, pfba_factor=None):
-    """Runs flux variability analysis to find the min/max flux values for each
-    each reaction in `reaction_list`.
+    """
+    Determine the minimum and maximum possible flux value for each reaction.
 
     Parameters
     ----------
-    model : a cobra model
+    model : cobra.Model
         The model for which to run the analysis. It will *not* be modified.
     reaction_list : list of cobra.Reaction or str, optional
         The reactions for which to obtain min/max fluxes. If None will use
-        all reactions in the model.
+        all reactions in the model (default).
     loopless : boolean, optional
-        Whether to return only loopless solutions. Ignored for legacy solvers,
-        also see `Notes`.
+        Whether to return only loopless solutions. This is significantly
+        slower. Please also refer to the notes.
     fraction_of_optimum : float, optional
-        Must be <= 1.0. Requires that the objective value is at least
-        fraction * max_objective_value. A value of 0.85 for instance means that
-        the objective has to be at least at 85% percent of its maximum.
+        Must be <= 1.0. Requires that the objective value is at least the
+        fraction times the maximum objective value. A value of 0.85 for
+        instance means that the objective has to be at least 85% of its
+        maximum.
     pfba_factor : float, optional
-        Add additional constraint to the model that the total sum of
-        absolute fluxes must not be larger than this value times the
+        Add an additional constraint to the model that requires the total sum
+        of absolute fluxes to be no larger than this value times the
         smallest possible sum of absolute fluxes, i.e., by setting the value
-        to 1.1 then the total sum of absolute fluxes must not be more than
-        10% larger than the pfba solution. Since the pfba solution is the
-        one that optimally minimizes the total flux sum, the pfba_factor
+        to 1.1 the total sum of absolute fluxes must not be more than
+        10% larger than the pFBA solution. Since the pFBA solution is the
+        one that optimally minimizes the total flux sum, the ``pfba_factor``
         should, if set, be larger than one. Setting this value may lead to
         more realistic predictions of the effective flux bounds.
 
     Returns
     -------
     pandas.DataFrame
-        DataFrame with reaction identifier as the index columns
-
+        A data frame with reaction identifiers as the index and two columns:
         - maximum: indicating the highest possible flux
         - minimum: indicating the lowest possible flux
 
@@ -66,7 +64,7 @@ def flux_variability_analysis(model, reaction_list=None, loopless=False,
     Using the loopless option will lead to a significant increase in
     computation time (about a factor of 100 for large models). However, the
     algorithm used here (see [2]_) is still more than 1000x faster than the
-    "naive" version using `add_loopless(model)`. Also note that if you have
+    "naive" version using ``add_loopless(model)``. Also note that if you have
     included constraints that force a loop (for instance by setting all fluxes
     in a loop to be non-zero) this loop will be included in the solution.
 
@@ -85,87 +83,67 @@ def flux_variability_analysis(model, reaction_list=None, loopless=False,
     """
     if reaction_list is None:
         reaction_list = model.reactions
+    else:
+        reaction_list = model.reactions.get_by_any(reaction_list)
 
-    fva_result = _fva_optlang(model, reaction_list, fraction_of_optimum,
-                              loopless, pfba_factor)
-    return pandas.DataFrame(fva_result).T
-
-
-def _fva_optlang(model, reaction_list, fraction, loopless, pfba_factor):
-    """Helper function to perform FVA with the optlang interface.
-
-    Parameters
-    ----------
-    model : a cobra model
-    reaction_list : list of reactions
-    fraction : float, optional
-        Must be <= 1.0. Requires that the objective value is at least
-        fraction * max_objective_value. A value of 0.85 for instance means that
-        the objective has to be at least at 85% percent of its maximum.
-    loopless : boolean, optional
-        Whether to return only loopless solutions.
-    pfba_factor : float, optional
-        Add additional constraint to the model that the total sum of
-        absolute fluxes must not be larger than this value times the
-        smallest possible sum of absolute fluxes, i.e., by setting the value
-        to 1.1 then the total sum of absolute fluxes must not be more than
-        10% larger than the pfba solution. Setting this value may lead to
-        more realistic predictions of the effective flux bounds.
-
-    Returns
-    -------
-    dict
-        A dictionary containing the results.
-    """
     prob = model.problem
-    fva_results = {rxn.id: {} for rxn in reaction_list}
-    with model as m:
-        m.slim_optimize(error_value=None,
-                        message="There is no optimal solution for the "
-                                "chosen objective!")
-        # Add objective as a variable to the model than set to zero
-        # This also uses the fraction to create the lower bound for the
-        # old objective
-        fva_old_objective = prob.Variable(
-            "fva_old_objective", lb=fraction * m.solver.objective.value)
+    fva_results = DataFrame({
+        "minimum": zeros(len(reaction_list), dtype=float),
+        "maximum": zeros(len(reaction_list), dtype=float)
+    }, index=[r.id for r in reaction_list])
+    with model:
+        # Safety check before setting up FVA.
+        model.slim_optimize(error_value=None,
+                            message="There is no optimal solution for the "
+                                    "chosen objective!")
+        # Add the previous objective as a variable to the model then set it to
+        # zero. This also uses the fraction to create the lower/upper bound for
+        # the old objective.
+        if model.solver.objective.direction == "max":
+            fva_old_objective = prob.Variable(
+                "fva_old_objective",
+                lb=fraction_of_optimum * model.solver.objective.value)
+        else:
+            fva_old_objective = prob.Variable(
+                "fva_old_objective",
+                ub=fraction_of_optimum * model.solver.objective.value)
         fva_old_obj_constraint = prob.Constraint(
-            m.solver.objective.expression - fva_old_objective, lb=0, ub=0,
+            model.solver.objective.expression - fva_old_objective, lb=0, ub=0,
             name="fva_old_objective_constraint")
-        m.add_cons_vars([fva_old_objective, fva_old_obj_constraint])
+        model.add_cons_vars([fva_old_objective, fva_old_obj_constraint])
 
         if pfba_factor is not None:
             if pfba_factor < 1.:
-                warn('pfba_factor should be larger or equal to 1', UserWarning)
-            with m:
-                add_pfba(m, fraction_of_optimum=0)
-                ub = m.slim_optimize(error_value=None)
+                warn("The 'pfba_factor' should be larger or equal to 1.",
+                     UserWarning)
+            with model:
+                add_pfba(model, fraction_of_optimum=0)
+                ub = model.slim_optimize(error_value=None)
                 flux_sum = prob.Variable("flux_sum", ub=pfba_factor * ub)
                 flux_sum_constraint = prob.Constraint(
-                    m.solver.objective.expression - flux_sum, lb=0, ub=0,
+                    model.solver.objective.expression - flux_sum, lb=0, ub=0,
                     name="flux_sum_constraint")
-            m.add_cons_vars([flux_sum, flux_sum_constraint])
+            model.add_cons_vars([flux_sum, flux_sum_constraint])
 
-        m.objective = Zero  # This will trigger the reset as well
+        model.objective = Zero  # This will trigger the reset as well
         for what in ("minimum", "maximum"):
             sense = "min" if what == "minimum" else "max"
             for rxn in reaction_list:
-                r_id = rxn.id
-                rxn = m.reactions.get_by_id(r_id)
                 # The previous objective assignment already triggers a reset
                 # so directly update coefs here to not trigger redundant resets
                 # in the history manager which can take longer than the actual
                 # FVA for small models
-                m.solver.objective.set_linear_coefficients(
+                model.solver.objective.set_linear_coefficients(
                     {rxn.forward_variable: 1, rxn.reverse_variable: -1})
-                m.solver.objective.direction = sense
-                m.slim_optimize()
-                sutil.check_solver_status(m.solver.status)
+                model.solver.objective.direction = sense
+                model.slim_optimize()
+                sutil.check_solver_status(model.solver.status)
                 if loopless:
-                    value = loopless_fva_iter(m, rxn)
+                    value = loopless_fva_iter(model, rxn)
                 else:
-                    value = m.solver.objective.value
-                fva_results[r_id][what] = value
-                m.solver.objective.set_linear_coefficients(
+                    value = model.solver.objective.value
+                fva_results.at[rxn.id, what] = value
+                model.solver.objective.set_linear_coefficients(
                     {rxn.forward_variable: 0, rxn.reverse_variable: 0})
 
     return fva_results
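
As a quick illustration of the reworked interface, a hedged usage sketch with the textbook test model (the reaction identifiers and parameter values are only examples):

    import cobra.test
    from cobra.flux_analysis import flux_variability_analysis

    model = cobra.test.create_test_model("textbook")
    # Reactions can be passed as identifiers or objects; require at least 90%
    # of the optimal objective and cap total flux at 1.1x the pFBA minimum.
    fva = flux_variability_analysis(model, reaction_list=["PGI", "FBA"],
                                    fraction_of_optimum=0.9, pfba_factor=1.1)
    print(fva)  # data frame indexed by reaction id with minimum/maximum columns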


=====================================
cobra/test/test_flux_analysis.py
=====================================
--- a/cobra/test/test_flux_analysis.py
+++ b/cobra/test/test_flux_analysis.py
@@ -456,6 +456,13 @@ class TestCobraFluxAnalysis:
         with pytest.raises(Infeasible):
             flux_variability_analysis(infeasible_model)
 
+    def test_fva_minimization(self, model):
+        model.objective = model.reactions.EX_glc__D_e
+        model.objective_direction = 'min'
+        solution = flux_variability_analysis(model, fraction_of_optimum=.95)
+        assert solution.at['EX_glc__D_e', 'minimum'] == -10.0
+        assert solution.at['EX_glc__D_e', 'maximum'] == -9.5
+
     def test_find_blocked_reactions_solver_none(self, model):
         result = find_blocked_reactions(model, model.reactions[40:46])
         assert result == ['FRUpts2']
@@ -752,7 +759,7 @@ class TestCobraFluxSampling:
 
     def test_fixed_seed(self, model):
         s = sample(model, 1, seed=42)
-        assert numpy.allclose(s.TPI[0], 8.9516392250671544)
+        assert numpy.allclose(s.TPI[0], 9.12037487)
 
     def test_equality_constraint(self, model):
         model.reactions.ACALD.bounds = (-1.5, -1.5)
@@ -847,6 +854,48 @@ class TestCobraFluxSampling:
         proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
         assert all(self.optgp.validate(proj) == "v")
 
+    def test_complicated_model(self):
+        """Difficult model since the online mean calculation is numerically
+        unstable, so many samples weakly violate the equality constraints."""
+        model = Model('flux_split')
+        reaction1 = Reaction('V1')
+        reaction2 = Reaction('V2')
+        reaction3 = Reaction('V3')
+        reaction1.lower_bound = 0
+        reaction2.lower_bound = 0
+        reaction3.lower_bound = 0
+        reaction1.upper_bound = 6
+        reaction2.upper_bound = 8
+        reaction3.upper_bound = 10
+        A = Metabolite('A')
+        reaction1.add_metabolites({A: -1})
+        reaction2.add_metabolites({A: -1})
+        reaction3.add_metabolites({A: 1})
+        model.add_reactions([reaction1])
+        model.add_reactions([reaction2])
+        model.add_reactions([reaction3])
+
+        optgp = OptGPSampler(model, 1, seed=42)
+        achr = ACHRSampler(model, seed=42)
+        optgp_samples = optgp.sample(100)
+        achr_samples = achr.sample(100)
+        assert any(optgp_samples.corr().abs() < 1.0)
+        assert any(achr_samples.corr().abs() < 1.0)
+        # > 95% are valid
+        assert(sum(optgp.validate(optgp_samples) == "v") > 95)
+        assert(sum(achr.validate(achr_samples) == "v") > 95)
+
+    def test_single_point_space(self, model):
+        """Model where constraints reduce the sampling space to one point."""
+        pfba_sol = pfba(model)
+        pfba_const = model.problem.Constraint(
+            sum(model.variables), ub=pfba_sol.objective_value)
+        model.add_cons_vars(pfba_const)
+        model.reactions.Biomass_Ecoli_core.lower_bound = \
+            pfba_sol.fluxes.Biomass_Ecoli_core
+        with pytest.raises(ValueError):
+            s = sample(model, 1)
+
 
 class TestProductionEnvelope:
     """Test the production envelope."""


=====================================
cobra/test/test_solver_model.py
=====================================
--- a/cobra/test/test_solver_model.py
+++ b/cobra/test/test_solver_model.py
@@ -463,6 +463,26 @@ class TestReaction:
                 already_included_metabolite.id].expression.has(
                 -10 * reaction.reverse_variable)
 
+    def test_reaction_imul(self, model):
+        with model:
+            model.reactions.EX_glc__D_e *= 100
+            assert model.constraints.glc__D_e.expression.coeff(
+                model.variables.EX_glc__D_e) == -100
+            assert model.reactions.EX_glc__D_e.reaction == \
+                '100.0 glc__D_e <=> '
+
+        assert model.constraints.glc__D_e.expression.coeff(
+            model.variables.EX_glc__D_e) == -1
+        assert model.reactions.EX_glc__D_e.reaction == 'glc__D_e <=> '
+
+        with model:
+            model.reactions.EX_glc__D_e *= -2
+            assert model.reactions.EX_glc__D_e.bounds == (-1000.0, 10.0)
+            assert model.reactions.EX_glc__D_e.reaction == ' <=> 2.0 glc__D_e'
+
+        assert model.reactions.EX_glc__D_e.bounds == (-10, 1000.0)
+        assert model.reactions.EX_glc__D_e.reaction == 'glc__D_e <=> '
+
     # def test_pop(self, model):
     #     pgi = model.reactions.PGI
     #     g6p = model.metabolites.get_by_id("g6p_c")


=====================================
cobra/util/solver.py
=====================================
--- a/cobra/util/solver.py
+++ b/cobra/util/solver.py
@@ -421,5 +421,8 @@ def assert_optimal(model, message='optimization failed'):
     message : str (optional)
         Message to for the exception if solver status was not optimal.
     """
-    if model.solver.status != optlang.interface.OPTIMAL:
-        raise OPTLANG_TO_EXCEPTIONS_DICT[model.solver.status](message)
+    status = model.solver.status
+    if status != optlang.interface.OPTIMAL:
+        exception_cls = OPTLANG_TO_EXCEPTIONS_DICT.get(
+            status, OptimizationError)
+        raise exception_cls("{} ({})".format(message, status))
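
A small sketch of the intended behaviour, assuming the textbook test model; forcing infeasibility via the ATPM lower bound is just one illustrative way to trigger a non-optimal status:

    import cobra.test
    from cobra.exceptions import Infeasible, OptimizationError
    from cobra.util.solver import assert_optimal

    model = cobra.test.create_test_model("textbook")
    model.reactions.ATPM.lower_bound = 500  # unreachable flux -> infeasible
    model.slim_optimize()                   # returns nan instead of raising
    try:
        assert_optimal(model, "FBA failed")
    except Infeasible as err:
        print(err)  # "FBA failed (infeasible)" -- the status is appended
    except OptimizationError as err:
        print(err)  # fallback for statuses without a dedicated exception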


=====================================
documentation_builder/constraints_objectives.ipynb
=====================================
--- a/documentation_builder/constraints_objectives.ipynb
+++ b/documentation_builder/constraints_objectives.ipynb
@@ -2,40 +2,28 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Tailored constraints, variables and objectives"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Thanks to the use of symbolic expressions via the optlang mathematical modeling package, it is relatively straight-forward to add new variables, constraints and advanced objectives that can not easily be formulated as a combination of different reaction and their corresponding upper and lower bounds. Here we demonstrate this optlang functionality which is exposed via the `model.solver.interface`."
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Constraints"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Suppose we want to ensure that two reactions have the same flux in our model. We can add this criteria as constraint to our model using the optlang solver interface by simply defining the relevant expression as follows."
    ]
@@ -43,11 +31,7 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import cobra.test\n",
@@ -57,11 +41,7 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "same_flux = model.problem.Constraint(\n",
@@ -73,10 +53,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "The flux for our reaction of interest is obtained by the `model.reactions.FBA.flux_expression` which is simply the sum of the forward and reverse flux, i.e.,"
    ]
@@ -84,11 +61,7 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -107,10 +80,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Now I can maximize growth rate whilst the fluxes of reactions 'FBA' and 'NH4t' are constrained to be (near) identical."
    ]
@@ -118,11 +88,7 @@
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -140,10 +106,30 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
+   "source": [
+    "It is also possible to add many constraints at once. For large models with constraints involving many reactions, the efficient way to do this is to first build a dictionary of the linear coefficients for every flux, and then add the constraint at once. For example, suppose we want to add a constraint on the sum of the absolute values of every flux in the network to be less than 100:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "coefficients = dict()\n",
+    "for rxn in model.reactions:\n",
+    "    coefficients[rxn.forward_variable] = 1.\n",
+    "    coefficients[rxn.reverse_variable] = 1.\n",
+    "constraint = model.problem.Constraint(0, lb=0, ub=100)\n",
+    "model.add_cons_vars(constraint)\n",
+    "model.solver.update()\n",
+    "constraint.set_linear_coefficients(coefficients=coefficients)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Objectives\n",
     "\n",
@@ -152,10 +138,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Simple objective such as the maximization of the flux through one or more reactions can conveniently be done by simply \n",
     "assigning to the `model.objective` property as we have seen in previous chapters, e.g.,"
@@ -164,11 +147,7 @@
   {
    "cell_type": "code",
    "execution_count": 5,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -188,10 +167,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "The objectives mathematical expression is seen by"
    ]
@@ -199,11 +175,7 @@
   {
    "cell_type": "code",
    "execution_count": 6,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -222,20 +194,14 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "But suppose we need a more complicated objective, such as minimizing the Euclidean distance of the solution to the origin minus another variable, while subject to additional linear constraints. This is an objective function with both linear and quadratic components. "
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Consider the example problem:\n",
     "\n",
@@ -255,11 +221,7 @@
   {
    "cell_type": "code",
    "execution_count": 7,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -282,10 +244,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "We return to the textbook model and set the solver to one that can handle quadratic objectives such as cplex. We then add the linear constraint that the sum of our x and y reactions, that we set to FBA and NH4t, must equal 2."
    ]
@@ -294,9 +253,7 @@
    "cell_type": "code",
    "execution_count": 8,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -310,10 +267,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Next we add the quadratic objective"
    ]
@@ -321,11 +275,7 @@
   {
    "cell_type": "code",
    "execution_count": 9,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "quadratic_objective = model.problem.Objective(\n",
@@ -340,11 +290,7 @@
   {
    "cell_type": "code",
    "execution_count": 10,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -360,20 +306,14 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Variables"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "We can also create additional variables to facilitate studying the effects of new constraints and variables. Suppose we want to study the difference in flux between nitrogen and carbon uptake whilst we block other reactions. For this it will may help to add another variable representing this difference."
    ]
@@ -382,9 +322,7 @@
    "cell_type": "code",
    "execution_count": 11,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -394,10 +332,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "We use constraints to define what values this variable shall take"
    ]
@@ -405,11 +340,7 @@
   {
    "cell_type": "code",
    "execution_count": 12,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "constraint = model.problem.Constraint(\n",
@@ -422,10 +353,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "Now we can access that difference directly during our knock-out exploration by looking at its primal value."
    ]
@@ -433,11 +361,7 @@
   {
    "cell_type": "code",
    "execution_count": 13,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -476,9 +400,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.5.2"
+   "version": "3.6.3"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }


=====================================
documentation_builder/sampling.ipynb
=====================================
--- a/documentation_builder/sampling.ipynb
+++ b/documentation_builder/sampling.ipynb
@@ -30,6 +30,19 @@
      "data": {
       "text/html": [
        "<div>\n",
+       "<style scoped>\n",
+       "    .dataframe tbody tr th:only-of-type {\n",
+       "        vertical-align: middle;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "</style>\n",
        "<table border=\"1\" class=\"dataframe\">\n",
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
@@ -60,123 +73,123 @@
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>0</th>\n",
-       "      <td>-3.706944</td>\n",
-       "      <td>-0.163964</td>\n",
-       "      <td>-0.295823</td>\n",
-       "      <td>8.975852</td>\n",
-       "      <td>8.975852</td>\n",
-       "      <td>-0.295823</td>\n",
-       "      <td>4.847986</td>\n",
-       "      <td>6.406533</td>\n",
-       "      <td>-0.081797</td>\n",
-       "      <td>-3.542980</td>\n",
+       "      <td>-0.577302</td>\n",
+       "      <td>-0.149662</td>\n",
+       "      <td>-0.338001</td>\n",
+       "      <td>10.090870</td>\n",
+       "      <td>10.090870</td>\n",
+       "      <td>-0.338001</td>\n",
+       "      <td>0.997694</td>\n",
+       "      <td>4.717467</td>\n",
+       "      <td>-0.070599</td>\n",
+       "      <td>-0.427639</td>\n",
        "      <td>...</td>\n",
-       "      <td>-1.649393</td>\n",
-       "      <td>20.917568</td>\n",
-       "      <td>20.977290</td>\n",
-       "      <td>744.206008</td>\n",
-       "      <td>-6.406533</td>\n",
-       "      <td>1.639515</td>\n",
-       "      <td>1.670533</td>\n",
-       "      <td>1.639515</td>\n",
-       "      <td>1.635542</td>\n",
-       "      <td>6.256787</td>\n",
+       "      <td>-2.255649</td>\n",
+       "      <td>6.152278</td>\n",
+       "      <td>6.692068</td>\n",
+       "      <td>821.012351</td>\n",
+       "      <td>-4.717467</td>\n",
+       "      <td>2.230392</td>\n",
+       "      <td>133.608893</td>\n",
+       "      <td>2.230392</td>\n",
+       "      <td>2.220236</td>\n",
+       "      <td>5.263234</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>1</th>\n",
-       "      <td>-1.340710</td>\n",
-       "      <td>-0.175665</td>\n",
-       "      <td>-0.429169</td>\n",
-       "      <td>11.047827</td>\n",
-       "      <td>11.047827</td>\n",
-       "      <td>-0.429169</td>\n",
-       "      <td>2.901598</td>\n",
-       "      <td>7.992916</td>\n",
-       "      <td>-0.230564</td>\n",
-       "      <td>-1.165045</td>\n",
+       "      <td>-0.639279</td>\n",
+       "      <td>-0.505704</td>\n",
+       "      <td>-0.031929</td>\n",
+       "      <td>10.631865</td>\n",
+       "      <td>10.631865</td>\n",
+       "      <td>-0.031929</td>\n",
+       "      <td>4.207702</td>\n",
+       "      <td>6.763224</td>\n",
+       "      <td>-0.024720</td>\n",
+       "      <td>-0.133575</td>\n",
        "      <td>...</td>\n",
-       "      <td>-0.066975</td>\n",
-       "      <td>24.735567</td>\n",
-       "      <td>24.850041</td>\n",
-       "      <td>710.481004</td>\n",
-       "      <td>-7.992916</td>\n",
-       "      <td>0.056442</td>\n",
-       "      <td>9.680476</td>\n",
-       "      <td>0.056442</td>\n",
-       "      <td>0.052207</td>\n",
-       "      <td>7.184752</td>\n",
+       "      <td>-0.769792</td>\n",
+       "      <td>14.894313</td>\n",
+       "      <td>14.929989</td>\n",
+       "      <td>521.410118</td>\n",
+       "      <td>-6.763224</td>\n",
+       "      <td>0.755207</td>\n",
+       "      <td>66.656770</td>\n",
+       "      <td>0.755207</td>\n",
+       "      <td>0.749341</td>\n",
+       "      <td>7.135499</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>2</th>\n",
-       "      <td>-1.964087</td>\n",
-       "      <td>-0.160334</td>\n",
-       "      <td>-0.618029</td>\n",
-       "      <td>9.811474</td>\n",
-       "      <td>9.811474</td>\n",
-       "      <td>-0.618029</td>\n",
-       "      <td>17.513791</td>\n",
-       "      <td>8.635576</td>\n",
-       "      <td>-0.284992</td>\n",
-       "      <td>-1.803753</td>\n",
+       "      <td>-1.983410</td>\n",
+       "      <td>-0.434676</td>\n",
+       "      <td>-0.408318</td>\n",
+       "      <td>11.046294</td>\n",
+       "      <td>11.046294</td>\n",
+       "      <td>-0.408318</td>\n",
+       "      <td>5.510960</td>\n",
+       "      <td>7.240802</td>\n",
+       "      <td>-0.501086</td>\n",
+       "      <td>-1.548735</td>\n",
        "      <td>...</td>\n",
-       "      <td>-4.075515</td>\n",
-       "      <td>23.425719</td>\n",
-       "      <td>23.470968</td>\n",
-       "      <td>696.114154</td>\n",
-       "      <td>-8.635576</td>\n",
-       "      <td>4.063291</td>\n",
-       "      <td>52.316496</td>\n",
-       "      <td>4.063291</td>\n",
-       "      <td>4.058376</td>\n",
-       "      <td>5.122237</td>\n",
+       "      <td>-0.088852</td>\n",
+       "      <td>15.211972</td>\n",
+       "      <td>15.807554</td>\n",
+       "      <td>756.884622</td>\n",
+       "      <td>-7.240802</td>\n",
+       "      <td>0.065205</td>\n",
+       "      <td>42.830676</td>\n",
+       "      <td>0.065205</td>\n",
+       "      <td>0.055695</td>\n",
+       "      <td>8.109647</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>3</th>\n",
-       "      <td>-0.838442</td>\n",
-       "      <td>-0.123865</td>\n",
-       "      <td>-0.376067</td>\n",
-       "      <td>11.869552</td>\n",
-       "      <td>11.869552</td>\n",
-       "      <td>-0.376067</td>\n",
-       "      <td>7.769872</td>\n",
-       "      <td>9.765178</td>\n",
-       "      <td>-0.325219</td>\n",
-       "      <td>-0.714577</td>\n",
+       "      <td>-1.893551</td>\n",
+       "      <td>-0.618887</td>\n",
+       "      <td>-0.612598</td>\n",
+       "      <td>8.879426</td>\n",
+       "      <td>8.879426</td>\n",
+       "      <td>-0.612598</td>\n",
+       "      <td>6.194372</td>\n",
+       "      <td>5.382222</td>\n",
+       "      <td>-0.563573</td>\n",
+       "      <td>-1.274664</td>\n",
        "      <td>...</td>\n",
-       "      <td>-0.838094</td>\n",
-       "      <td>23.446704</td>\n",
-       "      <td>23.913036</td>\n",
-       "      <td>595.787313</td>\n",
-       "      <td>-9.765178</td>\n",
-       "      <td>0.822987</td>\n",
-       "      <td>36.019720</td>\n",
-       "      <td>0.822987</td>\n",
-       "      <td>0.816912</td>\n",
-       "      <td>8.364314</td>\n",
+       "      <td>-1.728387</td>\n",
+       "      <td>15.960829</td>\n",
+       "      <td>17.404437</td>\n",
+       "      <td>556.750972</td>\n",
+       "      <td>-5.382222</td>\n",
+       "      <td>1.714682</td>\n",
+       "      <td>37.386748</td>\n",
+       "      <td>1.714682</td>\n",
+       "      <td>1.709171</td>\n",
+       "      <td>7.003325</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>4</th>\n",
-       "      <td>-0.232088</td>\n",
-       "      <td>-0.034346</td>\n",
-       "      <td>-1.067684</td>\n",
-       "      <td>7.972039</td>\n",
-       "      <td>7.972039</td>\n",
-       "      <td>-1.067684</td>\n",
-       "      <td>5.114975</td>\n",
-       "      <td>5.438125</td>\n",
-       "      <td>-0.787864</td>\n",
-       "      <td>-0.197742</td>\n",
+       "      <td>-1.759520</td>\n",
+       "      <td>-0.321021</td>\n",
+       "      <td>-0.262520</td>\n",
+       "      <td>10.801480</td>\n",
+       "      <td>10.801480</td>\n",
+       "      <td>-0.262520</td>\n",
+       "      <td>4.815146</td>\n",
+       "      <td>9.236588</td>\n",
+       "      <td>-0.359817</td>\n",
+       "      <td>-1.438499</td>\n",
        "      <td>...</td>\n",
-       "      <td>-3.109205</td>\n",
-       "      <td>8.902309</td>\n",
-       "      <td>9.888083</td>\n",
-       "      <td>584.552692</td>\n",
-       "      <td>-5.438125</td>\n",
-       "      <td>3.088152</td>\n",
-       "      <td>12.621811</td>\n",
-       "      <td>3.088152</td>\n",
-       "      <td>3.079686</td>\n",
-       "      <td>6.185089</td>\n",
+       "      <td>-2.840577</td>\n",
+       "      <td>12.379023</td>\n",
+       "      <td>13.141259</td>\n",
+       "      <td>440.132011</td>\n",
+       "      <td>-9.236588</td>\n",
+       "      <td>2.822071</td>\n",
+       "      <td>0.292885</td>\n",
+       "      <td>2.822071</td>\n",
+       "      <td>2.814629</td>\n",
+       "      <td>6.205245</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
        "</table>\n",
@@ -184,26 +197,26 @@
        "</div>"
       ],
       "text/plain": [
-       "      ACALD    ACALDt      ACKr     ACONTa     ACONTb     ACt2r       ADK1  \\\n",
-       "0 -3.706944 -0.163964 -0.295823   8.975852   8.975852 -0.295823   4.847986   \n",
-       "1 -1.340710 -0.175665 -0.429169  11.047827  11.047827 -0.429169   2.901598   \n",
-       "2 -1.964087 -0.160334 -0.618029   9.811474   9.811474 -0.618029  17.513791   \n",
-       "3 -0.838442 -0.123865 -0.376067  11.869552  11.869552 -0.376067   7.769872   \n",
-       "4 -0.232088 -0.034346 -1.067684   7.972039   7.972039 -1.067684   5.114975   \n",
+       "      ACALD    ACALDt      ACKr     ACONTa     ACONTb     ACt2r      ADK1  \\\n",
+       "0 -0.577302 -0.149662 -0.338001  10.090870  10.090870 -0.338001  0.997694   \n",
+       "1 -0.639279 -0.505704 -0.031929  10.631865  10.631865 -0.031929  4.207702   \n",
+       "2 -1.983410 -0.434676 -0.408318  11.046294  11.046294 -0.408318  5.510960   \n",
+       "3 -1.893551 -0.618887 -0.612598   8.879426   8.879426 -0.612598  6.194372   \n",
+       "4 -1.759520 -0.321021 -0.262520  10.801480  10.801480 -0.262520  4.815146   \n",
        "\n",
        "      AKGDH    AKGt2r    ALCD2x    ...          RPI   SUCCt2_2     SUCCt3  \\\n",
-       "0  6.406533 -0.081797 -3.542980    ...    -1.649393  20.917568  20.977290   \n",
-       "1  7.992916 -0.230564 -1.165045    ...    -0.066975  24.735567  24.850041   \n",
-       "2  8.635576 -0.284992 -1.803753    ...    -4.075515  23.425719  23.470968   \n",
-       "3  9.765178 -0.325219 -0.714577    ...    -0.838094  23.446704  23.913036   \n",
-       "4  5.438125 -0.787864 -0.197742    ...    -3.109205   8.902309   9.888083   \n",
+       "0  4.717467 -0.070599 -0.427639    ...    -2.255649   6.152278   6.692068   \n",
+       "1  6.763224 -0.024720 -0.133575    ...    -0.769792  14.894313  14.929989   \n",
+       "2  7.240802 -0.501086 -1.548735    ...    -0.088852  15.211972  15.807554   \n",
+       "3  5.382222 -0.563573 -1.274664    ...    -1.728387  15.960829  17.404437   \n",
+       "4  9.236588 -0.359817 -1.438499    ...    -2.840577  12.379023  13.141259   \n",
        "\n",
-       "        SUCDi    SUCOAS      TALA       THD2      TKT1      TKT2       TPI  \n",
-       "0  744.206008 -6.406533  1.639515   1.670533  1.639515  1.635542  6.256787  \n",
-       "1  710.481004 -7.992916  0.056442   9.680476  0.056442  0.052207  7.184752  \n",
-       "2  696.114154 -8.635576  4.063291  52.316496  4.063291  4.058376  5.122237  \n",
-       "3  595.787313 -9.765178  0.822987  36.019720  0.822987  0.816912  8.364314  \n",
-       "4  584.552692 -5.438125  3.088152  12.621811  3.088152  3.079686  6.185089  \n",
+       "        SUCDi    SUCOAS      TALA        THD2      TKT1      TKT2       TPI  \n",
+       "0  821.012351 -4.717467  2.230392  133.608893  2.230392  2.220236  5.263234  \n",
+       "1  521.410118 -6.763224  0.755207   66.656770  0.755207  0.749341  7.135499  \n",
+       "2  756.884622 -7.240802  0.065205   42.830676  0.065205  0.055695  8.109647  \n",
+       "3  556.750972 -5.382222  1.714682   37.386748  1.714682  1.709171  7.003325  \n",
+       "4  440.132011 -9.236588  2.822071    0.292885  2.822071  2.814629  6.205245  \n",
        "\n",
        "[5 rows x 95 columns]"
       ]
@@ -239,11 +252,11 @@
      "output_type": "stream",
      "text": [
       "One process:\n",
-      "CPU times: user 5.31 s, sys: 433 ms, total: 5.74 s\n",
-      "Wall time: 5.27 s\n",
+      "CPU times: user 7.91 s, sys: 4.09 s, total: 12 s\n",
+      "Wall time: 6.52 s\n",
       "Two processes:\n",
-      "CPU times: user 217 ms, sys: 488 ms, total: 705 ms\n",
-      "Wall time: 2.8 s\n"
+      "CPU times: user 288 ms, sys: 495 ms, total: 783 ms\n",
+      "Wall time: 3.52 s\n"
      ]
     }
    ],
@@ -396,8 +409,7 @@
     {
      "data": {
       "text/plain": [
-       "array(['le'], \n",
-       "      dtype='<U3')"
+       "array(['le'], dtype='<U3')"
       ]
      },
      "execution_count": 8,
@@ -434,8 +446,7 @@
        "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
        "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
        "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
-       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v'], \n",
-       "      dtype='<U3')"
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v'], dtype='<U3')"
       ]
      },
      "execution_count": 9,
@@ -451,6 +462,34 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Even though most models are numerically stable enought that the sampler should only generate valid samples we still urge to check this. `validate` is pretty fast and works quickly even for large models and many samples. If you find invalid samples you do not necessarily have to rerun the entire sampling but can exclude them from the sample DataFrame."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "100"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "s1_valid = s1[achr.validate(s1) == \"v\"]\n",
+    "len(s1_valid)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "### Batch sampling"
    ]
   },
@@ -465,14 +504,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Usually 8.70% +- 2.72% grow...\n"
+      "Usually 10.90% +- 3.83% grow...\n"
      ]
     }
    ],
@@ -498,7 +537,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 12,
    "metadata": {
     "collapsed": true
    },
@@ -517,23 +556,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "0    0.175547\n",
-      "1    0.111499\n",
-      "2    0.123073\n",
-      "3    0.151874\n",
-      "4    0.122541\n",
-      "5    0.121878\n",
-      "6    0.147333\n",
-      "7    0.106499\n",
-      "8    0.174448\n",
-      "9    0.143273\n",
+      "0    0.118106\n",
+      "1    0.120205\n",
+      "2    0.206187\n",
+      "3    0.198633\n",
+      "4    0.206575\n",
+      "5    0.119032\n",
+      "6    0.119231\n",
+      "7    0.127219\n",
+      "8    0.120086\n",
+      "9    0.182586\n",
       "Name: Biomass_Ecoli_core, dtype: float64\n"
      ]
     }
@@ -568,9 +607,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.5.3"
+   "version": "3.6.4"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 2
 }


=====================================
release-notes/0.11.1.md
=====================================
--- /dev/null
+++ b/release-notes/0.11.1.md
@@ -0,0 +1,6 @@
+# Release notes for cobrapy 0.11.1
+
+## Fixes
+
+* Catch all `optlang` errors and re-raise them as `OptimizationError` with
+  corresponding message.
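
As a quick, illustrative sketch of this fix (not part of the diff above; the infeasible bound value is made up), solver failures now surface as `OptimizationError` rather than a backend-specific optlang exception:

    from cobra.exceptions import OptimizationError
    from cobra.test import create_test_model

    model = create_test_model("textbook")
    # Deliberately make the problem infeasible (hypothetical bound value).
    model.reactions.ATPM.lower_bound = 1000.0
    try:
        # error_value=None tells slim_optimize to raise instead of returning nan.
        model.slim_optimize(error_value=None)
    except OptimizationError as err:
        print("Optimization failed:", err)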


=====================================
release-notes/0.11.2.md
=====================================
--- /dev/null
+++ b/release-notes/0.11.2.md
@@ -0,0 +1,5 @@
+# Release notes for cobrapy 0.11.2
+
+## Fixes
+
+* Correctly set the constraint for a previous minimization objective in FVA.
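
For context (an illustrative sketch, not taken from the diff), this is the kind of call the fix affects: flux variability analysis on a model whose objective has been switched to minimization.

    from cobra.flux_analysis import flux_variability_analysis
    from cobra.test import create_test_model

    model = create_test_model("textbook")
    # Switch the (normally maximized) objective to a minimization.
    model.objective.direction = "min"
    # FVA records the previous objective value as a constraint while each
    # reaction flux is varied; the fix makes that constraint correct when
    # the objective is a minimization.
    fva = flux_variability_analysis(
        model, reaction_list=model.reactions[:5], fraction_of_optimum=0.9)
    print(fva)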


=====================================
release-notes/0.11.3.md
=====================================
--- /dev/null
+++ b/release-notes/0.11.3.md
@@ -0,0 +1,10 @@
+# Release notes for cobrapy 0.11.3
+
+## Fixes
+
+* Improve convergence and stability of sampling.
+* Reverse inplace operations after leaving a context.
+
+## Docs
+
+* Better description of custom constraints.
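
To illustrate the "reverse inplace operations" item above (a minimal sketch, not part of the diff): an in-place change made inside a model context is now rolled back when the context exits.

    from cobra.test import create_test_model

    model = create_test_model("textbook")
    pfk = model.reactions.PFK
    print(pfk.reaction)      # original stoichiometry
    with model:
        pfk *= 2             # scale all stoichiometric coefficients in place
        print(pfk.reaction)  # doubled coefficients inside the context
    print(pfk.reaction)      # scaling undone after leaving the context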


=====================================
release-notes/next-release.md
=====================================
--- a/release-notes/next-release.md
+++ b/release-notes/next-release.md
@@ -2,6 +2,9 @@
 
 ## Fixes
 
+* Correctly set a constraint in FVA for recording the old objective
+  value when the objective is a minimization.
+
 ## New features
 
 ## Deprecated features


=====================================
setup.cfg
=====================================
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.11.0
+current_version = 0.11.3
 commit = True
 tag = True
 parse = (?P<major>\d+)


=====================================
setup.py
=====================================
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ except IOError:
 
 setup(
     name="cobra",
-    version="0.11.0",
+    version="0.11.3",
     packages=find_packages(),
     setup_requires=setup_requirements,
     install_requires=[
@@ -44,7 +44,7 @@ setup(
         "future",
         "swiglpk",
         "ruamel.yaml<0.15",
-        "numpy>=1.6",
+        "numpy>=1.13",
         "pandas>=0.17.0",
         "optlang>=1.2.5",
         "tabulate"



View it on GitLab: https://salsa.debian.org/med-team/python-cobra/commit/e3e44933c3e06cdb4e59ad2d5b1e8d649a56fb19
