[med-svn] [Git][med-team/python-dendropy][upstream] New upstream version 4.5.2

Nilesh Patra gitlab at salsa.debian.org
Wed Apr 21 08:23:33 BST 2021



Nilesh Patra pushed to branch upstream at Debian Med / python-dendropy


Commits:
63174079 by Nilesh Patra at 2021-04-21T12:47:07+05:30
New upstream version 4.5.2
- - - - -


12 changed files:

- PKG-INFO
- src/DendroPy.egg-info/PKG-INFO
- src/DendroPy.egg-info/SOURCES.txt
- src/dendropy/__init__.py
- src/dendropy/dataio/ioservice.py
- src/dendropy/dataio/nexusprocessing.py
- src/dendropy/dataio/nexusreader.py
- src/dendropy/datamodel/basemodel.py
- src/dendropy/model/birthdeath.py
- src/dendropy/model/protractedspeciation.py
- + tests/data/trees/pythonidae.lower.mle.nex
- + tests/data/trees/pythonidae.upper.mle.nex


Changes:

=====================================
PKG-INFO
=====================================
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: DendroPy
-Version: 4.5.1
+Version: 4.5.2
 Summary: A Python library for phylogenetics and phylogenetic computing: reading, writing, simulation, processing and manipulation of phylogenetic trees (phylogenies) and characters.
 Home-page: http://packages.python.org/DendroPy/
 Author: Jeet Sukumaran and Mark T. Holder
@@ -65,7 +65,7 @@ Description: .. image:: https://raw.githubusercontent.com/jeetsukumaran/DendroPy
         Current Release
         ===============
         
-        The current release of DendroPy is version 4.5.1.
+        The current release of DendroPy is version 4.5.2.
         
         
 Keywords: phylogenetics phylogeny phylogenies phylogeography evolution evolutionary biology systematics coalescent population genetics phyloinformatics bioinformatics


=====================================
src/DendroPy.egg-info/PKG-INFO
=====================================
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: DendroPy
-Version: 4.5.1
+Version: 4.5.2
 Summary: A Python library for phylogenetics and phylogenetic computing: reading, writing, simulation, processing and manipulation of phylogenetic trees (phylogenies) and characters.
 Home-page: http://packages.python.org/DendroPy/
 Author: Jeet Sukumaran and Mark T. Holder
@@ -65,7 +65,7 @@ Description: .. image:: https://raw.githubusercontent.com/jeetsukumaran/DendroPy
         Current Release
         ===============
         
-        The current release of DendroPy is version 4.5.1.
+        The current release of DendroPy is version 4.5.2.
         
         
 Keywords: phylogenetics phylogeny phylogenies phylogeography evolution evolutionary biology systematics coalescent population genetics phyloinformatics bioinformatics


=====================================
src/DendroPy.egg-info/SOURCES.txt
=====================================
@@ -625,6 +625,7 @@ tests/data/trees/pythonidae.annotated.bad.nexml
 tests/data/trees/pythonidae.annotated.nexml
 tests/data/trees/pythonidae.beast.mcmc.trees
 tests/data/trees/pythonidae.beast.summary.tre
+tests/data/trees/pythonidae.lower.mle.nex
 tests/data/trees/pythonidae.mb.con
 tests/data/trees/pythonidae.mb.run1.t
 tests/data/trees/pythonidae.mb.run2.t
@@ -643,5 +644,6 @@ tests/data/trees/pythonidae.reference-trees.nexus
 tests/data/trees/pythonidae.reference-trees.no-taxa-block.nexus
 tests/data/trees/pythonidae.reference-trees.no-taxa-no-translate-block.nexus
 tests/data/trees/pythonidae.reference-trees.taxon-numbers-only.newick
+tests/data/trees/pythonidae.upper.mle.nex
 tests/data/trees/rana.trees.nexus
 tests/data/trees/treebase_s373.xml
\ No newline at end of file


=====================================
src/dendropy/__init__.py
=====================================
@@ -104,7 +104,7 @@ from dendropy.legacy import treesum
 ## PACKAGE METADATA
 import collections
 __project__ = "DendroPy"
-__version__ = "4.5.1"
+__version__ = "4.5.2"
 __author__ = "Jeet Sukumaran and Mark T. Holder"
 __copyright__ = "Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder."
 __citation__ = "Sukumaran, J and MT Holder. 2010. DendroPy: a Python library for phylogenetic computing. Bioinformatics 26: 1569-1571."


=====================================
src/dendropy/dataio/ioservice.py
=====================================
@@ -20,6 +20,7 @@
 import sys
 import collections
 import warnings
+from dendropy.datamodel import basemodel
 from dendropy.datamodel import taxonmodel
 from dendropy.utility import deprecate
 from dendropy.utility import textprocessing
@@ -355,12 +356,26 @@ class DataReader(IOService):
 
         """
         # ``product`` is a namedtuple("DataReaderProducts", ["taxon_namespaces", "tree_lists", "char_matrices"])
+
+        ### Put this here as a way to propagate annotations from main file to trees, chars;
+        ### Removed it because not sure if we should actually do this (metadata may not apply).
+        ### For now, if we want to pull metadata from main file, we need to read as dataset and extract from there
+
+        # if global_annotations_target is None:
+        #     global_annotations_target = basemodel.Annotable()
+        #     is_post_process_global_annotations = True
+        # else:
+        #     is_post_process_global_annotations = False
+
         product = self._read(stream=stream,
                 taxon_namespace_factory=taxon_namespace_factory,
                 tree_list_factory=tree_list_factory,
                 char_matrix_factory=None,
                 state_alphabet_factory=None,
                 global_annotations_target=global_annotations_target)
+        # if is_post_process_global_annotations:
+        #     for tree_list in product.tree_lists:
+        #         tree_list.copy_annotations_from(global_annotations_target)
         return product.tree_lists
 
     def read_char_matrices(self,
@@ -369,12 +384,23 @@ class DataReader(IOService):
             char_matrix_factory,
             state_alphabet_factory,
             global_annotations_target=None):
+        ### Put this here as a way to propagate annotations from main file to trees, chars;
+        ### Removed it because not sure if we should actually do this (metadata may not apply).
+        ### For now, if we want to pull metadata from main file, we need to read as dataset and extract from there
+        # if global_annotations_target is None:
+        #     global_annotations_target = basemodel.Annotable()
+        #     is_post_process_global_annotations = True
+        # else:
+        #     is_post_process_global_annotations = False
         product = self._read(stream=stream,
                 taxon_namespace_factory=taxon_namespace_factory,
                 tree_list_factory=None,
                 char_matrix_factory=char_matrix_factory,
                 state_alphabet_factory=state_alphabet_factory,
                 global_annotations_target=global_annotations_target)
+        # if is_post_process_global_annotations:
+        #     for char_matrix in product.char_matrices:
+        #         char_matrix.copy_annotations_from(global_annotations_target)
         return product.char_matrices
 
 ###############################################################################


=====================================
src/dendropy/dataio/nexusprocessing.py
=====================================
@@ -40,13 +40,13 @@ class NexusTokenizer(Tokenizer):
             preserve_unquoted_underscores=False):
         Tokenizer.__init__(self,
             src=src,
-            uncaptured_delimiters=list(" \t\n\r"),
-            captured_delimiters=list("{}(),;:=\\\""),
-            quote_chars="'",
+            uncaptured_delimiters=set(" \t\n\r"),
+            captured_delimiters=set("{}(),;:=\\\""),
+            quote_chars=set("'"),
             escape_quote_by_doubling=True,
-            escape_chars="",
-            comment_begin="[",
-            comment_end="]",
+            escape_chars=set(""),
+            comment_begin=set("["),
+            comment_end=set("]"),
             capture_comments=True,
             preserve_unquoted_underscores=preserve_unquoted_underscores)
         # self.preserve_unquoted_underscores = preserve_unquoted_underscores
@@ -61,38 +61,38 @@ class NexusTokenizer(Tokenizer):
     def set_capture_eol(self, capture_eol):
         if capture_eol:
             try:
-                self.uncaptured_delimiters.remove("\n")
+                self.uncaptured_delimiters.discard("\n")
             except ValueError:
                 pass
             try:
-                self.uncaptured_delimiters.remove("\r")
+                self.uncaptured_delimiters.discard("\r")
             except ValueError:
                 pass
             if "\n" not in self.captured_delimiters:
-                self.captured_delimiters.append("\n")
+                self.captured_delimiters.add("\n")
             if "\r" not in self.captured_delimiters:
-                self.captured_delimiters.append("\r")
+                self.captured_delimiters.add("\r")
         else:
             try:
-                self.captured_delimiters.remove("\n")
+                self.captured_delimiters.discard("\n")
             except ValueError:
                 pass
             try:
-                self.captured_delimiters.remove("\r")
+                self.captured_delimiters.discard("\r")
             except ValueError:
                 pass
             if "\n" not in self.uncaptured_delimiters:
-                self.uncaptured_delimiters.append("\n")
+                self.uncaptured_delimiters.add("\n")
             if "\r" not in self.uncaptured_delimiters:
-                self.uncaptured_delimiters.append("\r")
+                self.uncaptured_delimiters.add("\r")
 
     def set_hyphens_as_captured_delimiters(self, hyphens_as_captured_delimiters):
         if hyphens_as_captured_delimiters:
             if "-" not in self.captured_delimiters:
-                self.captured_delimiters.append("-")
+                self.captured_delimiters.add("-")
         else:
             try:
-                self.captured_delimiters.remove("-")
+                self.captured_delimiters.discard("-")
             except ValueError:
                 pass
 


=====================================
src/dendropy/dataio/nexusreader.py
=====================================
@@ -281,6 +281,11 @@ class NexusReader(ioservice.DataReader):
         exclude_trees : bool
             If |False|, then tree data will not be read. Defaults to
             |True|: tree data will be read.
+        store_ignored_blocks : bool
+            If |True|, then ignored NEXUS blocks will be stored under the annotation
+            (NOT attribute!) ``ignored_nexus_blocks``.
+            To dereference, e.g.: ``dataset.annotations["ignored_nexus_blocks"]``.
+            Defaults to |False|: ignored NEXUS blocks will not be stored.
         attached_taxon_namespace : |TaxonNamespace|
             Unify all operational taxonomic unit definitions in this namespace.
         ignore_unrecognized_keyword_arguments : boolean, default: |False|
@@ -297,6 +302,7 @@ class NexusReader(ioservice.DataReader):
         # keyword validation scheme
         self.exclude_chars = kwargs.pop("exclude_chars", False)
         self.exclude_trees = kwargs.pop("exclude_trees", False)
+        self.store_ignored_blocks = kwargs.pop("store_ignored_blocks", False)
         self._data_type = kwargs.pop("data_type", "standard")
         self.attached_taxon_namespace = kwargs.pop("attached_taxon_namespace", None)
 
@@ -336,6 +342,7 @@ class NexusReader(ioservice.DataReader):
         self._char_matrices = []
         self._tree_lists = []
         self._product = None
+        self._ignored_blocks = []
 
     ###########################################################################
     ## Reader Implementation
@@ -365,6 +372,16 @@ class NexusReader(ioservice.DataReader):
                 taxon_namespaces=self._taxon_namespaces,
                 tree_lists=self._tree_lists,
                 char_matrices=self._char_matrices)
+        if self._global_annotations_target is not None and self._ignored_blocks:
+            a = self._global_annotations_target.annotations.find(name="ignored_nexus_blocks")
+            if a is None:
+                self._global_annotations_target.annotations.add_new(
+                        name="ignored_nexus_blocks",
+                        value=self._ignored_blocks,
+                        datatype_hint="xsd:list",
+                        )
+            else:
+                a.extend(self._ignored_blocks)
         return self._product
 
     ###########################################################################
@@ -590,7 +607,11 @@ class NexusReader(ioservice.DataReader):
                         NexusReader.IncompleteBlockError)
             else:
                 # unknown block
-                token = self._consume_to_end_of_block(token)
+                if token is not None and self.store_ignored_blocks:
+                    b = self._read_block_without_processing(token=token)
+                    self._ignored_blocks.append(b)
+                else:
+                    token = self._consume_to_end_of_block(token)
 
     ###########################################################################
     ## TAXA BLOCK
@@ -640,19 +661,45 @@ class NexusReader(ioservice.DataReader):
         if taxon_namespace is None:
             taxon_namespace = self._get_taxon_namespace()
         token = self._nexus_tokenizer.next_token()
+
+        # Construct label lookup set
+        # The get_taxon call is expensive for large taxon namespaces as it requires
+        # a linear search. This causes significant performance penalties for loading
+        # very large trees into an empty taxon namespace as each new taxon requires
+        # a worst case search of the existing namespace before it can be inserted.
+        # To alleviate this, we build a temporary one-time set of all the labels
+        # in the taxon namespace. Now we can determine in constant-time whether
+        # a label token corresponds to a new taxon that requires insertion,
+        # or if an existing taxon can be fetched with get_taxon.
+        label_set = set([])
+        for taxon in taxon_namespace._taxa:
+            if taxon_namespace.is_case_sensitive:
+                label_set.add(taxon.label)
+            else:
+                label_set.add(taxon.lower_cased_label)
+
         while token != ';':
             label = token
-            # if taxon_namespace.has_taxon(label=label):
-            #     pass
-            # elif len(taxon_namespace) >= self._file_specified_ntax and not self.attached_taxon_namespace:
-            #     raise self._too_many_taxa_error(taxon_namespace=taxon_namespace, label=label)
-            # else:
-            #     taxon_namespace.require_taxon(label=label)
-            taxon = taxon_namespace.get_taxon(label=label)
-            if taxon is None:
+
+            # Convert the token to the appropriate case to check against label set
+            if taxon_namespace.is_case_sensitive:
+                check_label = label
+            else:
+                check_label = label.lower()
+
+            if check_label in label_set:
+                taxon = taxon_namespace.get_taxon(label=label)
+            else:
                 if len(taxon_namespace) >= self._file_specified_ntax and not self.attached_taxon_namespace and not self.unconstrained_taxa_accumulation_mode:
                     raise self._too_many_taxa_error(taxon_namespace=taxon_namespace, label=label)
                 taxon = taxon_namespace.new_taxon(label=label)
+
+                # Add the new label to the label lookup set too
+                if taxon_namespace.is_case_sensitive:
+                    label_set.add(taxon.label)
+                else:
+                    label_set.add(taxon.lower_cased_label)
+
             token = self._nexus_tokenizer.next_token()
             self._nexus_tokenizer.process_and_clear_comments_for_item(taxon,
                     self.extract_comment_metadata)
@@ -1228,12 +1275,40 @@ class NexusReader(ioservice.DataReader):
         else:
             token = "DUMMY"
         while not (token == 'END' or token == 'ENDBLOCK') \
-            and not self._nexus_tokenizer.is_eof() \
-            and not token==None:
+                and not self._nexus_tokenizer.is_eof() \
+                and not token==None:
             self._nexus_tokenizer.skip_to_semicolon()
             token = self._nexus_tokenizer.next_token_ucase()
         return token
 
+    def _read_block_without_processing(self, token=None):
+        # used for unknown blocks we want to save
+        # NOT (really) TESTED
+        # Everybody else except Jeet: (REALLY) DO NOT USE!
+        # Jeet: SORTA DO NOT USE WITHOUT MORE TESTING
+        if token:
+            token = token.upper()
+        block = ["BEGIN", token]
+        old_uncaptured_delimiters = self._nexus_tokenizer.uncaptured_delimiters
+        old_captured_delimiters = self._nexus_tokenizer.captured_delimiters
+        to_switch = "\n\r"
+        for ch in to_switch:
+            self._nexus_tokenizer.uncaptured_delimiters.discard(ch)
+            self._nexus_tokenizer.captured_delimiters.add(ch)
+        while not (token == 'END' or token == 'ENDBLOCK') \
+                and not self._nexus_tokenizer.is_eof() \
+                and not token==None:
+            token = self._nexus_tokenizer.require_next_token()
+            uctoken = token.upper()
+            if uctoken == "END" or uctoken == "ENDBLOCK":
+                token = uctoken
+            block.append(token)
+        self._nexus_tokenizer.uncaptured_delimiters = old_uncaptured_delimiters
+        self._nexus_tokenizer.captured_delimiters = old_captured_delimiters
+        self._nexus_tokenizer.skip_to_semicolon() # move past end
+        block.append(";")
+        return " ".join(block)
+
     def _read_character_states(self,
             character_data_vector,
             state_alphabet,


=====================================
src/dendropy/datamodel/basemodel.py
=====================================
@@ -632,6 +632,9 @@ class Annotable(object):
     or other information as metadata should subclass.
     """
 
+    def __init__(self):
+        self.comments = []
+
     def _get_annotations(self):
         if not hasattr(self, "_annotations"):
             self._annotations = AnnotationSet(self)


=====================================
src/dendropy/model/birthdeath.py
=====================================
@@ -494,6 +494,509 @@ def birth_death_tree(birth_rate, death_rate, birth_rate_sd=0.0, death_rate_sd=0.
             nd.taxon = taxon
     return tree
 
+
+#NICOLA: new version of birth-death tree simulation with linear time cost
+#in number of tips instead of quadratic. To obtain this acceleration, however,
+#I have removed variation in birth and death rates.
+def fast_birth_death_tree(birth_rate, death_rate, **kwargs):
+    #NICOLA: we are not allowing variation in birth and death rate so to have
+    #increased efficiency.
+    birth_rate_sd=0.0
+    death_rate_sd=0.0
+
+    """
+    Returns a birth-death tree with birth rate specified by ``birth_rate``, and
+    death rate specified by ``death_rate``, with edge lengths in continuous (real)
+    units.
+
+    Tree growth is controlled by one or more of the following arguments, of which
+    at least one must be specified:
+
+        - If ``num_extant_tips`` is given as a keyword argument, tree is grown until the
+          number of EXTANT tips equals this number.
+        - If ``num_extinct_tips`` is given as a keyword argument, tree is grown until the
+          number of EXTINCT tips equals this number.
+        - If ``num_total_tips`` is given as a keyword argument, tree is grown until the
+          number of EXTANT plus EXTINCT tips equals this number.
+        - If 'max_time' is given as a keyword argument, tree is grown for
+          a maximum of ``max_time``.
+        - If ``gsa_ntax`` is given then the tree will be simulated up to this number of
+          EXTANT tips (or 0 tips), then a tree will be randomly selected from the
+          intervals which correspond to times at which the tree had exactly ``num_extant_tips``
+          leaves. This allows for simulations according to the "General
+          Sampling Approach" of Hartmann et al. (2010). If this option is
+          specified, then ``num_extant_tips`` MUST be specified and
+          ``num_extinct_tips`` and ``num_total_tips`` CANNOT be specified.
+
+    If more than one of the above is given, then tree growth will terminate when
+    *any* one of the termination conditions are met.
+
+    Parameters
+    ----------
+
+    birth_rate : float
+        The birth rate.
+    death_rate : float
+        The death rate.
+    birth_rate_sd : float
+        The standard deviation of the normally-distributed mutation added to
+        the birth rate as it is inherited by daughter nodes; if 0, birth rate
+        does not evolve on the tree.
+    death_rate_sd : float
+        The standard deviation of the normally-distributed mutation added to
+        the death rate as it is inherited by daughter nodes; if 0, death rate
+        does not evolve on the tree.
+
+    Keyword Arguments
+    -----------------
+
+    num_extant_tips: int
+        If specified, branching process is terminated when number of EXTANT
+        tips equals this number.
+    num_extinct_tips: int
+        If specified, branching process is terminated when number of EXTINCT
+        tips equals this number.
+    num_total_tips: int
+        If specified, branching process is terminated when number of EXTINCT
+        plus EXTANT tips equals this number.
+    max_time: float
+        If specified, branching process is terminated when time reaches or
+        exceeds this value.
+    gsa_ntax: int
+        The General Sampling Approach threshold for number of taxa. See above
+        for details.
+    tree : Tree instance
+        If given, then this tree will be used; otherwise a new one will be created.
+    taxon_namespace : TaxonNamespace instance
+        If given, then this will be assigned to the new tree, and, in addition,
+        taxa assigned to tips will be sourced from or otherwise created with
+        reference to this.
+    is_assign_extant_taxa : bool [default: True]
+        If False, then taxa will not be assigned to extant tips. If True
+        (default), then taxa will be assigned to extant tips. Taxa will be
+        assigned from the specified ``taxon_namespace`` or
+        ``tree.taxon_namespace``. If the number of taxa required exceeds the
+        number of taxa existing in the taxon namespace, new |Taxon| objects
+        will be created as needed and added to the taxon namespace.
+    is_assign_extinct_taxa : bool [default: True]
+        If False, then taxa will not be assigned to extinct tips. If True
+        (default), then taxa will be assigned to extinct tips. Taxa will be
+        assigned from the specified ``taxon_namespace`` or
+        ``tree.taxon_namespace``. If the number of taxa required exceeds the
+        number of taxa existing in the taxon namespace, new |Taxon| objects
+        will be created as needed and added to the taxon namespace. Note that
+        this option only makes sense if extinct tips are retained (specified via
+        'is_retain_extinct_tips' option), and will otherwise be ignored.
+    is_add_extinct_attr: bool [default: True]
+        If True (default), add an boolean attribute indicating whether or not a
+        node is an extinct tip or not. False will skip this. Name of attribute
+        is set by 'extinct_attr_name' argument, defaulting to 'is_extinct'.
+        Note that this option only makes sense if extinct tips are retained
+        (specified via 'is_retain_extinct_tips' option), and will otherwise be
+        ignored.
+    extinct_attr_name: str [default: 'is_extinct']
+        Name of attribute to add to nodes indicating whether or not tip is extinct.
+        Note that this option only makes sense if extinct tips are retained
+        (specified via 'is_retain_extinct_tips' option), and will otherwise be
+        ignored.
+    is_retain_extinct_tips : bool [default: False]
+        If True, extinct tips will be retained on tree. Defaults to False:
+        extinct lineages removed from tree.
+    repeat_until_success: bool [default: True]
+        Under some conditions, it is possible for all lineages on a tree to go
+        extinct. In this case, if this argument is given as |True| (the
+        default), then a new branching process is initiated. If |False|,
+        then a TreeSimTotalExtinctionException is raised.
+    rng: random.Random() or equivalent instance
+        A Random() object or equivalent can be passed using the ``rng`` keyword;
+        otherwise GLOBAL_RNG is used.
+
+    References
+    ----------
+
+    Hartmann, Wong, and Stadler "Sampling Trees from Evolutionary Models" Systematic Biology. 2010. 59(4). 465-476
+
+    """
+    if "assign_taxa" in kwargs:
+        deprecate.dendropy_deprecation_warning(
+                message="Deprecated: 'assign_taxa' will no longer be supported as an argument to this function. Use 'is_assign_extant_taxa' and/or 'is_assign_extinct_taxa' instead",
+                stacklevel=3)
+        a = kwargs.pop("assign_taxa")
+        kwargs["is_assign_extant_taxa"] = a
+        kwargs["is_assign_extant_taxa"] = a
+    if "ntax" in kwargs:
+        deprecate.dendropy_deprecation_warning(
+                message="Deprecated: 'ntax' is no longer supported as an argument to this function. Use one or more of the following instead: 'num_extant_tips', 'num_extinct_tips', 'num_total_tips', or 'max_time'",
+                stacklevel=3)
+        kwargs["num_extant_tips"] = kwargs.pop("ntax")
+    if (("num_extant_tips" not in kwargs)
+            and ("num_extinct_tips" not in kwargs)
+            and ("num_total_tips" not in kwargs)
+            and ("max_time" not in kwargs) ):
+        if "taxon_namespace" in kwargs:
+            ### cannot support legacy approach, b/c ``taxon_namespace`` may grow during function, leading to unpredictable behavior
+            # deprecate.dendropy_deprecation_warning(
+            #         preamble="Deprecated: The 'taxon_namespace' argument can no longer be used to specify a termination condition as a side-effect. Use one or more of the following instead with the length of the taxon namespace instance as a value: 'num_extant_tips', 'num_extinct_tips', or 'num_total_tips'",
+            #         old_construct="tree = birth_death_tree(\n    ...\n    taxon_namespace=taxon_namespace,\n    ...\n)",
+            #         new_construct="tree = birth_death_tree(\n    ...\n    taxon_namespace=taxon_namespace,\n    num_extant_tips=len(taxon_namespace),\n    ...\n)")
+            # kwargs["num_extant_tips"] = len(kwargs["taxon_namespace"])
+            raise ValueError("The 'taxon_namespace' argument can no longer be used to specify a termination condition as a side-effect. "
+                             "Use one or more of the following instead with the length of the taxon namespace instance as a value: "
+                             "'num_extant_tips', 'num_extinct_tips', or 'num_total_tips'.\n"
+                             "That is, instead of:\n\n"
+                             "    tree = birth_death_tree(\n        ...\n        taxon_namespace=taxon_namespace,\n        ...\n    )\n\n"
+                             "Use:\n\n"
+                             "    ntax = len(taxon_namespace)\n    tree = birth_death_tree(\n        ...\n        taxon_namespace=taxon_namespace,\n        num_extant_tips=ntax,\n        ...\n    )\n"
+                             "\nOr (recommended):\n\n"
+                             "    tree = birth_death_tree(\n        ...\n        taxon_namespace=taxon_namespace,\n        num_extant_tips=100,\n        ...\n    )\n"
+                             "\nNote that the taxon namespace instance size may grow during any particular call of the function depending on taxon assignment/creation settings, so"
+                             " for stable and predictable behavior it is important to take a snapshot of the desired taxon namespace size before any call of the function, or, better yet"
+                             " simply pass in a constant value."
+                             )
+        else:
+            raise ValueError("One or more of the following must be specified: 'num_extant_tips', 'num_extinct_tips', or 'max_time'")
+    target_num_extant_tips = kwargs.pop("num_extant_tips", None)
+    target_num_extinct_tips = kwargs.pop("num_extinct_tips", None)
+    target_num_total_tips = kwargs.pop("num_total_tips", None)
+    max_time = kwargs.pop('max_time', None)
+    gsa_ntax = kwargs.pop('gsa_ntax', None)
+    is_add_extinct_attr = kwargs.pop('is_add_extinct_attr', True)
+    extinct_attr_name = kwargs.pop('extinct_attr_name', 'is_extinct')
+    is_retain_extinct_tips = kwargs.pop('is_retain_extinct_tips', False)
+    is_assign_extant_taxa = kwargs.pop('is_assign_extant_taxa', True)
+    is_assign_extinct_taxa = kwargs.pop('is_assign_extinct_taxa', True)
+    repeat_until_success = kwargs.pop('repeat_until_success', True)
+
+    tree = kwargs.pop("tree", None)
+    taxon_namespace = kwargs.pop("taxon_namespace", None)
+
+    rng = kwargs.pop('rng', GLOBAL_RNG)
+
+    ignore_unrecognized_keyword_arguments = kwargs.pop('ignore_unrecognized_keyword_arguments', False)
+    if kwargs and not ignore_unrecognized_keyword_arguments:
+        raise ValueError("Unsupported keyword arguments: {}".format(kwargs.keys()))
+
+    terminate_at_full_tree = False
+
+    if gsa_ntax is None:
+        terminate_at_full_tree = True
+        # gsa_ntax = 1 + target_num_taxa
+    elif target_num_extant_tips is None:
+        raise ValueError("If 'gsa_ntax' is specified, 'num_extant_tips' must be specified")
+    elif target_num_extinct_tips is not None:
+        raise ValueError("If 'gsa_ntax' is specified, 'num_extinct_tips' cannot be specified")
+    elif target_num_total_tips is not None:
+        raise ValueError("If 'gsa_ntax' is specified, 'num_total_tips' cannot be specified")
+    elif gsa_ntax < target_num_extant_tips:
+        raise ValueError("'gsa_ntax' ({}) must be greater than 'num_extant_tips' ({})".format(gsa_ntax, target_num_extant_tips))
+
+    # initialize tree
+    if tree is not None:
+        if taxon_namespace is not None:
+            assert tree.taxon_namespace is taxon_namespace
+        else:
+            taxon_namespace = tree.taxon_namespace
+        extant_tips = []
+        extinct_tips = []
+        for nd in tree:
+            if not nd._child_nodes:
+                if getattr(nd, extinct_attr_name, False):
+                    extant_tips.append(nd)
+                    #NICOLA: the reason for this will become clear later
+                    nd.edge.length=-nd.edge.length
+                    if is_add_extinct_attr:
+                        setattr(nd, extinct_attr_name, False)
+                else:
+                    extinct_tips.append(nd)
+                    if is_add_extinct_attr:
+                        setattr(nd, extinct_attr_name, True)
+            elif is_add_extinct_attr:
+                setattr(nd, extinct_attr_name, None)
+    else:
+        if taxon_namespace is None:
+            taxon_namespace = dendropy.TaxonNamespace()
+        tree = dendropy.Tree(taxon_namespace=taxon_namespace)
+        tree.is_rooted = True
+        tree.seed_node.edge.length = 0.0
+        tree.seed_node.birth_rate = birth_rate
+        tree.seed_node.death_rate = death_rate
+        if is_add_extinct_attr:
+            setattr(tree.seed_node, extinct_attr_name, False)
+        extant_tips = [tree.seed_node]
+        extinct_tips = []
+    initial_extant_tip_set = list(extant_tips)
+    initial_extinct_tip_set = list(extinct_tips)
+    #NICOLA: in case we need a restart, let's record the initial branch lengths.
+    initial_lengths=[]
+    for nd in initial_extant_tip_set:
+        initial_lengths.append(nd.edge.length)
+
+    total_time = 0
+
+    # for the GSA simulations, targetted_time_slices is a list of tuples:
+    #   the first element in each tuple is the duration of the amount of time
+    #   that the simulation spent at the (targeted) number of taxa,
+    #   and the second is a list of edge information. The list of edge information includes
+    #   a list of terminal edges in the tree and the length for that edge
+    #   that marks the beginning of the time slice that corresponds to the
+    #   targeted number of taxa.
+    targetted_time_slices = []
+
+    while True:
+        if gsa_ntax is None:
+            if target_num_extant_tips is not None and len(extant_tips) >= target_num_extant_tips:
+                #NICOLA: here and in the other cases, update the branch length so to
+                #represent the true branch length (explained better below).
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+            if target_num_extinct_tips is not None and len(extinct_tips) >= target_num_extinct_tips:
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+            if target_num_total_tips is not None and (len(extant_tips) + len(extinct_tips)) >= target_num_total_tips:
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+            if max_time is not None and total_time >= max_time:
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+        elif len(extant_tips) >= gsa_ntax:
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+
+
+        # get vector of birth/death probabilities, and
+        # associate with nodes/events
+        #NICOLA: replaced this
+   #      event_rates = []
+#         event_nodes = []
+#         for nd in extant_tips:
+#             if not hasattr(nd, 'birth_rate'):
+#                 nd.birth_rate = birth_rate
+#             if not hasattr(nd, 'death_rate'):
+#                 nd.death_rate = death_rate
+#             event_rates.append(nd.birth_rate)
+#             event_nodes.append((nd, True)) # birth event = True
+#             event_rates.append(nd.death_rate)
+#             event_nodes.append((nd, False)) # birth event = False; i.e. death
+
+
+        # get total probability of any birth/death
+        #rate_of_any_event = sum(event_rates)
+        #NICOLA: instead, assume that each node has the same birth and death rate, and sample
+        #from the total rate and then sample one node at random from extant ones.
+        #this has constant cost instead of linear in the number of extant taxa.
+        rate_of_any_event=len(extant_tips)*(birth_rate+death_rate)
+
+        # waiting time based on above probability
+        waiting_time = rng.expovariate(rate_of_any_event)
+
+        if ( (gsa_ntax is not None)
+                and (len(extant_tips) == target_num_extant_tips)
+                ):
+            edge_and_start_length = []
+            for nd in extant_tips:
+                e = nd.edge
+                #NICOLA: modified accordingly
+                edge_and_start_length.append((e, total_time-e.length))
+            targetted_time_slices.append((waiting_time, edge_and_start_length))
+            if terminate_at_full_tree:
+                #NICOLA: terminal update as before
+                for nd in extant_tips:
+                        nd.edge.length=total_time-nd.edge.length
+                break
+
+        # add waiting time to nodes
+        #Nicola: this is the part that makes it quadratic in ntax.
+        #instead, we use a trick of initializing nd.edge.length to the time of node creation,
+        # and then we update it to the correct branch length when the node is closed or simulations are terminated.
+        #for nd in extant_tips:
+        #    try:
+        #        nd.edge.length += waiting_time
+        #    except TypeError:
+        #        nd.edge.length = waiting_time
+        total_time += waiting_time
+
+        # if event occurs within time constraints
+        if max_time is None or total_time <= max_time:
+            # normalize probability
+            #for i in range(len(event_rates)):
+            #    event_rates[i] = event_rates[i]/rate_of_any_event
+            # select node/event and process
+            #nd, birth_event = probability.weighted_choice(event_nodes, event_rates, rng=rng)
+            #NICOLA: instead of sampling from a categorical distribution (requiring linear time in ntax)
+            #sample a random integer to represent the chosen taxon, and sample if birth or death event.
+            taxI=rng.randint(0, len(extant_tips)-1)
+            if rng.random()< birth_rate/(birth_rate+death_rate):
+                birth_event=True
+            else:
+                birth_event=False
+            nd=extant_tips[taxI]
+            #extant_tips.remove(nd)
+            if birth_event:
+                if is_add_extinct_attr:
+                    setattr(nd, extinct_attr_name, None)
+                c1 = nd.new_child()
+                c2 = nd.new_child()
+                extant_tips[taxI]=c1
+                #NICOLA: we initialize branch lengths to total time;
+                #this may not make sense at first, but when we are done with a node, we
+                #update the branch length to the new current time minus the time of node creation
+                #which gives us the branch length. The advantage is that this takes linear time
+                #instead of quadratic.
+                c1.edge.length = total_time
+                c2.edge.length = total_time
+                nd.edge.length = total_time-nd.edge.length
+                #c1.edge.length = 0
+                #c2.edge.length = 0
+                #NICOLA: here, for speed, we don't allow variation in birth rates.
+                #It could be possible to allow it, but it would take ntax*log(ntax) time,
+                #which wouldn't be bad, but it would also require a more complicated approach.
+                #c1.birth_rate = nd.birth_rate + rng.gauss(0, birth_rate_sd)
+                #c1.death_rate = nd.death_rate + rng.gauss(0, death_rate_sd)
+                #c2.birth_rate = nd.birth_rate + rng.gauss(0, birth_rate_sd)
+                #c2.death_rate = nd.death_rate + rng.gauss(0, death_rate_sd)
+                c1.birth_rate = nd.birth_rate
+                c1.death_rate = nd.death_rate
+                c2.birth_rate = nd.birth_rate
+                c2.death_rate = nd.death_rate
+                #extant_tips.append(c1)
+                extant_tips.append(c2)
+            else:
+                del extant_tips[taxI]
+                if len(extant_tips) > 0:
+                    #NICOLA-style closure: convert the creation-time marker stored in
+                    #edge.length into the true branch length at the moment of extinction
+                    nd.edge.length=total_time-nd.edge.length
+                    extinct_tips.append(nd)
+                    if is_add_extinct_attr:
+                        setattr(nd, extinct_attr_name, True)
+                else:
+                    # total extinction
+                    if (gsa_ntax is not None):
+                        if (len(targetted_time_slices) > 0):
+                            break
+                    if not repeat_until_success:
+                        raise TreeSimTotalExtinctionException()
+                    # We are going to basically restart the simulation because
+                    # the tree has gone extinct (without reaching the specified
+                    # ntax)
+                    extant_tips = list(initial_extant_tip_set)
+                    extinct_tips = list(initial_extinct_tip_set)
+                    ndIndex=0
+                    for nd in extant_tips:
+                        if is_add_extinct_attr:
+                            setattr(nd, extinct_attr_name, False)
+                        nd.clear_child_nodes()
+                        #NICOLA: restore branch length to original value
+                        nd.edge.length=initial_lengths[ndIndex]
+                        ndIndex+=1
+                    total_time = 0
+
+
+    if gsa_ntax is not None:
+        total_duration_at_target_n_tax = 0.0
+        for i in targetted_time_slices:
+            total_duration_at_target_n_tax += i[0]
+        r = rng.random()*total_duration_at_target_n_tax
+        selected_slice = None
+        for n, i in enumerate(targetted_time_slices):
+            r -= i[0]
+            if r < 0.0:
+                selected_slice = i
+                break
+        assert(selected_slice is not None)
+        edges_at_slice = selected_slice[1]
+        last_waiting_time = selected_slice[0]
+
+        for e, prev_length in edges_at_slice:
+            daughter_nd = e.head_node
+            for nd in daughter_nd.child_nodes():
+                nd._parent_node = None
+                try:
+                    extinct_tips.remove(nd)
+                except ValueError:
+                    pass
+                try:
+                    extant_tips.remove(nd)
+                except ValueError:
+                    pass
+                for desc in nd.preorder_iter():
+                    try:
+                        extant_tips.remove(desc)
+                    except ValueError:
+                        pass
+            daughter_nd.clear_child_nodes()
+            try:
+                extinct_tips.remove(daughter_nd)
+            except ValueError:
+                pass
+            extant_tips.append(daughter_nd)
+            if is_add_extinct_attr:
+                setattr(daughter_nd, extinct_attr_name, False)
+            e.length = prev_length + last_waiting_time
+
+    if not is_retain_extinct_tips:
+        processed_nodes = set()
+        for nd in list(extinct_tips):
+            if nd in processed_nodes:
+                continue
+            processed_nodes.add(nd)
+            try:
+                extinct_tips.remove(nd)
+            except ValueError:
+                pass
+            assert not nd._child_nodes
+            while (nd.parent_node is not None) and (len(nd.parent_node._child_nodes) == 1):
+                nd = nd.parent_node
+                processed_nodes.add(nd)
+            tree.prune_subtree(nd, suppress_unifurcations=False)
+    tree.suppress_unifurcations()
+
+    if is_assign_extant_taxa or is_assign_extinct_taxa:
+        taxon_pool = [t for t in taxon_namespace]
+        rng.shuffle(taxon_pool)
+        taxon_pool_labels = set([t.label for t in taxon_pool])
+
+        ### ONLY works if in GSA sub-section we remove ALL extant and
+        ### extinct nodes beyond time slice: expensive
+        ### Furthermore, main reason to use this approach is to have different
+        ### label prefixes for extinct vs. extant lineages, but the second time
+        ### this function is called with the same taxon namespace or any time
+        ### this function is called with a populated taxon namespace, that
+        ### aesthetic is lost.
+        # node_pool_labels = ("T", "X")
+        # for node_pool_idx, node_pool in enumerate((extant_tips, extinct_tips)):
+        #     for node_idx, nd in enumerate(node_pool):
+        #         if taxon_pool:
+        #             taxon = taxon_pool.pop()
+        #         else:
+        #             taxon = taxon_namespace.require_taxon("{}{}".format(node_pool_labels[node_pool_idx], node_idx+1))
+        #         nd.taxon = taxon
+        #         assert not nd._child_nodes
+
+        tlabel_counter = 0
+        leaf_nodes = tree.leaf_nodes()
+        rng.shuffle(leaf_nodes)
+        for nd_idx, nd in enumerate(leaf_nodes):
+            if not is_assign_extant_taxa and nd in extant_tips:
+                continue
+            if not is_assign_extinct_taxa and nd in extinct_tips:
+                continue
+            if taxon_pool:
+                taxon = taxon_pool.pop()
+            else:
+                while True:
+                    tlabel_counter += 1
+                    label = "{}{}".format("T", tlabel_counter)
+                    if label not in taxon_pool_labels:
+                        break
+                taxon = taxon_namespace.require_taxon(label=label)
+                taxon_pool_labels.add(label)
+            nd.taxon = taxon
+    return tree
+
+
+
 def discrete_birth_death_tree(birth_rate, death_rate, birth_rate_sd=0.0, death_rate_sd=0.0, **kwargs):
     """
     Returns a birth-death tree with birth rate specified by ``birth_rate``, and


=====================================
src/dendropy/model/protractedspeciation.py
=====================================
@@ -691,25 +691,27 @@ class ProtractedSpeciationProcess(object):
                 break
             # we do this here so that the (newest) tip lineages have the
             # waiting time to the next event branch lengths
-            if (num_extant_lineages is None
-                    and min_extant_lineages is None
-                    and max_extant_lineages is None):
-                lineage_requirements_met = True
+            if (num_extant_lineages is not None
+                    or min_extant_lineages is not None
+                    or max_extant_lineages is not None):
+                has_lineage_count_requirements = True
+                if (
+                        (num_extant_lineages is None or ((num_incipient_species + num_orthospecies) == num_extant_lineages))
+                        and (min_extant_lineages is None or ((num_incipient_species + num_orthospecies) >= min_extant_lineages))
+                        and (max_extant_lineages is None or ((num_incipient_species + num_orthospecies) == max_extant_lineages))
+                        ):
+                    is_lineage_count_requirements_met = True
+                else:
+                    is_lineage_count_requirements_met = False
             else:
-                lineage_requirements_met = False
+                has_lineage_count_requirements = False
+                is_lineage_count_requirements_met = None
             if max_extant_lineages is not None and (num_incipient_species + num_orthospecies) > max_extant_lineages:
                 raise ProcessFailedException()
-            if (
-                    (num_extant_lineages is None or ((num_incipient_species + num_orthospecies) == num_extant_lineages))
-                    and (min_extant_lineages is None or ((num_incipient_species + num_orthospecies) >= min_extant_lineages))
-                    and (max_extant_lineages is None or ((num_incipient_species + num_orthospecies) == max_extant_lineages))
-                    ):
-                lineage_requirements_met = True
-            else:
-                lineage_requirements_met = False
             if num_extant_orthospecies is not None or max_extant_orthospecies is not None or min_extant_orthospecies is not None:
                 ## note: very expensive operation to count orthospecies leaves!
-                orthospecies_requirements_met = False
+                has_orthospecies_count_requirements = True
+                is_orthospecies_count_requirements_met = False
                 final_time = current_time + self.rng.uniform(0, waiting_time)
                 lineage_collection_snapshot = [lineage.clone() for lineage in itertools.chain(lineage_data[0]["lineage_collection"], lineage_data[1]["lineage_collection"])]
                 try:
@@ -737,66 +739,71 @@ class ProtractedSpeciationProcess(object):
                                 )
                         lineage_data[phase_idx]["lineage_tree"] = lineage_tree
                         lineage_data[phase_idx]["orthospecies_tree"] = orthospecies_tree
-                        orthospecies_requirements_met = True
+                        is_orthospecies_count_requirements_met = True
                 except ProcessFailedException:
                     pass
                 if max_extant_orthospecies is not None and num_leaves > max_extant_orthospecies:
                     raise ProcessFailedException
             else:
-                orthospecies_requirements_met = True
-            # add to current time
-            current_time += waiting_time
-            # Select event
-            event_type_idx = probability.weighted_index_choice(weights=event_rates, rng=self.rng)
-            assert (event_type_idx >= 0 and event_type_idx <= 4)
-            if event_type_idx == 0:
-                # Splitting of new incipient species lineage from orthospecies lineage
-                parent_lineage = self.rng.choice(orthospecies_lineages)
-                lineage_data[phase_idx]["lineage_id"] += 1
-                new_lineage = self._new_lineage(
-                        lineage_id=lineage_data[phase_idx]["lineage_id"],
-                        parent_lineage=parent_lineage,
-                        origin_time=current_time,
-                        )
-                lineage_collection.append(new_lineage)
-                incipient_species_lineages.append(new_lineage)
-            elif event_type_idx == 1:
-                # Extinction of an orthospecies lineage
-                lineage_idx = self.rng.randint(0, len(orthospecies_lineages)-1)
-                orthospecies_lineages[lineage_idx].extinction_time = current_time
-                del orthospecies_lineages[lineage_idx]
-            elif event_type_idx == 2:
-                # Splitting of new incipient species lineage from incipient lineage
-                parent_lineage = self.rng.choice(incipient_species_lineages)
-                lineage_data[phase_idx]["lineage_id"] += 1
-                new_lineage = self._new_lineage(
-                        lineage_id=lineage_data[phase_idx]["lineage_id"],
-                        parent_lineage=parent_lineage,
-                        origin_time=current_time,
-                        )
-                lineage_collection.append(new_lineage)
-                incipient_species_lineages.append(new_lineage)
-            elif event_type_idx == 3:
-                # Completion of speciation
-                lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
-                lineage = incipient_species_lineages[lineage_idx]
-                lineage.speciation_completion_time = current_time
-                lineage_data[phase_idx]["species_id"] += 1
-                lineage.species_id = lineage_data[phase_idx]["species_id"]
-                orthospecies_lineages.append(lineage)
-                del incipient_species_lineages[lineage_idx]
-            elif event_type_idx == 4:
-                # Extinction of an incipient_species lineage
-                lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
-                incipient_species_lineages[lineage_idx].extinction_time = current_time
-                del incipient_species_lineages[lineage_idx]
-            else:
-                raise Exception("Unexpected event type index: {}".format(event_type_idx))
-            if lineage_requirements_met and orthospecies_requirements_met:
+                has_orthospecies_count_requirements = False
+                is_orthospecies_count_requirements_met = None
+            if (
+                    ( (has_lineage_count_requirements and is_lineage_count_requirements_met) and (has_orthospecies_count_requirements and is_orthospecies_count_requirements_met) )
+                    or ( (has_lineage_count_requirements and is_lineage_count_requirements_met) and (not has_orthospecies_count_requirements) )
+                    or ( (not has_lineage_count_requirements) and (has_orthospecies_count_requirements and is_orthospecies_count_requirements_met) )
+            ):
                 final_time = current_time + self.rng.uniform(0, waiting_time)
                 lineage_data[phase_idx]["final_time"] = final_time
-                print("OK")
                 break
+            else:
+                # add to current time
+                current_time += waiting_time
+                # Select event
+                event_type_idx = probability.weighted_index_choice(weights=event_rates, rng=self.rng)
+                assert (event_type_idx >= 0 and event_type_idx <= 4)
+                if event_type_idx == 0:
+                    # Splitting of new incipient species lineage from orthospecies lineage
+                    parent_lineage = self.rng.choice(orthospecies_lineages)
+                    lineage_data[phase_idx]["lineage_id"] += 1
+                    new_lineage = self._new_lineage(
+                            lineage_id=lineage_data[phase_idx]["lineage_id"],
+                            parent_lineage=parent_lineage,
+                            origin_time=current_time,
+                            )
+                    lineage_collection.append(new_lineage)
+                    incipient_species_lineages.append(new_lineage)
+                elif event_type_idx == 1:
+                    # Extinction of an orthospecies lineage
+                    lineage_idx = self.rng.randint(0, len(orthospecies_lineages)-1)
+                    orthospecies_lineages[lineage_idx].extinction_time = current_time
+                    del orthospecies_lineages[lineage_idx]
+                elif event_type_idx == 2:
+                    # Splitting of new incipient species lineage from incipient lineage
+                    parent_lineage = self.rng.choice(incipient_species_lineages)
+                    lineage_data[phase_idx]["lineage_id"] += 1
+                    new_lineage = self._new_lineage(
+                            lineage_id=lineage_data[phase_idx]["lineage_id"],
+                            parent_lineage=parent_lineage,
+                            origin_time=current_time,
+                            )
+                    lineage_collection.append(new_lineage)
+                    incipient_species_lineages.append(new_lineage)
+                elif event_type_idx == 3:
+                    # Completion of speciation
+                    lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
+                    lineage = incipient_species_lineages[lineage_idx]
+                    lineage.speciation_completion_time = current_time
+                    lineage_data[phase_idx]["species_id"] += 1
+                    lineage.species_id = lineage_data[phase_idx]["species_id"]
+                    orthospecies_lineages.append(lineage)
+                    del incipient_species_lineages[lineage_idx]
+                elif event_type_idx == 4:
+                    # Extinction of an incipient_species lineage
+                    lineage_idx = self.rng.randint(0, len(incipient_species_lineages)-1)
+                    incipient_species_lineages[lineage_idx].extinction_time = current_time
+                    del incipient_species_lineages[lineage_idx]
+                else:
+                    raise Exception("Unexpected event type index: {}".format(event_type_idx))
 
     def _new_lineage(self,
             lineage_id,


=====================================
tests/data/trees/pythonidae.lower.mle.nex
=====================================
@@ -0,0 +1,46 @@
+#NEXUS
+
+
+BEGIN TAXA;
+    DIMENSIONS NTAX=33;
+    TAXLABELS
+        python_regius
+        python_sebae
+        python_molurus
+        python_curtus
+        morelia_bredli
+        morelia_spilota
+        morelia_tracyae
+        morelia_clastolepis
+        morelia_kinghorni
+        morelia_nauta
+        morelia_amethistina
+        morelia_oenpelliensis
+        antaresia_maculosa
+        antaresia_perthensis
+        antaresia_stimsoni
+        antaresia_childreni
+        morelia_carinata
+        morelia_viridisn
+        morelia_viridiss
+        apodora_papuana
+        liasis_olivaceus
+        liasis_fuscus
+        liasis_mackloti
+        antaresia_melanocephalus
+        antaresia_ramsayi
+        liasis_albertisii
+        bothrochilus_boa
+        morelia_boeleni
+        python_timoriensis
+        python_reticulatus
+        xenopeltis_unicolor
+        candoia_aspera
+        loxocemus_bicolor
+  ;
+END;
+
+BEGIN TREES;
+    TREE 0 = [&U] (((python_regius:0.1058922755,((python_sebae:0.0629755585,python_molurus:0.0335903967):0.02165,python_curtus:0.1067094932):0.016163):0.032743,(((((morelia_bredli:0.0274921037,morelia_spilota:0.0241663426):0.026356,((morelia_tracyae:0.0377936102,((morelia_clastolepis:0.0045446653,(morelia_kinghorni:0.0075825724,morelia_nauta:0.0086155842):0.004182):0.018597,morelia_amethistina:0.0227641045):0.007181):0.024796,morelia_oenpelliensis:0.0579745143):0.004283):0.031732,((antaresia_maculosa:0.0679212061,(antaresia_perthensis:0.0760812159,(antaresia_stimsoni:0.0152390165,antaresia_childreni:0.023141749):0.032397):0.012848):0.011617,(morelia_carinata:0.0660356718,(morelia_viridisn:0.0377499268,morelia_viridiss:0.0473589755):0.027329):0.013482):0.015469):0.006602,(((((apodora_papuana:0.0670782319,liasis_olivaceus:0.0430801028):0.010168,(liasis_fuscus:0.0194903208,liasis_mackloti:0.0141916418):0.048505):0.013422,(antaresia_melanocephalus:0.0380695554,antaresia_ramsayi:0.0325474267):0.043626):0.007734,(liasis_albertisii:0.0542142498,bothrochilus_boa:0.0638595214):0.038444):0.002713,morelia_boeleni:0.0843874314):0.002859):0.027099,(python_timoriensis:0.074479767,python_reticulatus:0.0562613055):0.06004):0.030952):0.060789,(xenopeltis_unicolor:0.1983677797,candoia_aspera:0.4092923305):0.048508,loxocemus_bicolor:0.2627888765);
+END;
+


=====================================
tests/data/trees/pythonidae.upper.mle.nex
=====================================
@@ -0,0 +1,46 @@
+#NEXUS
+
+
+BEGIN TAXA;
+    DIMENSIONS NTAX=33;
+    TAXLABELS
+        PYTHON_REGIUS
+        PYTHON_SEBAE
+        PYTHON_MOLURUS
+        PYTHON_CURTUS
+        MORELIA_BREDLI
+        MORELIA_SPILOTA
+        MORELIA_TRACYAE
+        MORELIA_CLASTOLEPIS
+        MORELIA_KINGHORNI
+        MORELIA_NAUTA
+        MORELIA_AMETHISTINA
+        MORELIA_OENPELLIENSIS
+        ANTARESIA_MACULOSA
+        ANTARESIA_PERTHENSIS
+        ANTARESIA_STIMSONI
+        ANTARESIA_CHILDRENI
+        MORELIA_CARINATA
+        MORELIA_VIRIDISN
+        MORELIA_VIRIDISS
+        APODORA_PAPUANA
+        LIASIS_OLIVACEUS
+        LIASIS_FUSCUS
+        LIASIS_MACKLOTI
+        ANTARESIA_MELANOCEPHALUS
+        ANTARESIA_RAMSAYI
+        LIASIS_ALBERTISII
+        BOTHROCHILUS_BOA
+        MORELIA_BOELENI
+        PYTHON_TIMORIENSIS
+        PYTHON_RETICULATUS
+        XENOPELTIS_UNICOLOR
+        CANDOIA_ASPERA
+        LOXOCEMUS_BICOLOR
+  ;
+END;
+
+BEGIN TREES;
+    TREE 0 = [&U] (((PYTHON_REGIUS:0.1058922755,((PYTHON_SEBAE:0.0629755585,PYTHON_MOLURUS:0.0335903967):0.02165,PYTHON_CURTUS:0.1067094932):0.016163):0.032743,(((((MORELIA_BREDLI:0.0274921037,MORELIA_SPILOTA:0.0241663426):0.026356,((MORELIA_TRACYAE:0.0377936102,((MORELIA_CLASTOLEPIS:0.0045446653,(MORELIA_KINGHORNI:0.0075825724,MORELIA_NAUTA:0.0086155842):0.004182):0.018597,MORELIA_AMETHISTINA:0.0227641045):0.007181):0.024796,MORELIA_OENPELLIENSIS:0.0579745143):0.004283):0.031732,((ANTARESIA_MACULOSA:0.0679212061,(ANTARESIA_PERTHENSIS:0.0760812159,(ANTARESIA_STIMSONI:0.0152390165,ANTARESIA_CHILDRENI:0.023141749):0.032397):0.012848):0.011617,(MORELIA_CARINATA:0.0660356718,(MORELIA_VIRIDISN:0.0377499268,MORELIA_VIRIDISS:0.0473589755):0.027329):0.013482):0.015469):0.006602,(((((APODORA_PAPUANA:0.0670782319,LIASIS_OLIVACEUS:0.0430801028):0.010168,(LIASIS_FUSCUS:0.0194903208,LIASIS_MACKLOTI:0.0141916418):0.048505):0.013422,(ANTARESIA_MELANOCEPHALUS:0.0380695554,ANTARESIA_RAMSAYI:0.0325474267):0.043626):0.007734,(LIASIS_ALBERTISII:0.0542142498,BOTHROCHILUS_BOA:0.0638595214):0.038444):0.002713,MORELIA_BOELENI:0.0843874314):0.002859):0.027099,(PYTHON_TIMORIENSIS:0.074479767,PYTHON_RETICULATUS:0.0562613055):0.06004):0.030952):0.060789,(XENOPELTIS_UNICOLOR:0.1983677797,CANDOIA_ASPERA:0.4092923305):0.048508,LOXOCEMUS_BICOLOR:0.2627888765);
+END;
+



View it on GitLab: https://salsa.debian.org/med-team/python-dendropy/-/commit/63174079f462f65ff253d2eefbd059d39a19a5a4

-- 
View it on GitLab: https://salsa.debian.org/med-team/python-dendropy/-/commit/63174079f462f65ff253d2eefbd059d39a19a5a4
You're receiving this email because of your account on salsa.debian.org.


-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/debian-med-commit/attachments/20210421/85520971/attachment-0001.htm>


More information about the debian-med-commit mailing list